| prompt (string, length 19–1.03M) | completion (string, length 4–2.12k) | api (string, length 8–90) |
|---|---|---|
"""
Copyright (c) 2021, Stanford Neuromuscular Biomechanics Laboratory
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from matplotlib import rc
import pandas as pd
rc('font',**{'family':'sans-serif','sans-serif':['Arial']})
METRICS = ['auroc','ap']
N_SUBJ = [7, 4]
RESULT_DIR = '../results/'
DATASET_RESULT_DIRS = [RESULT_DIR + 'imus6_subjects7/',
RESULT_DIR + 'imus11_subjects4/']
# set up key, value pairs for combining certain sensor sets into
# one result
LABELS_TO_COMBINE_IMUS6 = {
'6_chest + lumbar + ankles + feet':
['sensors06_chest_lumbar_ankles_feet'],
'1_chest': ['sensors01_chest'],
'1_ankle': ['sensors01_lankle', 'sensors01_rankle'],
'1_foot': ['sensors01_lfoot', 'sensors01_rfoot'],
'1_lumbar': ['sensors01_lumbar'],
'2_lumbar + ankle': ['sensors02_lumbar_lankle',
'sensors02_lumbar_rankle'],
'2_ankles': ['sensors02_ankles'],
'2_feet': ['sensors02_feet'],
'3_chest + feet': ['sensors03_chest_feet'],
'3_lumbar + ankles': ['sensors03_lumbar_ankles'],
'3_lumbar + feet': ['sensors03_lumbar_feet']
}
LABELS_TO_COMBINE_OTHER = {
'11_all sensors': ['sensors11'],
'1_head': ['sensors01_head'],
'1_thigh': ['sensors01_lthigh', 'sensors01_rthigh'],
'1_wrist': ['sensors01_lwrist', 'sensors01_rwrist'],
'2_lumbar + wrist': ['sensors02_lumbar_lwrist',
'sensors02_lumbar_rwrist'],
'2_wrist + ankle': ['sensors02_lwrist_lankle',
'sensors02_rwrist_rankle'],
'2_chest + wrist': ['sensors02_chest_lwrist',
'sensors02_chest_rwrist'],
'3_chest + wrist + foot': ['sensors03_chest_lwrist_lfoot',
'sensors03_chest_rwrist_rfoot'],
'3_wrist + ankles': ['sensors03_lwrist_ankles',
'sensors03_rwrist_ankles'],
'3_wrist + feet': ['sensors03_lwrist_feet','sensors03_rwrist_feet']
}
# preferred sensors, from survey results
PREFERRED_SENSORS_IMUS6 = ['3_lumbar + ankles', '2_ankles', '1_lumbar']
PREFERRED_SENSORS_IMUS11 = ['3_wrist + ankles', '2_wrist + ankle',
'2_ankles', '1_wrist']
# colors
GREEN = '#117733'
RED = '#CC3311'
BLUE = '#0077BB'
ORANGE = '#EE7733'
GREY = '#CCCCCC'
def get_preferred_indices(y_pos, labels, preferred_sensors):
"""Get indices of preferred sensor sets in plot.
Args:
y_pos (list): list of y positions used for sensors in plot.
labels (list-like): sensor set labels associated with y_pos.
preferred_sensors (list-like): preferred sensor sets.
Returns:
indices (list): y positions associated with preferred sets.
"""
labels = list(labels)
indices = []
for sensor in preferred_sensors:
label_idx = labels.index(sensor)
indices.append(y_pos[label_idx])
return indices
def get_best_technical_set(df):
"""Identify set with greatest AUROC in dataframe.
Args:
df (pd Dataframe): must contain 'auroc_mean' and 'full_label'.
Returns:
best_set (str): label of best technical set.
"""
df = df.sort_values(by=['auroc_mean'], ascending=False,
ignore_index=True)
best_set = df['full_label'].iloc[0]
return best_set
def get_minimal_IMU_set(df):
"""Identify set with fewest sensors with AUROC within 5% that of
the best technical set.
Args:
df (pd Dataframe): must contain 'n_sensor', 'auroc_mean',
and 'full_label'.
Returns:
minimal_set (str): label of minimal IMU set.
"""
df = df.sort_values(by=['n_sensor', 'auroc_mean'],
ascending=[True,False], ignore_index=True)
max_roc = df['auroc_mean'].max()
thresh = max_roc - 0.05*max_roc
df = df[df['auroc_mean']>=thresh]
minimal_set = df['full_label'].iloc[0]
return minimal_set
# create a figure for each outcome metric
for metric in METRICS:
fig, ax = plt.subplots(2, 1, figsize=(10,12),
gridspec_kw={'height_ratios': [4, 7]}, dpi=1000)
# create a subplot for each dataset
for i in range(len(N_SUBJ)):
n = N_SUBJ[i]
dataset_result_dir = DATASET_RESULT_DIRS[i]
if n==4:
labels_to_combine = {**LABELS_TO_COMBINE_IMUS6,
**LABELS_TO_COMBINE_OTHER}
preferred_sensors = PREFERRED_SENSORS_IMUS11
elif n==7:
labels_to_combine = LABELS_TO_COMBINE_IMUS6
preferred_sensors = PREFERRED_SENSORS_IMUS6
else:
raise Exception('Expected 4 or 7 as element of nSubj. '\
'Received:', n)
# get average and SD of each sensor set's performance
labels = []
auroc_means = []
auroc_sds = []
ap_means = []
ap_sds = []
for key in labels_to_combine:
mean_aurocs = []
total_aps = []
subdirs = labels_to_combine[key]
for subdir in subdirs:
path = dataset_result_dir + subdir + '/'
aurocs = np.load(path + 'aurocs.npy')
aps = np.load(path + 'aps.npy')
ppv = np.load(path + 'ppv.npy')
mean_aurocs.extend(aurocs)
total_aps.extend(aps)
labels.append(key)
auroc_means.append(np.mean(mean_aurocs))
auroc_sds.append(np.std(mean_aurocs))
ap_means.append(np.mean(total_aps))
ap_sds.append(np.std(total_aps))
df = pd.DataFrame({'full_label':labels,
'auroc_mean': auroc_means,
'auroc_sd': auroc_sds,
'ap_mean': ap_means,
'ap_sd': ap_sds})
df[['n_sensor','sensor_label']] = (df['full_label']
.str.split('_', n=1, expand=True))
df['n_sensor'] = pd.to_numeric(df['n_sensor'])
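This sample ends at the label-splitting step. To make that pandas idiom and the 5%-of-best rule from `get_minimal_IMU_set` concrete, here is a minimal, self-contained sketch with invented numbers (not the study's actual results):

```python
import pandas as pd

# Toy stand-in for the per-sensor-set results table assembled above (numbers invented).
df = pd.DataFrame({
    'full_label': ['6_chest + lumbar + ankles + feet', '2_ankles', '1_lumbar'],
    'auroc_mean': [0.90, 0.88, 0.84],
})
# Split "count_label" once on the first underscore, then make the count numeric.
df[['n_sensor', 'sensor_label']] = df['full_label'].str.split('_', n=1, expand=True)
df['n_sensor'] = pd.to_numeric(df['n_sensor'])

# Same rule as get_minimal_IMU_set: fewest sensors within 5% of the best AUROC.
thresh = df['auroc_mean'].max() * 0.95
minimal = (df[df['auroc_mean'] >= thresh]
           .sort_values(['n_sensor', 'auroc_mean'], ascending=[True, False])
           ['full_label'].iloc[0])
print(minimal)  # '2_ankles' with these toy numbers
```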
#Copyright (c) 2018 <NAME> - MIT License
import json
import pandas as pd
import os
from operator import itemgetter
import gspread
from oauth2client.service_account import ServiceAccountCredentials
from expworkup.handlers import parser
from expworkup.handlers import calcmmol
from expworkup.handlers import calcmolarity
from expworkup.handlers import inchigen
debug = 0 #args.Debug
finalvol_entries=2 ## Hard coded number of formic acid entries at the end of the run (this needs fixing)
### General Setup Information ###
##GSpread Authorization information
scope= ['https://www.googleapis.com/auth/spreadsheets.readonly']
credentials = ServiceAccountCredentials.from_json_keyfile_name('expworkup/creds/creds.json', scope)
gc =gspread.authorize(credentials)
#Import the most recent chemical data sheet from google drive to process the inchi keys and data about chemicals
#Eventually needs to be linked to database import and broader database information
def ChemicalData():
print('Obtaining chemical information from Google Drive..', end='')
chemsheetid = "1JgRKUH_ie87KAXsC-fRYEw_5SepjOgVt7njjQBETxEg"
ChemicalBook = gc.open_by_key(chemsheetid)
chemicalsheet = ChemicalBook.get_worksheet(0)
chemical_list = chemicalsheet.get_all_values()
chemdf=pd.DataFrame(chemical_list, columns=chemical_list[0])
chemdf=chemdf.iloc[1:]
chemdf=chemdf.reset_index(drop=True)
chemdf=chemdf.set_index(['InChI Key (ID)'])
print('.done')
return(chemdf)
#Will eventually create a dataframe from the robot handling information
def robo_handling():
pass
#The name cleaner is hard coded at the moment for the chemicals we are using. This will need to be generalized somehow...
def nameCleaner(sub_dirty_df):
inorganic_list=[]
organic_df=pd.DataFrame()
cleaned_M=pd.DataFrame()
for header in sub_dirty_df.columns:
#GBl handling -- > Solvent labeled (or other solvent such as DMF)
if 'YEJRWHAVMIAJKC-UHFFFAOYSA-N' in header:# or 'ZMXDDKWLCZADIW-UHFFFAOYSA-N' in header:
print("1")
pass
#Acid handling --> Acid labeled --> will need to declare type in the future or something
elif "BDAGIHXWWSANSR-UHFFFAOYSA-N" in header:
cleaned_M['_rxn_M_acid']=sub_dirty_df[header]
# molarity_df['_rxn_M_acid'] = mmol_reagent_df[header] / (calculated_volumes_df['_raw_final_volume']/1000)
#PBI2 handling --> inorganic label
elif 'RQQRAHKHDFPBMC-UHFFFAOYSA-L' in header:# or 'ZASWJUOMEGBQCQ-UHFFFAOYSA-L' in header:
cleaned_M['_rxn_M_inorganic']=sub_dirty_df[header]
# molarity_df['_rxn_M_inorganic'] = mmol_reagent_df[header] / (calculated_volumes_df['_raw_final_volume']/1000)
else:
organic_df[header]=sub_dirty_df[header]
cleaned_M['_rxn_M_organic']=organic_df.sum(axis=1)
return(cleaned_M)
#cleans up the name space and the csv output for distribution
def cleaner(dirty_df, raw):
rxn_M_clean = nameCleaner(dirty_df.filter(like='_raw_M_'))
rxn_df=dirty_df.filter(like='_rxn_')
feat_df=dirty_df.filter(like='_feat_')
out_df=dirty_df.filter(like='_out_')
if raw == 0:
raw_df=dirty_df.filter(like='_raw_')
squeaky_clean_df=pd.concat([out_df,rxn_M_clean,rxn_df,feat_df, raw_df], axis=1)
else:
squeaky_clean_df=pd.concat([out_df,rxn_M_clean,rxn_df,feat_df], axis=1)
return(squeaky_clean_df)
## Unpack logic
#most granular data for each row of the final CSV is the well information.
#Each well will need all associated information of chemicals, run, etc.
#Unpack those values first and then copy the generated array to each of the individual wells
### developed enough now that it should be broken up into smaller pieces!
def unpackJSON(myjson_fol):
chem_df=(ChemicalData()) #Grabs relevant chemical data frame from google sheets (only once no matter how many runs)
concat_df_raw=pd.DataFrame()
for file in sorted(os.listdir(myjson_fol)):
if file.endswith(".json"):
concat_df=pd.DataFrame()
#appends each run to the original dataframe
myjson=(os.path.join(myjson_fol, file))
workflow1_json = json.load(open(myjson, 'r'))
#gathers all information from raw data in the JSON file
tray_df=parser.reagentparser(workflow1_json, myjson, chem_df) #generates the tray level dataframe for all wells including some calculated features
concat_df=pd.concat([concat_df,tray_df], ignore_index=True, sort=True)
#generates a well level unique ID and aligns
runID_df=pd.DataFrame(data=[concat_df['_raw_jobserial'] + '_' + concat_df['_raw_vialsite']]).transpose()
runID_df.columns=['RunID_vial']
#Gets the mmol of each CHEMICAL and returns them summed and uniquely indexed
mmol_df=calcmmol.mmol_breakoff(tray_df, runID_df)
#combines all operations into a final dataframe for the entire tray level view with all information
concat_df=pd.concat([mmol_df, concat_df, runID_df], sort=True, axis=1)
#Combines the most recent dataframe with the final dataframe which is targeted for export
concat_df_raw = pd.concat([concat_df_raw,concat_df], sort=True)
return(concat_df_raw) #this contains all of the raw values from the processed JSON files. No additional data has been calculated
def augmentdataset(raw_df):
''' Processes full dataset through a series of operations to add molarity, features, calculated values, etc
Takes the raw dataset compiled from the JSON files of each experiment and
performs rudimentary operations including: calculating concentrations and
adding features.
*This needs to be broken out into a separate module with each task allocated
a single script which can be edited independently
'''
rawdataset_df_filled = raw_df.fillna(0) #ensures that all values are filled (possibly problematic as 0 has a value)
dataset_calcs_fill_df = augmolarity(rawdataset_df_filled)
dataset_calcs_desc_fill_df = augdescriptors(dataset_calcs_fill_df)
return(dataset_calcs_desc_fill_df)
def augmolarity(concat_df_final):
''' Perform exp object molarity calculations (ideal concentrations), grab organic inchi
'''
concat_df_final.set_index('RunID_vial', inplace=True)
#grabs all of the raw mmol data from the column header and creates a column which uniquely identifies which organic will be needed for the features in the next step
inchi_df = concat_df_final.filter(like='_InChIKey')
inchi_df.to_csv('rxndf.csv')
#Sends off the final mmol list to specifically grab the organic inchi key and expose(current version)
OrganicInchi_df=inchigen.GrabOrganicInchi(inchi_df)
#takes all of the volume data from the robot run and reduces it into two total volumes, the total prior to FAH and the total after. Returns a 3 column array "totalvol and finalvol in title"
molarity_df=calcmolarity.molarity_calc(concat_df_final, finalvol_entries)
#Combines the new Organic inchi file and the sum volume with the main dataframe
dataset_calcs_fill_df=pd.concat([OrganicInchi_df, concat_df_final, molarity_df], axis=1, join_axes=[concat_df_final.index])
#Cleans the file in different ways for post-processing analysis
return(dataset_calcs_fill_df)
def augdescriptors(dataset_calcs_fill_df):
''' bring in the inchi key based features for a left merge
Temporary holder for processing the descriptors and adding them to the complete dataset.
If an amine is not present in the "perov_desc.csv1" file, the run will not be processed
and will error out silently! This is a feature not a bug (for now)
'''
with open('data/perov_desc.csv1', 'r') as my_descriptors:
descriptor_df=pd.read_csv(my_descriptors)
dirty_full_df=dataset_calcs_fill_df.merge(descriptor_df, left_on='_rxn_organic-inchikey', right_on='_raw_inchikey', how='inner')
runID_df_big= pd.DataFrame(data=[dirty_full_df['_raw_jobserial'] + '_' + dirty_full_df['_raw_vialsite']])
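The completed call rebuilds the well-level `RunID_vial` identifier that `unpackJSON` also constructs earlier in this file. A minimal sketch of that idiom with placeholder values (the job serials and vial sites below are invented):

```python
import pandas as pd

# Placeholder frame; the real one comes from the parsed JSON runs and the descriptor merge.
dirty_full_df = pd.DataFrame({
    '_raw_jobserial': ['2019xxx_runA', '2019xxx_runA'],
    '_raw_vialsite': ['A1', 'B2'],
})
runID_df_big = pd.DataFrame(
    data=[dirty_full_df['_raw_jobserial'] + '_' + dirty_full_df['_raw_vialsite']]
).transpose()
runID_df_big.columns = ['RunID_vial']
print(runID_df_big)  # two rows: 2019xxx_runA_A1 and 2019xxx_runA_B2
```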
import pandas as pd
import cufflinks as _cf
import plotly as _plotly
from matplotlib import pyplot as _plt
from datapool_client.api.api import DataPool
from datapool_client.core.formatting import reshape
from datapool_client.core.plots import generate_meta_plot
from datapool_client.core.utilities import (
determine_additional_meta_info_columns_of_meta_data_history,
)
_cf.go_offline()
class Plot:
def __init__(self, **kwargs):
self.__dp = DataPool(**kwargs)
def plot_signal(
self,
source_name,
parameter_name=None,
start="1900-01-01 00:00:00",
end=None,
plot_dynamic=True,
auto_open=True,
inline=False,
filename="plot.html",
show_query=False,
**kw_plot_args,
):
"""
Parameters
----------
source_name: str, specifying the name of the source instance
parameter_name: str | list, of parameter name(s)
start: str, specifying a datetime ideally in the format yyyy-mm-dd HH:MM:SS
end: str, specifying a datetime ideally in the format yyyy-mm-dd HH:MM:SS
plot_dynamic: bool, specifying whether to plot dynamically or statically
auto_open: bool, open plot in browser automatically if inline is False
inline: bool, return the figure so you can open it in your jupyter notebook
filename: str, filename of plot.html if inline is False
show_query: bool, specifying whether to print the query
**kw_plot_args: key word arguments that will be passed down to plot function. If plot_dynamic=True
keyword arguments will be passed to cufflinks (.iplot method) otherwise the arguments
will be passed matplotlib/pandas' (.plot method)
Return
------
signal_data: pandas.DataFrame, fig: plotly.Figure if inline=True
Example
-------
# plotting everything from a source
df=dp.signal.plot_signal("bt_dl927_164_luppmenweg")
# plotting one parameter of a source
df=dp.signal.plot_signal("bt_dl927_164_luppmenweg","battery voltage")
# plotting between time
df=dp.signal.plot_signal("bt_dl927_164_luppmenweg","battery voltage","2019-01-01","2019-01-07")
#plotting multiple parameters of a source
df=dp.signal.plot_signal("bt_dl927_164_luppmenweg",["SNR","battery voltage"])
--- with **kw_plot_args ---
### passed to pandas/matplotlib ###
# static plot splitted into subplots, specifying the figuresize
df=dp.signal.plot_signal("bt_dl927_164_luppmenweg",plot_dynamic=False, subplots = True, figsize=(16,16))
### passed to cufflinks ###
# dynamic plot with rangeslider
df=dp.signal.plot_signal("bt_dl927_164_luppmenweg", rangeslider=True)
# dynamic plot each signal in own plot
df=dp.signal.plot_signal("bt_dl927_164_luppmenweg", subplots = True)
# dynamic plot each signal in own plot, below each other
df=dp.signal.plot_signal("bt_dl927_164_luppmenweg", subplots = True, shape=(6,1))
# dynamic plot with rangeslider, not opening in browser
df=dp.signal.plot_signal("bt_dl927_164_luppmenweg", rangeslider=True, open_in_browser=False)
# dynamic plot with second y axis and rangeslider
df=dp.signal.plot_signal("bt_dl927_164_luppmenweg",["SNR","battery voltage"], rangeslider=True,secondary_y=["SNR"])
Other
-----
To get to know more about dynamic plotting in python:
# dynamic
https://plot.ly/python/
https://github.com/santosjorge/cufflinks (connector between pandas and plotly)
# static
https://matplotlib.org/3.0.2/index.html
https://pandas.pydata.org/pandas-docs/stable/user_guide/visualization.html
"""
df = self.__dp.signal.get(
source_name=source_name,
parameter_name=parameter_name,
start=start,
end=end,
show_query=show_query,
)
df_reshaped = reshape(df)
if plot_dynamic:
fig = df_reshaped.iplot(asFigure=True, **kw_plot_args)
if inline:
return df_reshaped, fig
else:
_plotly.offline.plot(fig, auto_open=auto_open, filename=filename)
else:
df_reshaped.plot(**kw_plot_args)
_plt.show()
return df_reshaped
def plot_signal_with_meta(
self,
*,
parameter_name=None,
source_name=None,
site_name=None,
start="1900-01-01 00:00:00",
end=None,
to_dataframe=True,
show_query=False,
color_encoding={
"source_installation": "#3a86ff",
"source_deinstallation": "#3a86ff",
"source_maintenance": "#8338ec",
"operational_malfunction": "#e5383b",
"miscellaneous": "#ffbe0b",
},
mark_via_key_word=None,
minimal_meta_info_with_minutes=10,
plot_title="Signal Meta Plot",
filename="meta_plot.html",
auto_open=True,
inline=False,
):
"""Arguments must be provided with keywords!
Parameters
----------
source_name: Str, specifying the source name
parameter_name: str | list, of parameter name(s)
site_name: Str, specifying the site name
start: Str, specifying a datetime ideally in the format yyyy-mm-dd HH:MM:SS
end: Str, specifying a datetime ideally in the format yyyy-mm-dd HH:MM:SS
to_dataframe: bool, specifying whether the query output should be formatted as dataframe
show_query: bool, specifying whether to print the query
color_encoding: dict, specifying plot colors of log types
mark_via_key_word: dict, specifying a keyword to search in meta-string and color for annotating
with an arrow on the bottom of the plot.
minimal_meta_info_with_minutes: Int, setting the minimal width of meta data area plots in minutes
plot_title: Str, title for plot
filename: Str, filename of plot.html if inline is False
auto_open: bool, open plot in browser automatically if inline is False
inline: bool, return the figure so you can open it in your jupyter notebook
Return
------
signal_data: pandas.DataFrame, meta_data: pd.DataFrame, fig: plotly.Figure if inline=True
Example
-------
from datapool_client import Plot
dp_plot = Plot() # this only works when a default connection has been set!
data, meta = dp_plot.plot_with_meta(
source_name="bl_dl320_597sbw_ara",
parameter_name="water_level",
start="2017-02-01",
end="2017-07-01",
)
"""
if source_name is None and site_name is None:
raise ValueError(
"You must provide at least one of the two: source_name, site_name."
)
data = self.__dp.signal.get(
source_name=source_name,
site_name=site_name,
parameter_name=parameter_name,
start=start,
end=end,
to_dataframe=to_dataframe,
show_query=show_query,
)
meta_data = self.__dp.meta_data_history.get(
source_name=source_name,
site_name=site_name,
start=start,
end=end,
to_dataframe=to_dataframe,
show_query=show_query,
)
if meta_data.empty and data.empty:
print("No data, no meta data available")
return data, meta_data
elif data.empty:
print("No data available")
return data, meta_data
elif meta_data.empty:
print("No meta data available")
return data, meta_data
data = reshape(data)
additional_meta_columns = (
determine_additional_meta_info_columns_of_meta_data_history(
meta_data.columns
)
)
meta_data["start"] = | pd.to_datetime(meta_data["start"]) | pandas.to_datetime |
# -*- coding: utf-8 -*-
"""Retrieve metadata from PLEXOS production cost modelling results.
Database can be either a h5plexos file or a formatted Marmot hdf5 file.
@author: <NAME>
"""
import os
import sys
import h5py
import pandas as pd
import numpy as np
import logging
class MetaData():
"""Handle the retrieval of metadata from the formatted or
original plexos solution h5 files.
Attributes:
filename (str) = The name of the h5 file to retrieve data from.
h5_data (h5py.File) = loaded h5 file in memory.
"""
filename: str = None
h5_data: h5py.File = None
def __init__(self, HDF5_folder_in: str, read_from_formatted_h5: bool = True,
Region_Mapping: pd.DataFrame = pd.DataFrame(),
partition_number: int = 0):
"""
Args:
HDF5_folder_in (str): Folder containing h5plexos h5 files.
read_from_formatted_h5 (bool, optional): Boolean for whether the metadata is
being read from the formatted hdf5 file or the original PLEXOS solution file.
Defaults to True.
Region_Mapping (pd.DataFrame, optional): DataFrame of extra regions to map.
Defaults to pd.DataFrame().
partition_number (int, optional): Which temporal partition of h5 data to retrieve
metadata from in the formatted h5 file. Defaults to 0.
"""
self.logger = logging.getLogger('marmot_format.'+__name__)
self.HDF5_folder_in = HDF5_folder_in
self.Region_Mapping = Region_Mapping
self.read_from_formatted_h5 = read_from_formatted_h5
self.partition_number = partition_number
self.start_index = None
@classmethod
def _check_if_existing_filename(cls, filename: str) -> bool:
"""Check if the passed filename is the same or different from previous calls.
If file is different replaces the filename with new value
and closes old file
Args:
filename (str): The name of the h5 file to retreive data from.
Returns:
bool: False if new file, True if existing
"""
if cls.filename != filename:
cls.filename = filename
cls.close_h5()
return False
elif cls.filename == filename:
return True
@classmethod
def close_h5(cls) -> None:
"""Closes h5 file open in memory.
"""
if cls.h5_data:
cls.h5_data.close()
def _read_data(self, filename: str) -> None:
"""Reads h5 file into memory.
Args:
filename (str): The name of the h5 file to retrieve
data from.
"""
self.logger.debug(f"Reading New h5 file: {filename}")
processed_file_format = "{}_formatted.h5"
try:
if self.read_from_formatted_h5:
filename = processed_file_format.format(filename)
self.h5_data = h5py.File(os.path.join(self.HDF5_folder_in, filename), 'r')
partitions = [key for key in self.h5_data['metadata'].keys()]
if self.partition_number > len(partitions):
self.logger.warning(f"\nYou have chosen to use metadata partition_number {self.partition_number}, "
f"But there are only {len(partitions)} partitions in your formatted h5 file.\n"
"Defaulting to partition_number 0")
self.partition_number = 0
self.start_index = f"metadata/{partitions[self.partition_number]}/"
else:
self.h5_data = h5py.File(os.path.join(self.HDF5_folder_in, filename), 'r')
self.start_index = "metadata/"
except OSError:
if self.read_from_formatted_h5:
self.logger.warning("Unable to find processed HDF5 file to retrieve metadata.\n"
"Check scenario name.")
return
else:
self.logger.info("\nIn order to initialize your database's metadata, "
"Marmot is looking for a h5plexos solution file.\n"
f"It is looking in {self.HDF5_folder_in}, but it cannot "
"find any *.h5 files there.\n"
"Please check the 'PLEXOS_Solutions_folder' input in your "
"'Marmot_user_defined_inputs.csv'.\n"
"Ensure that it matches the filepath containing the *.h5 files "
"created by h5plexos.\n\nMarmot will now quit.")
sys.exit()
def generator_category(self, filename: str) -> pd.DataFrame:
"""Generator categories mapping.
Args:
filename (str): The name of the h5 file to retrieve data from.
If retrieving from formatted h5 file, just pass scenario name.
"""
if not self._check_if_existing_filename(filename):
self._read_data(filename)
try:
try:
gen_category = pd.DataFrame(np.asarray(self.h5_data[self.start_index + 'objects/generator']))
except KeyError:
gen_category = pd.DataFrame(np.asarray(self.h5_data[self.start_index + 'objects/generators']))
gen_category.rename(columns={'name':'gen_name','category':'tech'}, inplace=True)
gen_category = gen_category.applymap(lambda x: x.decode("utf-8") if isinstance(x, bytes) else x)
except KeyError:
gen_category = pd.DataFrame()
return gen_category
def region_generators(self, filename: str) -> pd.DataFrame:
"""Region generators mapping.
Args:
filename (str): The name of the h5 file to retrieve data from.
If retrieving from formatted h5 file, just pass scenario name.
"""
if not self._check_if_existing_filename(filename):
self._read_data(filename)
try:
try:
region_gen = pd.DataFrame(np.asarray(self.h5_data[self.start_index + 'relations/regions_generators']))
except KeyError:
region_gen = pd.DataFrame(np.asarray(self.h5_data[self.start_index + 'relations/region_generators']))
region_gen.rename(columns={'child':'gen_name','parent':'region'}, inplace=True)
region_gen = region_gen.applymap(lambda x: x.decode("utf-8") if isinstance(x, bytes) else x)
region_gen.drop_duplicates(subset=["gen_name"],keep='first',inplace=True) #For generators which belong to more than 1 region, drop duplicates.
except KeyError:
region_gen = pd.DataFrame()
return region_gen
def region_generator_category(self, filename: str) -> pd.DataFrame:
"""Region generators category mapping.
Args:
filename (str): The name of the h5 file to retrieve data from.
If retrieving from formatted h5 file, just pass scenario name.
"""
try:
region_gen = self.region_generators(filename)
gen_category = self.generator_category(filename)
region_gen_cat = region_gen.merge(gen_category,
how="left", on='gen_name').sort_values(by=['tech','gen_name']).set_index('region')
except KeyError:
region_gen_cat = pd.DataFrame()
return region_gen_cat
def zone_generators(self, filename: str) -> pd.DataFrame:
"""Zone generators mapping.
Args:
filename (str): The name of the h5 file to retrieve data from.
If retrieving from formatted h5 file, just pass scenario name.
"""
if not self._check_if_existing_filename(filename):
self._read_data(filename)
try:
try:
zone_gen = pd.DataFrame(np.asarray(self.h5_data[self.start_index + 'relations/zones_generators']))
except KeyError:
zone_gen = pd.DataFrame(np.asarray(self.h5_data[self.start_index + 'relations/zone_generators']))
zone_gen.rename(columns={'child':'gen_name','parent':'zone'}, inplace=True)
zone_gen = zone_gen.applymap(lambda x: x.decode("utf-8") if isinstance(x, bytes) else x)
zone_gen.drop_duplicates(subset=["gen_name"],keep='first',inplace=True) #For generators which belong to more than 1 region, drop duplicates.
except KeyError:
zone_gen = pd.DataFrame()
return zone_gen
def zone_generator_category(self, filename: str) -> pd.DataFrame:
"""Zone generators category mapping.
Args:
filename (str): The name of the h5 file to retrieve data from.
If retrieving from formatted h5 file, just pass scenario name.
"""
try:
zone_gen = self.zone_generators(filename)
gen_category = self.generator_category(filename)
zone_gen_cat = zone_gen.merge(gen_category,
how="left", on='gen_name').sort_values(by=['tech','gen_name']).set_index('zone')
except KeyError:
zone_gen_cat = pd.DataFrame()
return zone_gen_cat
# Generator storage has been updated so that only one of tail_storage & head_storage is required
# If both are available, both are used
def generator_storage(self, filename: str) -> pd.DataFrame:
"""Generator Storage mapping.
Args:
filename (str): The name of the h5 file to retrieve data from.
If retrieving from formatted h5 file, just pass scenario name.
"""
if not self._check_if_existing_filename(filename):
self._read_data(filename)
head_tail = [0,0]
try:
generator_headstorage = pd.DataFrame()
generator_tailstorage = pd.DataFrame()
try:
generator_headstorage = pd.DataFrame(np.asarray(self.h5_data[self.start_index + 'relations/generators_headstorage']))
head_tail[0] = 1
except KeyError:
pass
try:
generator_headstorage = pd.DataFrame(np.asarray(self.h5_data[self.start_index + 'relations/generator_headstorage']))
head_tail[0] = 1
except KeyError:
pass
try:
generator_headstorage = pd.DataFrame(np.asarray(self.h5_data[self.start_index + 'relations/exportinggenerators_headstorage']))
head_tail[0] = 1
except KeyError:
pass
try:
generator_tailstorage = pd.DataFrame(np.asarray(self.h5_data[self.start_index + 'relations/generators_tailstorage']))
head_tail[1] = 1
except KeyError:
pass
try:
generator_tailstorage = pd.DataFrame(np.asarray(self.h5_data[self.start_index + 'relations/generator_tailstorage']))
head_tail[1] = 1
except KeyError:
pass
try:
generator_tailstorage = pd.DataFrame(np.asarray(self.h5_data[self.start_index + 'relations/importinggenerators_tailstorage']))
head_tail[1] = 1
except KeyError:
pass
if head_tail[0] == 1:
if head_tail[1] == 1:
gen_storage = pd.concat([generator_headstorage, generator_tailstorage])
else:
gen_storage = generator_headstorage
else:
gen_storage = generator_tailstorage
gen_storage.rename(columns={'child':'name','parent':'gen_name'}, inplace=True)
gen_storage = gen_storage.applymap(lambda x: x.decode("utf-8") if isinstance(x, bytes) else x)
except:
gen_storage = pd.DataFrame()
return gen_storage
def node_region(self, filename: str) -> pd.DataFrame:
"""Node Region mapping.
Args:
filename (str): The name of the h5 file to retrieve data from.
If retrieving from formatted h5 file, just pass scenario name.
"""
if not self._check_if_existing_filename(filename):
self._read_data(filename)
try:
try:
node_region = pd.DataFrame(np.asarray(self.h5_data[self.start_index + 'relations/nodes_region']))
except KeyError:
node_region = pd.DataFrame(np.asarray(self.h5_data[self.start_index + 'relations/node_region']))
node_region.rename(columns={'child':'region','parent':'node'}, inplace=True)
node_region = node_region.applymap(lambda x: x.decode("utf-8") if isinstance(x, bytes) else x)
node_region = node_region.sort_values(by=['node']).set_index('region')
except:
node_region = pd.DataFrame()
return node_region
def node_zone(self, filename: str) -> pd.DataFrame:
"""Node zone mapping.
Args:
filename (str): The name of the h5 file to retrieve data from.
If retrieving from formatted h5 file, just pass scenario name.
"""
if not self._check_if_existing_filename(filename):
self._read_data(filename)
try:
try:
node_zone = pd.DataFrame(np.asarray(self.h5_data[self.start_index + 'relations/nodes_zone']))
except KeyError:
node_zone = pd.DataFrame(np.asarray(self.h5_data[self.start_index + 'relations/node_zone']))
node_zone.rename(columns={'child':'zone','parent':'node'}, inplace=True)
node_zone = node_zone.applymap(lambda x: x.decode("utf-8") if isinstance(x, bytes) else x)
node_zone = node_zone.sort_values(by=['node']).set_index('zone')
except:
node_zone = pd.DataFrame()
return node_zone
def generator_node(self, filename: str) -> pd.DataFrame:
"""generator node mapping.
Args:
filename (str): The name of the h5 file to retrieve data from.
If retrieving from formatted h5 file, just pass scenario name.
"""
if not self._check_if_existing_filename(filename):
self._read_data(filename)
try:
try:
generator_node = pd.DataFrame(np.asarray(self.h5_data[self.start_index + 'relations/generators_nodes']))
except KeyError:
generator_node = pd.DataFrame(np.asarray(self.h5_data[self.start_index + 'relations/generator_nodes']))
generator_node.rename(columns={'child':'node','parent':'gen_name'}, inplace=True)
generator_node = generator_node.applymap(lambda x: x.decode("utf-8") if isinstance(x, bytes) else x)
# generators_nodes = generators_nodes.sort_values(by=['generator'])
except:
generator_node = pd.DataFrame()
return generator_node
def regions(self, filename: str) -> pd.DataFrame:
"""Region objects.
Args:
filename (str): The name of the h5 file to retrieve data from.
If retrieving from formatted h5 file, just pass scenario name.
"""
if not self._check_if_existing_filename(filename):
self._read_data(filename)
try:
try:
regions = pd.DataFrame(np.asarray(self.h5_data[self.start_index + 'objects/regions']))
except KeyError:
regions = pd.DataFrame(np.asarray(self.h5_data[self.start_index + 'objects/region']))
regions = regions.applymap(lambda x: x.decode("utf-8") if isinstance(x, bytes) else x)
regions.rename(columns={'name':'region'}, inplace=True)
regions.sort_values(['category','region'],inplace=True)
except KeyError:
self.logger.warning("Regional data not included in h5plexos results")
regions = pd.DataFrame()
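Most methods in this class share one idiom: load a structured byte array from h5py, rename the columns, and decode bytes to UTF-8 with `applymap`. A self-contained sketch of just that idiom (the generator names and categories are invented placeholders):

```python
import numpy as np
import pandas as pd

# h5py returns object/relation tables as structured arrays of bytes; toy example:
arr = np.array([(b"gen_01", b"Coal"), (b"gen_02", b"Wind")],
               dtype=[("name", "S10"), ("category", "S10")])
gen_category = pd.DataFrame(arr)
gen_category.rename(columns={"name": "gen_name", "category": "tech"}, inplace=True)
# Same decode step the class applies after every read.
gen_category = gen_category.applymap(
    lambda x: x.decode("utf-8") if isinstance(x, bytes) else x)
print(gen_category)
```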
# Copyright 2020 <NAME>, Pôle OFB-INRAE ECLA, UR RECOVER
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains various useful functions and classes used in the CLI."""
from pathlib import Path
from typing import Optional
import click
import pandas as pd
from shapely.wkt import loads
class PathPath(click.Path):
"""A Click path argument that returns a pathlib Path, not a string"""
def convert(self, value, param, ctx):
return Path(super().convert(value, param, ctx))
class Mutex(click.Option):
"""Mutually exclusive options (with at least one required)."""
def __init__(self, *args, **kwargs):
self.not_required_if = kwargs.pop('not_required_if') # list
assert self.not_required_if, '"not_required_if" parameter required'
kwargs['help'] = (f'{kwargs.get("help", "")} [required; mutually '
f'exclusive with {", ".join(self.not_required_if)}]')
super().__init__(*args, **kwargs)
def handle_parse_result(self, ctx, opts, args):
current_opt = self.name in opts # bool
if current_opt:
i = 1
else:
i = 0
for mutex_opt in self.not_required_if:
if mutex_opt in opts:
i += 1
if current_opt:
msg = (f'Illegal usage: "{self.name}" is mutually '
f'exclusive with "{mutex_opt}".')
raise click.UsageError(msg)
else:
self.prompt = None
if i == 0:
signature = ' / '.join(self.opts + self.secondary_opts)
msg = (f"Missing option '{signature}' (or any of the following "
f"options: {', '.join(self.not_required_if)})")
raise click.UsageError(msg)
return super().handle_parse_result(ctx, opts, args)
class Mutin(click.Option):
"""Mutually inclusive options."""
def __init__(self, *args, **kwargs):
self.required_if = kwargs.pop('required_if') # list
assert self.required_if, '"required_if" parameter required'
kwargs['help'] = (f'{kwargs.get("help", "")} [required if '
'the following options are used : '
f'{", ".join(self.required_if)}]')
super().__init__(*args, **kwargs)
def handle_parse_result(self, ctx, opts, args):
current_opt = self.name in opts # bool
for mutin_opt in self.required_if:
if mutin_opt not in opts:
if current_opt:
msg = (f'Illegal usage: "{mutin_opt}" is required '
f'if "{self.name}" is used.')
raise click.UsageError(msg)
return super().handle_parse_result(ctx, opts, args)
def _read_optional_column(df, key):
if key in df.columns:
return df[key].to_list()
return None
def read_products_list(path: Path, is_ts=False) -> dict:
"""Parse a text file and return a config dictionnary."""
df_products = pd.read_csv(path, ' ')
if is_ts:
product_types = _read_optional_column(df_products, 'product_type')
else:
product_types = df_products['product_type'].to_list()
config = {
'input_products': df_products['input_product'].to_list(),
'product_types': product_types,
'lst_tb': _read_optional_column(df_products, 'theia_bands'),
'lst_gc': _read_optional_column(df_products, 'glint_corrected'),
'lst_flags': _read_optional_column(df_products, 'flags'),
'filenames': _read_optional_column(df_products, 'out_product'),
'lst_masks_list': _read_optional_column(df_products, 'masks_list'),
'lst_code_site': _read_optional_column(df_products, 'code_site'),
'lst_res': _read_optional_column(df_products, 'res'),
'lst_proc_res': _read_optional_column(df_products, 'proc_res')
}
theia_masks = _read_optional_column(df_products, 'theia_masks')
if theia_masks is not None:
ll = []
for e in theia_masks:
if isinstance(e, str):
dd = {}
for _ in e.split(','):
if len(_) == 3:
dd[_] = None
else:
dd[_[:3]] = [int(__) for __ in _[3:]]
ll.append(dd)
else:
ll.append(None)
config['lst_tm'] = ll
lst_shp = _read_optional_column(df_products, 'shp')
if lst_shp is None:
lst_shp = [None for _ in range(len(df_products))]
lst_wkt = _read_optional_column(df_products, 'wkt')
if lst_wkt is None:
lst_wkt = [None for _ in range(len(df_products))]
lst_wf = _read_optional_column(df_products, 'wkt_file')
if lst_wf is None:
lst_wf = [None for _ in range(len(df_products))]
lst_srid = _read_optional_column(df_products, 'srid')
if lst_srid is None:
lst_srid = [None for _ in range(len(df_products))]
geoms = []
for shp, wkt, wkt_file, srid in zip(lst_shp, lst_wkt, lst_wf, lst_srid):
if shp is None and wkt is None and wkt_file is None and srid is None:
geoms.append(None)
else:
geoms.append({
'geom': None if wkt is None else loads(wkt),
'shp': shp,
'wkt': wkt_file,
'srid': srid
})
if geoms != [None] * len(geoms):
config['lst_geom'] = geoms
return {key: val for key, val in config.items() if val is not None}
def _read_calib(df: pd.DataFrame, key: str) -> list:
if key in df.columns:
return df[key].to_list()
return [None for _ in range(len(df))]
def _get_path(elem: Optional[str]) -> Path:
if pd.isna(elem):
return None
return Path(elem)
def read_algos_list(path: Path) -> dict:
"""Parse a text file and return a config dictionnary."""
df_algos = pd.read_csv(path, ' ')
config = {
'lst_algo': df_algos['algo'].to_list(),
'lst_band': _read_optional_column(df_algos, 'band'),
'lst_design': _read_optional_column(df_algos, 'design')
}
lst_calib = _read_calib(df_algos, 'calib')
lst_custom_calib = _read_calib(df_algos, 'custom_calib')
lst_custom_calib = [_get_path(_) for _ in lst_custom_calib]
merge_calib = []
for calib, custom_calib in zip(lst_calib, lst_custom_calib):
if not pd.isna(calib):
merge_calib.append(calib)
else:
merge_calib.append(custom_calib)
config['lst_calib'] = merge_calib
return {key: val for key, val in config.items() if val is not None}
def read_masks_list(path: Path) -> dict:
"""Parse a text file and return a config dictionnary."""
df_l3masks = pd.read_csv(path, ' ')
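`read_products_list` and the other helpers above expect a space-separated table. A minimal, made-up products list and the corresponding parse (the product types and file names are placeholders, and the optional columns are simply omitted):

```python
import io
import pandas as pd

text = (
    "input_product product_type out_product\n"
    "S2A_T31TFJ_20200101.zip S2_ESA_L2A lake_20200101.nc\n"
    "S2B_T31TFJ_20200111.zip S2_ESA_L2A lake_20200111.nc\n"
)
# sep passed as a keyword here for compatibility with recent pandas releases.
df_products = pd.read_csv(io.StringIO(text), sep=' ')
config = {
    'input_products': df_products['input_product'].to_list(),
    'product_types': df_products['product_type'].to_list(),
    'filenames': df_products['out_product'].to_list(),
}
print(config)
```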
import sys
sys.path.append("./")
import backtrader as bt
from backtrader import plot
import matplotlib.pyplot as plt
import os, sqlite3, config
import pandas as pd
from jinja2 import Environment, FileSystemLoader
from weasyprint import HTML
from utils import timestamp2str, get_now, dir_exists
conn = sqlite3.connect(config.DB_FILE)
class PerformanceReport:
""" Report with performce stats for given backtest run
"""
def __init__(self, stratbt, conn, infilename, user, memo, outputdir, run_id):
self.stratbt = stratbt # works for only 1 strategy
self.infilename = infilename
self.outputdir = outputdir
self.user = user
self.memo = memo
self.check_and_assign_defaults()
self.conn = conn
self.cursor = self.conn.cursor()
self.run_id = run_id
def check_and_assign_defaults(self):
""" Check initialization parameters or assign defaults
"""
if not self.infilename:
self.infilename = 'Not given'
# if not dir_exists(self.outputdir):
# msg = "*** ERROR: outputdir {} does not exist."
# print(msg.format(self.outputdir))
# sys.exit(0)
if not self.user:
self.user = 'GKCap'
if not self.memo:
self.memo = 'No comments'
def get_performance_stats(self):
""" Return dict with performace stats for given strategy withing backtest
"""
st = self.stratbt
dt = st.data._dataname['open'].index
trade_analysis = st.analyzers.myTradeAnalysis.get_analysis()
rpl = trade_analysis.pnl.net.total
total_return = rpl / self.get_startcash()
total_number_trades = trade_analysis.total.total
trades_closed = trade_analysis.total.closed
bt_period = dt[-1] - dt[0]
bt_period_days = bt_period.days
drawdown = st.analyzers.myDrawDown.get_analysis()
sharpe_ratio = st.analyzers.mySharpe.get_analysis()['sharperatio']
sqn_score = st.analyzers.mySqn.get_analysis()['sqn']
kpi = {# PnL
'start_cash': self.get_startcash(),
'rpl': rpl,
'result_won_trades': trade_analysis.won.pnl.total,
'result_lost_trades': trade_analysis.lost.pnl.total,
'profit_factor': (-1 * trade_analysis.won.pnl.total / trade_analysis.lost.pnl.total),
'rpl_per_trade': rpl / trades_closed,
'total_return': 100 * total_return,
'annual_return': (100 * (1 + total_return)**(365.25 / bt_period_days) - 100),
'max_money_drawdown': drawdown['max']['moneydown'],
'max_pct_drawdown': drawdown['max']['drawdown'],
# trades
'total_number_trades': total_number_trades,
'trades_closed': trades_closed,
'pct_winning': 100 * trade_analysis.won.total / trades_closed,
'pct_losing': 100 * trade_analysis.lost.total / trades_closed,
'avg_money_winning': trade_analysis.won.pnl.average,
'avg_money_losing': trade_analysis.lost.pnl.average,
'best_winning_trade': trade_analysis.won.pnl.max,
'worst_losing_trade': trade_analysis.lost.pnl.max,
# performance
'sharpe_ratio': sharpe_ratio,
'sqn_score': sqn_score,
'sqn_human': self._sqn2rating(sqn_score)
}
return kpi
def get_equity_curve(self):
""" Return series containing equity curve
"""
st = self.stratbt
dt = st.data._dataname['open'].index
value = st.observers.broker.lines[1].array[:len(dt)]
curve = pd.Series(data=value, index=dt)
return 100 * curve / curve.iloc[0]
def _sqn2rating(self, sqn_score):
""" Converts sqn_score score to human readable rating
See: http://www.vantharp.com/tharp-concepts/sqn.asp
"""
if sqn_score < 1.6:
return "Poor"
elif sqn_score < 1.9:
return "Below average"
elif sqn_score < 2.4:
return "Average"
elif sqn_score < 2.9:
return "Good"
elif sqn_score < 5.0:
return "Excellent"
elif sqn_score < 6.9:
return "Superb"
else:
return "Holy Grail"
def __str__(self):
msg = ("*** PnL: ***\n"
"Start capital : {start_cash:4.2f}\n"
"Total net profit : {rpl:4.2f}\n"
"Result winning trades : {result_won_trades:4.2f}\n"
"Result lost trades : {result_lost_trades:4.2f}\n"
"Profit factor : {profit_factor:4.2f}\n"
"Total return : {total_return:4.2f}%\n"
"Annual return : {annual_return:4.2f}%\n"
"Max. money drawdown : {max_money_drawdown:4.2f}\n"
"Max. percent drawdown : {max_pct_drawdown:4.2f}%\n\n"
"*** Trades ***\n"
"Number of trades : {total_number_trades:d}\n"
" %winning : {pct_winning:4.2f}%\n"
" %losing : {pct_losing:4.2f}%\n"
" avg money winning : {avg_money_winning:4.2f}\n"
" avg money losing : {avg_money_losing:4.2f}\n"
" best winning trade: {best_winning_trade:4.2f}\n"
" worst losing trade: {worst_losing_trade:4.2f}\n\n"
"*** Performance ***\n"
"Sharpe ratio : {sharpe_ratio:4.2f}\n"
"SQN score : {sqn_score:4.2f}\n"
"SQN human : {sqn_human:s}"
)
kpis = self.get_performance_stats()
# see: https://stackoverflow.com/questions/24170519/
# python-# typeerror-non-empty-format-string-passed-to-object-format
kpis = {k: -999 if v is None else v for k, v in kpis.items()}
return msg.format(**kpis)
def plot_equity_curve(self, fname='equity_curve.png'):
""" Plots equity curve to png file
"""
curve = self.get_equity_curve()
buynhold = self.get_buynhold_curve()
xrnge = [curve.index[0], curve.index[-1]]
dotted = pd.Series(data=[100, 100], index=xrnge)
fig, ax = plt.subplots(1, 1)
ax.set_ylabel('Net Asset Value (start=100)')
ax.set_title('Equity curve')
_ = curve.plot(kind='line', ax=ax)
_ = buynhold.plot(kind='line', ax=ax, color='grey')
_ = dotted.plot(kind='line', ax=ax, color='grey', linestyle=':')
return fig
def _get_periodicity(self):
""" Maps length backtesting interval to appropriate periodiciy for return plot
"""
curve = self.get_equity_curve()
startdate = curve.index[0]
enddate = curve.index[-1]
time_interval = enddate - startdate
time_interval_days = time_interval.days
if time_interval_days > 5 * 365.25:
periodicity = ('Yearly', 'Y')
elif time_interval_days > 365.25:
periodicity = ('Monthly', 'M')
elif time_interval_days > 50:
periodicity = ('Weekly', '168H')
elif time_interval_days > 5:
periodicity = ('Daily', '24H')
elif time_interval_days > 0.5:
periodicity = ('Hourly', 'H')
elif time_interval_days > 0.05:
periodicity = ('Per 15 Min', '15M')
else: periodicity = ('Per minute', '1M')
return periodicity
def plot_return_curve(self, fname='return_curve.png'):
""" Plots return curve to png file
"""
curve = self.get_equity_curve()
period = self._get_periodicity()
values = curve.resample(period[1]).ohlc()['close']
# returns = 100 * values.diff().shift(-1) / values
returns = 100 * values.diff() / values
returns.index = returns.index.date
is_positive = returns > 0
fig, ax = plt.subplots(1, 1)
ax.set_title("{} returns".format(period[0]))
ax.set_xlabel("date")
ax.set_ylabel("return (%)")
_ = returns.plot.bar(color=is_positive.map({True: 'green', False: 'red'}), ax=ax)
return fig
def generate_html(self):
""" Returns parsed HTML text string for report
"""
basedir = os.path.abspath(os.path.dirname(__file__))
images = os.path.join(basedir, 'report_templates')
eq_curve = os.path.join(images, 'equity_curve.png')
rt_curve = os.path.join(images, 'return_curve.png')
fig_equity = self.plot_equity_curve()
fig_equity.savefig(eq_curve)
fig_return = self.plot_return_curve()
fig_return.savefig(rt_curve)
env = Environment(loader=FileSystemLoader('.'))
template = env.get_template("report_templates/template.html")
header = self.get_header_data()
kpis = self.get_performance_stats()
graphics = {'url_equity_curve': 'file://' + eq_curve,
'url_return_curve': 'file://' + rt_curve
}
all_numbers = {**header, **kpis, **graphics}
html_out = template.render(all_numbers)
return html_out
def generate_return_data(self):
curve = self.get_equity_curve()
period = self._get_periodicity()
values = curve.resample(period[1]).ohlc()['close']
# returns = 100 * values.diff().shift(-1) / values
returns = 100 * values.diff() / values
returns.index = returns.index.date
returns = pd.Series(data=returns, index=returns.index)
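The truncated `generate_return_data` mirrors `plot_return_curve`: resample the equity curve, keep the period close, and take percentage differences. A standalone sketch of that pattern on a synthetic curve (random walk, not real backtest output):

```python
import numpy as np
import pandas as pd

# Synthetic minute-frequency equity curve standing in for get_equity_curve().
idx = pd.date_range('2020-01-01', periods=7 * 24 * 60, freq='T')
nav = 100 * np.cumprod(1 + np.random.normal(0, 1e-4, len(idx)))
curve = pd.Series(nav, index=idx)

values = curve.resample('24H').ohlc()['close']   # one closing NAV per period
returns = 100 * values.diff() / values           # percent change, as in plot_return_curve
returns.index = returns.index.date
returns = pd.Series(data=returns, index=returns.index)
print(returns)
```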
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Parse predicted vs actual family membership from the HMM model output.
The HMMER suite of programs provides utilities to build HMM profiles from
alignments of protein sequences, and evaluate the predicted class membership for
sets of unaligned protein sequences. HMMER can be installed by running
apt-get install hmmer
The HMMER manual can be found at
http://eddylab.org/software/hmmer/Userguide.pdf
Given a set of PFAM hmmsearch output files built from a set of unaligned test
sequences, this script extracts output statistics from these text files.
Specifically it records all sequences that score below the reporting threshold
which by default is set to an E-value <= 10. If no such sequences were found,
then a sentinel value is written.
The output files analyzed in this script are produced with a command like:
```
hmmsearch --tblout pfam_output/PF00131.19.txt pfam_hmm/PF00131.19.hmm
testseqs.fasta
```
A sample HMMER output file can be found in hmmer_test.py.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import subprocess
import tempfile
from absl import logging
import hmmer_utils
import pandas as pd
import parallel
import tensorflow.compat.v1 as tf
from tqdm import tqdm
# Manually verified 12 threads performs better than anything smaller.
# 12 is the number of cores on most engineering desktops.
_NUM_HMMER_PROCESSES_TO_RUN_IN_PARALLEL = 12
def run_hmmbuild_for_seqalign(seqalign_file_path, profile_file_path):
"""Write output of hmmsearch binary of align all queries with all sequences.
This runs with the command
hmmbuild hmmfile msafile
Args:
seqalign_file_path: string. Path to aligned family fasta file (msafile).
profile_file_path: string. Filename of hmm profile to be made by hmmbuild.
"""
subprocess.check_output(
['hmmbuild', '--amino', profile_file_path, seqalign_file_path])
def run_hmmbuild_write_profiles(train_align_dir, hmm_dir):
"""Run hmmbuild over all train alignments and write profiles.
Args:
train_align_dir: string. Directory of aligned sequence files for training.
hmm_dir: string. Path to write .hmm files created by HMMbuild.
"""
if not tf.io.gfile.IsDirectory(hmm_dir):
logging.warn('Making directory %s', hmm_dir)
tf.io.gfile.MakeDirs(hmm_dir)
list_of_args_to_function = []
for seqalign_file in os.listdir(train_align_dir):
family_name = hmmer_utils.get_name_from_seq_filename(seqalign_file)
train_seqalign_file_path = os.path.join(train_align_dir, seqalign_file)
profile_file_path = os.path.join(hmm_dir, family_name) + '.hmm'
list_of_args_to_function.append(
dict(
seqalign_file_path=train_seqalign_file_path,
profile_file_path=profile_file_path))
logging.info('Building hmms for %d families.', len(list_of_args_to_function))
parallel.RunInParallel(
run_hmmbuild_for_seqalign,
list_of_args_to_function,
_NUM_HMMER_PROCESSES_TO_RUN_IN_PARALLEL,
cancel_futures=True)
def run_hmmsearch_for_profile(profile_file_path, test_sequence_file,
request_all_match_output):
"""Run hmmsearch binary of all test sequences against an hmm profile.
Args:
profile_file_path: string. Filename of .hmm profile made by hmmbuild.
test_sequence_file: string. Fasta file containing all test sequences.
request_all_match_output: boolean. If True, run hmmsearch with --max
to turn off all filtering.
Returns:
string. Output of running the binary hmmsearch.
"""
command = [
'hmmsearch',
'--tblout',
'/dev/stdout',
'-o',
'/dev/null',
]
if request_all_match_output:
command.append('--max')
command.extend([profile_file_path, test_sequence_file])
return subprocess.check_output(command)
def write_hmmsearch_outputs_for_one_profile(hmm_profile, test_sequence_file,
request_all_match_output):
"""Returns HMMEROutput from hmmsearch for the hmm against the test file.
Args:
hmm_profile: string. Filename of file created by hmmsearch for a set of test
sequences.
test_sequence_file: string. Path to fasta file of unaligned test sequences.
request_all_match_output: boolean. If True, run hmmsearch with --max
to turn off all filtering.
Returns:
list of HMMEROutputs.
"""
output = run_hmmsearch_for_profile(
hmm_profile,
test_sequence_file,
request_all_match_output=request_all_match_output)
hmm_profile_family = hmmer_utils.get_family_from(hmm_profile)
hmmer_output = hmmer_utils.parse_hmmer_output(output, hmm_profile_family)
return hmmer_output
def write_hmmsearch_outputs_for_all_profiles(
hmm_dir, test_sequence_file, parsed_output, request_all_match_output):
"""Run hmmsearch over testseqs for each profile in hmm_dir, write to csv file.
The csv content is:
sequence_name, predicted_label, true_label, score
Where sequence_name is the uniprot identifier, including domain indices,
and true and predicted label are pfam family accession ids.
Args:
hmm_dir: string. Path to .hmm files created by HMMbuild.
test_sequence_file: string. Path to fasta file of unaligned test sequences.
parsed_output: string. csv file to which to write parsed HMMsearch outputs.
request_all_match_output: boolean. If True, run hmmsearch with --max
to turn off all filtering.
"""
input_hmmer_files = [
os.path.join(hmm_dir, hmm_output_file)
for hmm_output_file in os.listdir(hmm_dir)
]
list_of_kwargs_to_function = [
dict(
hmm_profile=hmm_profile,
test_sequence_file=test_sequence_file,
request_all_match_output=request_all_match_output)
for hmm_profile in input_hmmer_files
]
logging.info('Running hmmsearch for %d families.', len(input_hmmer_files))
hmmsearch_results = parallel.RunInParallel(
write_hmmsearch_outputs_for_one_profile,
list_of_kwargs_to_function,
_NUM_HMMER_PROCESSES_TO_RUN_IN_PARALLEL,
cancel_futures=True)
logging.info('Writing results to file %s', parsed_output)
with tf.io.gfile.GFile(parsed_output, 'w') as parsed_output_first_pass_file:
parsed_output_first_pass_file.write(
','.join(hmmer_utils.HMMER_OUTPUT_CSV_COLUMN_HEADERS) + '\n')
for search_result in tqdm(hmmsearch_results):
to_write = '\n'.join([str(x.format_as_csv()) for x in search_result])
parsed_output_first_pass_file.write(to_write + '\n')
def make_second_pass(hmm_dir, parsed_output_first_pass,
parsed_output_second_pass, test_sequence_file):
"""Runs hmmsearch with higher specificity on seqs that have no predictions.
- Computes set of sequences for which the first pass did not produce any
predictions.
- Reruns hmmsearch with --max argument to get even more predictions (slower,
but more correct).
Args:
hmm_dir: string. Path to .hmm files created by HMMbuild.
parsed_output_first_pass: string. csv file where the first pass of
lower-specificity hmmsearch results have been written.
parsed_output_second_pass: string. csv file to which to write parsed
hmmsearch outputs (of those sequences missed by the first pass).
test_sequence_file: string. Path to fasta file of unaligned test sequences.
"""
with tf.io.gfile.GFile(parsed_output_first_pass, 'r') as pred_file:
first_pass_predictions = pd.read_csv(pred_file)
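The completed call reads the first-pass predictions back in. Per the docstring earlier in this file, the parsed CSV columns are `sequence_name, predicted_label, true_label, score`; assuming that layout, the gap computation the second pass needs might look like this sketch (the rows and sequence names are invented):

```python
import io
import pandas as pd

first_pass_csv = io.StringIO(
    "sequence_name,predicted_label,true_label,score\n"
    "seqA/1-100,PF00131.19,PF00131.19,1e-30\n"
)
first_pass_predictions = pd.read_csv(first_pass_csv)

all_test_sequences = {"seqA/1-100", "seqB/20-180"}  # would come from the test fasta file
covered = set(first_pass_predictions["sequence_name"])
missed = all_test_sequences - covered
print(missed)  # only these would be rerun with the slower `hmmsearch --max` pass
```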
import ifcopenshell as ifc
import plotly.express as px
import pandas as pd
def get_attr_of_pset(_id, ifc_file):
""" Get all attributes of an instance by given Id
param _id: id of instance
return: dict of dicts of attributes
"""
dict_psets = {}
try:
defined_by_type = [x.RelatingType for x in ifc_file[_id].IsDefinedBy if x.is_a("IfcRelDefinesByType")]
defined_by_properties = [x.RelatingPropertyDefinition for x in ifc_file[_id].IsDefinedBy if
x.is_a("IfcRelDefinesByProperties")]
except:
dict_psets.update({ifc_file[_id].GlobalId: "No Attributes found"})
else:
for x in defined_by_type:
if x.HasPropertySets:
for y in x.HasPropertySets:
for z in y.HasProperties:
dict_psets.update({z.Name: z.NominalValue.wrappedValue})
for x in defined_by_properties:
if x.is_a("IfcPropertySet"):
for y in x.HasProperties:
if y.is_a("IfcPropertySingleValue"):
dict_psets.update({y.Name: y.NominalValue.wrappedValue})
# this could be useful for multilayered walls in Allplan
if y.is_a("IfcComplexProperty"):
for z in y.HasProperties:
dict_psets.update({z.Name: z.NominalValue.wrappedValue})
if x.is_a("IfcElementQuantity"):
for y in x.Quantities:
dict_psets.update({y[0]: y[3]})
finally:
dict_psets.update({"IfcGlobalId": ifc_file[_id].GlobalId})
return dict_psets
def get_structural_storey(_id, ifc_file):
""" Get structural (IfcBuilgingStorey) information of an instance by given Id
param _id: id of instance
return: dict of attributes
"""
dict_structural = {}
instance = ifc_file[_id]
try:
structure = instance.ContainedInStructure
storey = structure[0].RelatingStructure.Name
except:
dict_structural.update({"Storey": "No Information found"})
else:
dict_structural.update({"Storey": storey})
finally:
return dict_structural
def movecol(df, cols_to_move=[], ref_col='', place='After'):
cols = df.columns.tolist()
if place == 'After':
seg1 = cols[:list(cols).index(ref_col) + 1]
seg2 = cols_to_move
if place == 'Before':
seg1 = cols[:list(cols).index(ref_col)]
seg2 = cols_to_move + [ref_col]
seg1 = [i for i in seg1 if i not in seg2]
seg3 = [i for i in cols if i not in seg1 + seg2]
return (df[seg1 + seg2 + seg3])
def parser(contents):
ifc_file = ifc.open(contents)
rooms = ifc_file.by_type("IfcSpace")
instances = ifc_file.by_type("IfcBuildingElement")
project = ifc_file.by_type("IfcProject")[0].Name
for room in rooms:
instances.append(room)
excel_list = []
for inst in instances:
info_pset = get_attr_of_pset(inst.id(), ifc_file=ifc_file)
info = inst.get_info()
for x in inst.IsDefinedBy:
if x.is_a("IfcRelDefinesByType") == True:
info_pset.update({"Type_Name": x.get_info()["RelatingType"].Name})
else:
pass
info_pset.update({"Name": inst.Name})
info_pset.update({"IfcType": info["type"]})
info_pset.update({"Project": project})
if inst.is_a("IfcSpace") == True:
info_structural = inst.Decomposes[0].RelatingObject.Name
info_pset.update({"Storey": info_structural})
else:
info_structural = get_structural_storey(inst.id(), ifc_file=ifc_file)
info_pset.update(info_structural)
excel_list.append(info_pset)
df1 = pd.DataFrame(excel_list)
df2 = movecol(df1,
cols_to_move=['IfcType', 'Storey'],
ref_col=df1.columns[0],
place='Before')
return df2
def all_divide(df2, path):
worterbuch = {}
for item in df2.IfcType.unique():
DF = df2[df2['IfcType'].str.contains(item, na=False)]
DF = DF.dropna(axis='columns', how='all')
worterbuch[item] = DF
with pd.ExcelWriter(path) as writer:
for i in worterbuch.keys():
worterbuch[i].to_excel(writer, sheet_name=i)
def unique(df, path):
names = []
data = []
for column in df.columns:
name = column
value = list(df[name].unique())
names.append(name)
data.append(value)
df2 = pd.DataFrame(data, names)
df2 = df2.transpose()
df2.to_excel(path)
def unique_csv(df, path):
names = []
data = []
for column in df.columns:
name = column
value = list(df[name].unique())
names.append(name)
data.append(value)
    df2 = pd.DataFrame(data, names)
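    # assumed completion, mirroring `unique` above: transpose and write to CSV instead of Excel
    df2 = df2.transpose()
    df2.to_csv(path)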
#
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from distutils.version import LooseVersion
import pandas as pd
from pandas.api.types import CategoricalDtype
import databricks.koalas as ks
from databricks.koalas.testing.utils import ReusedSQLTestCase, TestUtils
class CategoricalTest(ReusedSQLTestCase, TestUtils):
def test_categorical_frame(self):
pdf = pd.DataFrame(
{
"a": pd.Categorical([1, 2, 3, 1, 2, 3]),
"b": pd.Categorical(["a", "b", "c", "a", "b", "c"], categories=["c", "b", "a"]),
},
index=pd.Categorical([10, 20, 30, 20, 30, 10], categories=[30, 10, 20], ordered=True),
)
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf, pdf)
self.assert_eq(kdf.a, pdf.a)
self.assert_eq(kdf.b, pdf.b)
self.assert_eq(kdf.index, pdf.index)
self.assert_eq(kdf.sort_index(), pdf.sort_index())
self.assert_eq(kdf.sort_values("b"), pdf.sort_values("b"))
def test_categorical_series(self):
pser = pd.Series([1, 2, 3], dtype="category")
kser = ks.Series([1, 2, 3], dtype="category")
self.assert_eq(kser, pser)
self.assert_eq(kser.cat.categories, pser.cat.categories)
self.assert_eq(kser.cat.codes, pser.cat.codes)
self.assert_eq(kser.cat.ordered, pser.cat.ordered)
def test_astype(self):
pser = pd.Series(["a", "b", "c"])
kser = ks.from_pandas(pser)
self.assert_eq(kser.astype("category"), pser.astype("category"))
self.assert_eq(
kser.astype(CategoricalDtype(["c", "a", "b"])),
pser.astype(CategoricalDtype(["c", "a", "b"])),
)
pcser = pser.astype(CategoricalDtype(["c", "a", "b"]))
kcser = kser.astype(CategoricalDtype(["c", "a", "b"]))
self.assert_eq(kcser.astype("category"), pcser.astype("category"))
if LooseVersion(pd.__version__) >= LooseVersion("1.2"):
self.assert_eq(
kcser.astype(CategoricalDtype(["b", "c", "a"])),
pcser.astype(CategoricalDtype(["b", "c", "a"])),
)
else:
self.assert_eq(
kcser.astype(CategoricalDtype(["b", "c", "a"])),
pser.astype(CategoricalDtype(["b", "c", "a"])),
)
self.assert_eq(kcser.astype(str), pcser.astype(str))
def test_factorize(self):
pser = pd.Series(["a", "b", "c", None], dtype=CategoricalDtype(["c", "a", "d", "b"]))
kser = ks.from_pandas(pser)
pcodes, puniques = pser.factorize()
kcodes, kuniques = kser.factorize()
self.assert_eq(kcodes.tolist(), pcodes.tolist())
self.assert_eq(kuniques, puniques)
pcodes, puniques = pser.factorize(na_sentinel=-2)
kcodes, kuniques = kser.factorize(na_sentinel=-2)
self.assert_eq(kcodes.tolist(), pcodes.tolist())
self.assert_eq(kuniques, puniques)
def test_groupby_apply(self):
pdf = pd.DataFrame(
{
"a": | pd.Categorical([1, 2, 3, 1, 2, 3]) | pandas.Categorical |
import datetime
import logging
import os.path
import random
import sys
from os import path
from typing import List
import pandas as pd
from tqdm import tqdm
from query_formulation.search import ElasticSearch, SibilsElastic
random.seed(0)
# Gets or creates a logger
def get_logger(filename: str, name: str):
logger = logging.getLogger(name)
# set log level
logger.setLevel(logging.INFO)
# define file handler and set formatter
file_handler = logging.FileHandler(filename)
formatter = logging.Formatter("%(asctime)s : %(levelname)s : %(message)s")
file_handler.setFormatter(formatter)
# add file handler to logger
logger.addHandler(file_handler)
return logger
logger = get_logger("queries.log", __name__)
logger.info("logging initiated")
current_path = path.abspath(path.dirname(__file__))
data_dir = path.join(current_path, 'data')
if len(sys.argv) > 1 and sys.argv[1].lower() == 'sigir':
dataset_type = 'SIGIR_'
else:
dataset_type = ''
searcher = ElasticSearch(None)
relevance_df: pd.DataFrame = pd.read_csv(f'{data_dir}/{dataset_type}results.csv')
topics_df: pd.DataFrame = pd.read_csv(f"{data_dir}/{dataset_type}topics.csv")
def evaluate_result(topic_id: str, query_result: List[str], recall_at_count=1000):
int_query_result = []
for idx, r in enumerate(query_result):
try:
            int_query_result.append(int(r))  # pubmed ids are returned as strings; convert them to integers
except:
continue
relevant_pids = relevance_df[relevance_df["topic_id"] == topic_id]['pubmed_id'].tolist()
assert len(relevant_pids)
total_relevant = len(relevant_pids)
total_found = len(int_query_result)
recall = precision = f_score = recall_at = 0
if not total_found:
        return recall, precision, recall_at, f_score
true_positives = set(relevant_pids).intersection(int_query_result)
tp = len(true_positives)
recall = round(tp / total_relevant, 4)
if len(int_query_result) > recall_at_count:
true_positives_at = set(relevant_pids).intersection(
int_query_result[:recall_at_count]
)
recall_at = round(len(true_positives_at) / total_relevant, 4)
else:
recall_at = recall
precision = round(tp / total_found, 5)
if not precision and not recall:
f_score = 0
else:
f_score = 2 * precision * recall / (precision + recall)
return recall, precision, recall_at, f_score
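# Worked example for evaluate_result with hypothetical counts: 1000 relevant documents,
# 500 retrieved, 400 true positives
# -> recall = 400/1000 = 0.4, precision = 400/500 = 0.8, f_score = 2*0.8*0.4/(0.8+0.4) = 0.533 (rounded),
#    and recall_at equals recall because only 500 results were returned (<= recall_at_count).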
def search_and_eval(row):
query = row["query"]
topic_id = row["topic_id"]
end_date, start_date = None, None
if 'start_date' in row and row['start_date']:
try:
start_date = datetime.datetime.strptime(row['start_date'], '%Y-%m-%d')
except Exception:
start_date = None
if 'end_date' in row and row['end_date']:
try:
end_date = datetime.datetime.strptime(row['end_date'], '%Y-%m-%d')
except Exception:
end_date = None
try:
results = searcher.search_pubmed(query, start_date=start_date, end_date=end_date)
except Exception as e:
logger.warning(
f"ERROR: topic_id {topic_id} with query {query} error {e}"
)
        return pd.DataFrame()
'''
/*******************************************************************************
* Copyright 2016-2019 Exactpro (Exactpro Systems Limited)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
'''
import numpy
import matplotlib.pyplot as plt
import scipy.stats as stats
import pandas
import datetime
import calendar
class RelativeFrequencyChart:
# returns coordinates for each chart column
def get_coordinates(self, data, bins): # bins - chart columns count
self.btt = numpy.array(list(data))
self.y, self.x, self.bars = plt.hist(self.btt, weights=numpy.zeros_like(self.btt) + 1. / self.btt.size, bins=bins)
return self.x, self.y
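# Example usage of RelativeFrequencyChart (hypothetical data):
#     x, y = RelativeFrequencyChart().get_coordinates(df['resolution_days'], bins=20)
# `x` holds the 21 bin edges and `y` the share of values in each bin (the shares sum to 1).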
class FrequencyDensityChart:
def get_coordinates_histogram(self, data, bins):
self.btt = numpy.array(list(data))
self.y, self.x, self.bars = plt.hist(self.btt, bins=bins, density=True)
return self.x, self.y
def get_coordinates_line(self, data):
try:
self.btt = numpy.array(list(data))
self.density = stats.kde.gaussian_kde(list(data))
self.x_den = numpy.linspace(0, data.max(), data.count())
self.density = self.density(self.x_den)
return self.x_den, self.density
except numpy.linalg.linalg.LinAlgError:
return [-1], [-1]
class DynamicChart:
def get_coordinates(self, frame, step_size):
self.plot = {} # chart coordinates
self.dynamic_bugs = []
self.x = []
self.y = []
self.plot['period'] = step_size
if step_size == 'W-SUN':
self.periods = DynamicChart.get_periods(self, frame, step_size) # separates DataFrame to the specified periods
if len(self.periods) == 0:
return 'error'
self.cumulative = 0 # cumulative total of defect submission for specific period
for self.period in self.periods:
                # if the period starts before the earliest defect creation date, clamp the first point to the data's minimum date
if pandas.to_datetime(self.period[0]) < pandas.to_datetime(frame['Created_tr']).min():
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >=
pandas.to_datetime(frame['Created_tr']).min()) &
(pandas.to_datetime(frame['Created_tr']) <= pandas.to_datetime(self.period[1]))]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key_tr'].count())
self.x.append(str(datetime.datetime.date(pandas.to_datetime(frame['Created_tr'], format='%Y-%m-%d').min())))
self.y.append(self.cumulative)
else:
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >= pandas.to_datetime(self.period[0]))
& (pandas.to_datetime(frame['Created_tr']) <= pandas.to_datetime(self.period[1]))]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key_tr'].count())
self.x.append(str((self.period[0])))
self.y.append(self.cumulative)
# check whether the date from new DataFrame is greater than date which is specified in settings
if pandas.to_datetime(frame['Created_tr']).max() > pandas.to_datetime(self.periods[-1][1]):
# processing of days which are out of full period set
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) > pandas.to_datetime(self.periods[-1][1]))
& (pandas.to_datetime(frame['Created_tr']) <=
pandas.to_datetime(frame['Created_tr']).max())]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key_tr'].count())
self.x.append(str(datetime.datetime.date(pandas.to_datetime(self.periods[-1][1], format='%Y-%m-%d')) + datetime.timedelta(days=1)))
self.y.append(self.cumulative)
self.dynamic_bugs.append(self.x)
self.dynamic_bugs.append(self.y)
self.plot['dynamic bugs'] = self.dynamic_bugs
self.cumulative = 0
return self.plot
if step_size in ['7D', '10D', '3M', '6M', 'A-DEC']:
self.count0 = 0
self.count1 = 1
self.periods = DynamicChart.get_periods(self, frame, step_size) # DataFrame separation by the specified periods
if len(self.periods) == 0:
return 'error'
self.cumulative = 0
self.countPeriodsList = len(self.periods) # count of calculated periods
self.count = 1
if self.countPeriodsList == 1:
if step_size == '7D':
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >=
pandas.to_datetime(frame['Created_tr']).min())
& (pandas.to_datetime(frame['Created_tr'])
< pandas.to_datetime(datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min())
+datetime.timedelta(days=7)))]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key'].count())
self.x.append(str(DynamicChart.get_date_for_dynamic_my(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr'], format='%Y-%m-%d').min()), step_size)))
self.y.append(self.cumulative)
if pandas.to_datetime(frame['Created_tr']).max() > pandas.to_datetime(datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min())+datetime.timedelta(days=7)):
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >=
pandas.to_datetime(datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min())+
datetime.timedelta(days=7))) & (pandas.to_datetime(frame['Created_tr'])
<= pandas.to_datetime(frame['Created_tr']).max())]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key_tr'].count())
self.x.append(str(DynamicChart.get_date_for_dynamic_my(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min())+datetime.timedelta(days=7), step_size)))
self.y.append(self.cumulative)
self.cumulative = 0
if step_size == '10D':
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >= pandas.to_datetime(frame['Created_tr']).min()) & (pandas.to_datetime(frame['Created_tr']) < pandas.to_datetime(datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min())+datetime.timedelta(days=10)))]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key_tr'].count())
self.x.append(str(DynamicChart.get_date_for_dynamic_my(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr'], format='%Y-%m-%d').min()), step_size)))
self.y.append(self.cumulative)
if pandas.to_datetime(frame['Created_tr']).max() > pandas.to_datetime(datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min())+datetime.timedelta(days=10)):
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >= pandas.to_datetime(datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min())+datetime.timedelta(days=10))) & (pandas.to_datetime(frame['Created_tr']) <= pandas.to_datetime(frame['Created_tr']).max())]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key_tr'].count())
self.x.append(str(DynamicChart.get_date_for_dynamic_my(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min())+datetime.timedelta(days=10), step_size)))
self.y.append(self.cumulative)
self.cumulative = 0
if step_size == '3M':
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >= pandas.to_datetime(frame['Created_tr']).min())
& (pandas.to_datetime(frame['Created_tr']) <
pandas.to_datetime(DynamicChart.add_months(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min()), 3)))]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key'].count())
self.x.append(str(DynamicChart.get_date_for_dynamic_my(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr'], format='%Y-%m-%d').min()), step_size)))
self.y.append(self.cumulative)
if pandas.to_datetime(frame['Created_tr']).max() > pandas.to_datetime(DynamicChart.add_months(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min()), 3)):
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >= pandas.to_datetime(DynamicChart.add_months(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min()), 3))) & (pandas.to_datetime(frame['Created_tr']) <= pandas.to_datetime(frame['Created_tr']).max())]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key_tr'].count())
self.x.append(str(DynamicChart.get_date_for_dynamic_my(self, DynamicChart.add_months(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min()), 3), step_size)))
self.y.append(self.cumulative)
self.cumulative = 0
if step_size == '6M':
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >= pandas.to_datetime(frame['Created_tr']).min())
& (pandas.to_datetime(frame['Created_tr']) <
pandas.to_datetime(DynamicChart.add_months(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min()), 6)))]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key_tr'].count())
self.x.append(str(DynamicChart.get_date_for_dynamic_my(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr'], format='%Y-%m-%d').min()), step_size)))
self.y.append(self.cumulative)
if pandas.to_datetime(frame['Created_tr']).max() > pandas.to_datetime(DynamicChart.add_months(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min()), 6)):
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >= pandas.to_datetime(DynamicChart.add_months(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min()), 6))) & (pandas.to_datetime(frame['Created_tr']) <= pandas.to_datetime(frame['Created_tr']).max())]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key_tr'].count())
self.x.append(str(DynamicChart.get_date_for_dynamic_my(self, DynamicChart.add_months(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min()), 6), step_size)))
self.y.append(self.cumulative)
self.cumulative = 0
if step_size == 'A-DEC':
                    # assumed: the yearly (A-DEC) filter mirrors the 3M/6M branches above; only the lower-bound condition is recoverable here
                    self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >= pandas.to_datetime(frame['Created_tr']).min())]
from app.helpers.plotter import plot_confusion_matrix
from app.helpers.model_exec import model_exec
from app.helpers.selection import metric_data_prep
from app.helpers.selection import get_mic_chi2_s_df
from app.helpers.model_exec import model_eval
from sklearn.metrics import roc_curve, auc
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score as acc
from sklearn.feature_selection import mutual_info_classif
from sklearn.feature_selection import chi2
from sklearn.svm import SVC
from sklearn.feature_selection import SelectFromModel
import pandas as pd
import numpy as np
import operator
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
plt.ion()
import os
path_ = os.getcwd()
# STEP-1: Count vectorizer data import.
data_path = path_ + r"/data_prep/final_data/opportunity/"
lit_df = pd.read_csv(data_path+"lit.csv")
pos_df = pd.read_csv(data_path+"pos.csv")
neg_df = pd.read_csv(data_path+"neg.csv")
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
Index,
Series,
concat,
date_range,
)
import pandas._testing as tm
class TestEmptyConcat:
def test_handle_empty_objects(self, sort):
df = DataFrame(np.random.randn(10, 4), columns=list("abcd"))
baz = df[:5].copy()
baz["foo"] = "bar"
empty = df[5:5]
frames = [baz, empty, empty, df[5:]]
concatted = concat(frames, axis=0, sort=sort)
expected = df.reindex(columns=["a", "b", "c", "d", "foo"])
expected["foo"] = expected["foo"].astype("O")
expected.loc[0:4, "foo"] = "bar"
tm.assert_frame_equal(concatted, expected)
# empty as first element with time series
# GH3259
df = DataFrame(
{"A": range(10000)}, index=date_range("20130101", periods=10000, freq="s")
)
empty = DataFrame()
result = concat([df, empty], axis=1)
tm.assert_frame_equal(result, df)
result = concat([empty, df], axis=1)
tm.assert_frame_equal(result, df)
result = concat([df, empty])
tm.assert_frame_equal(result, df)
result = concat([empty, df])
tm.assert_frame_equal(result, df)
def test_concat_empty_series(self):
# GH 11082
s1 = Series([1, 2, 3], name="x")
s2 = Series(name="y", dtype="float64")
res = concat([s1, s2], axis=1)
exp = DataFrame(
{"x": [1, 2, 3], "y": [np.nan, np.nan, np.nan]},
index=Index([0, 1, 2], dtype="O"),
)
tm.assert_frame_equal(res, exp)
s1 = Series([1, 2, 3], name="x")
s2 = Series(name="y", dtype="float64")
res = concat([s1, s2], axis=0)
# name will be reset
exp = Series([1, 2, 3])
tm.assert_series_equal(res, exp)
# empty Series with no name
s1 = Series([1, 2, 3], name="x")
s2 = Series(name=None, dtype="float64")
res = concat([s1, s2], axis=1)
exp = DataFrame(
{"x": [1, 2, 3], 0: [np.nan, np.nan, np.nan]},
columns=["x", 0],
index=Index([0, 1, 2], dtype="O"),
)
tm.assert_frame_equal(res, exp)
@pytest.mark.parametrize("tz", [None, "UTC"])
@pytest.mark.parametrize("values", [[], [1, 2, 3]])
def test_concat_empty_series_timelike(self, tz, values):
# GH 18447
first = Series([], dtype="M8[ns]").dt.tz_localize(tz)
dtype = None if values else np.float64
second = Series(values, dtype=dtype)
expected = DataFrame(
{
0: Series([pd.NaT] * len(values), dtype="M8[ns]").dt.tz_localize(tz),
1: values,
}
)
result = concat([first, second], axis=1)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"left,right,expected",
[
# booleans
(np.bool_, np.int32, np.int32),
(np.bool_, np.float32, np.object_),
# datetime-like
("m8[ns]", np.bool_, np.object_),
("m8[ns]", np.int64, np.object_),
("M8[ns]", np.bool_, np.object_),
("M8[ns]", np.int64, np.object_),
# categorical
("category", "category", "category"),
("category", "object", "object"),
],
)
def test_concat_empty_series_dtypes(self, left, right, expected):
result = concat([Series(dtype=left), Series(dtype=right)])
assert result.dtype == expected
@pytest.mark.parametrize(
"dtype", ["float64", "int8", "uint8", "bool", "m8[ns]", "M8[ns]"]
)
def test_concat_empty_series_dtypes_match_roundtrips(self, dtype):
dtype = np.dtype(dtype)
result = concat([Series(dtype=dtype)])
assert result.dtype == dtype
result = concat([Series(dtype=dtype), Series(dtype=dtype)])
assert result.dtype == dtype
def test_concat_empty_series_dtypes_roundtrips(self):
# round-tripping with self & like self
dtypes = map(np.dtype, ["float64", "int8", "uint8", "bool", "m8[ns]", "M8[ns]"])
def int_result_type(dtype, dtype2):
typs = {dtype.kind, dtype2.kind}
if not len(typs - {"i", "u", "b"}) and (
dtype.kind == "i" or dtype2.kind == "i"
):
return "i"
elif not len(typs - {"u", "b"}) and (
dtype.kind == "u" or dtype2.kind == "u"
):
return "u"
return None
def float_result_type(dtype, dtype2):
typs = {dtype.kind, dtype2.kind}
if not len(typs - {"f", "i", "u"}) and (
dtype.kind == "f" or dtype2.kind == "f"
):
return "f"
return None
def get_result_type(dtype, dtype2):
result = float_result_type(dtype, dtype2)
if result is not None:
return result
result = int_result_type(dtype, dtype2)
if result is not None:
return result
return "O"
for dtype in dtypes:
for dtype2 in dtypes:
if dtype == dtype2:
continue
expected = get_result_type(dtype, dtype2)
result = concat([Series(dtype=dtype), Series(dtype=dtype2)]).dtype
assert result.kind == expected
def test_concat_empty_series_dtypes_triple(self):
assert (
concat(
[Series(dtype="M8[ns]"), Series(dtype=np.bool_), Series(dtype=np.int64)]
).dtype
== np.object_
)
def test_concat_empty_series_dtype_category_with_array(self):
# GH#18515
assert (
concat(
[Series(np.array([]), dtype="category"), Series(dtype="float64")]
).dtype
== "float64"
)
def test_concat_empty_series_dtypes_sparse(self):
result = concat(
[
Series(dtype="float64").astype("Sparse"),
Series(dtype="float64").astype("Sparse"),
]
)
assert result.dtype == "Sparse[float64]"
result = concat(
[Series(dtype="float64").astype("Sparse"), Series(dtype="float64")]
)
expected = pd.SparseDtype(np.float64)
assert result.dtype == expected
result = concat(
[Series(dtype="float64").astype("Sparse"), Series(dtype="object")]
)
expected = pd.SparseDtype("object")
assert result.dtype == expected
def test_concat_empty_df_object_dtype(self):
# GH 9149
df_1 = DataFrame({"Row": [0, 1, 1], "EmptyCol": np.nan, "NumberCol": [1, 2, 3]})
df_2 = DataFrame(columns=df_1.columns)
result = concat([df_1, df_2], axis=0)
expected = df_1.astype(object)
tm.assert_frame_equal(result, expected)
def test_concat_empty_dataframe_dtypes(self):
df = DataFrame(columns=list("abc"))
df["a"] = df["a"].astype(np.bool_)
df["b"] = df["b"].astype(np.int32)
df["c"] = df["c"].astype(np.float64)
result = concat([df, df])
assert result["a"].dtype == np.bool_
assert result["b"].dtype == np.int32
assert result["c"].dtype == np.float64
result = concat([df, df.astype(np.float64)])
assert result["a"].dtype == np.object_
assert result["b"].dtype == np.float64
assert result["c"].dtype == np.float64
def test_concat_inner_join_empty(self):
# GH 15328
df_empty = DataFrame()
df_a = DataFrame({"a": [1, 2]}, index=[0, 1], dtype="int64")
df_expected = DataFrame({"a": []}, index=[], dtype="int64")
for how, expected in [("inner", df_expected), ("outer", df_a)]:
result = concat([df_a, df_empty], axis=1, join=how)
tm.assert_frame_equal(result, expected)
def test_empty_dtype_coerce(self):
# xref to #12411
# xref to #12045
# xref to #11594
# see below
# 10571
df1 = DataFrame(data=[[1, None], [2, None]], columns=["a", "b"])
df2 = DataFrame(data=[[3, None], [4, None]], columns=["a", "b"])
result = concat([df1, df2])
expected = df1.dtypes
tm.assert_series_equal(result.dtypes, expected)
def test_concat_empty_dataframe(self):
# 39037
df1 = DataFrame(columns=["a", "b"])
df2 = DataFrame(columns=["b", "c"])
result = concat([df1, df2, df1])
expected = DataFrame(columns=["a", "b", "c"])
tm.assert_frame_equal(result, expected)
df3 = DataFrame(columns=["a", "b"])
df4 = DataFrame(columns=["b"])
result = concat([df3, df4])
expected = DataFrame(columns=["a", "b"])
tm.assert_frame_equal(result, expected)
def test_concat_empty_dataframe_different_dtypes(self):
# 39037
df1 = DataFrame({"a": [1, 2, 3], "b": ["a", "b", "c"]})
df2 = DataFrame({"a": [1, 2, 3]})
result = | concat([df1[:0], df2[:0]]) | pandas.concat |
"""
Main script to generate scenarios
"""
# System
import os
import sys
import argparse
import logging
# Externals
import yaml
import numpy as np
import pandas as pd
# Locals
from powerscenarios.parser import Parser
from powerscenarios.grid import Grid
def parse_args():
"""Parse command line arguments."""
parser = argparse.ArgumentParser("generate_scenarios.py")
add_arg = parser.add_argument
add_arg("config", nargs="?", default="config.yaml")
# add_arg('-d', '--distributed', action='store_true')
add_arg("-v", "--verbose", action="store_true")
# parameters which override the YAML file, if needed
#
return parser.parse_args()
def config_logging(verbose,output_dir):
# log_format = '%(asctime)s %(levelname)s %(message)s'
log_level = logging.DEBUG if verbose else logging.INFO
# logging.basicConfig(level=log_level, format=log_format)
logger = logging.getLogger(__name__)
logger.setLevel(log_level)
# define file handler and set formatter
file_handler = logging.FileHandler(os.path.join(output_dir,"logfile.log"))
formatter = logging.Formatter(
"%(asctime)s : %(levelname)s : %(name)s : %(message)s"
)
file_handler.setFormatter(formatter)
# add file handler to logger
logger.addHandler(file_handler)
return logger
def load_config(args):
# Read base config from yaml file
config_file = args.config
with open(config_file) as f:
config = yaml.load(f, Loader=yaml.FullLoader)
# can override with command line arguments here if needed
return config
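# A minimal config.yaml consistent with the keys read in main() below (all values are illustrative):
#
#   output:
#     dir: ./scenario_output
#   data_dir: ./data
#   grid:
#     name: ACTIVSg200          # or RTS
#   RTS_solar2wind: false
#   wind_penetration:
#     change: true
#     new_value: 0.3
#   tables:
#     actuals_start: 2007-01-01 00:00:00
#     actuals_end: 2007-12-31 23:00:00
#     scenarios_start: 2008-01-01 00:00:00
#     scenarios_end: 2008-12-31 23:00:00
#   scenario:
#     start: 2008-07-01 00:00:00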
def main():
""" Main function """
# Initialization
args = parse_args()
# Load configuration
config = load_config(args)
output_dir = os.path.expandvars(config["output"]["dir"])
os.makedirs(output_dir, exist_ok=True)
# Loggging
logger = config_logging(verbose=args.verbose,output_dir=output_dir)
data_dir = os.path.expandvars(config["data_dir"])
grid_name = config["grid"]["name"]
# load TAMU grid data
# path to .aux file (TAMU grids) obtained from e.g.
# https://electricgrids.engr.tamu.edu/electric-grid-test-cases/activsg200/
if grid_name[:7] == 'ACTIVSg':
#aux_file_name = data_dir + grid_name + "/" + grid_name + ".aux"
aux_file_name = os.path.join(data_dir, grid_name, grid_name + ".aux")
# parse original .aux file and return dataframes for buses, generators, and wind generators
# here, we need .aux files because those are the only ones with Latitute/Longitude information
parser = Parser()
bus_df, gen_df, wind_gen_df = parser.parse_tamu_aux(aux_file_name)
elif grid_name == 'RTS':
bus_csv_filename = os.path.join(data_dir, grid_name, "bus.csv")
gen_csv_filename = os.path.join(data_dir, grid_name, "gen.csv")
parser = Parser()
# if solar2wind, will replace all solar with wind
bus_df, gen_df, wind_gen_df = parser.parse_rts_csvs(
bus_csv_filename, gen_csv_filename, solar2wind=config["RTS_solar2wind"]
)
# to instantiate a grid we need: name, bus, generator, and wind generator dataframes from Parser
    # really, we only need the wind generators with lat/long; this might change in the future
grid = Grid(grid_name, bus_df, gen_df, wind_gen_df)
logger.info(grid.info())
# print(grid.info())
if config["wind_penetration"]["change"]==True:
logger.info("changing wind penetration")
grid.change_wind_penetration(config["wind_penetration"]["new_value"])
logger.info(grid.info())
logger.info("retrieving wind sites")
    # retrieve wind sites (wind_sites are initially set to an empty df)
grid.retrieve_wind_sites(method="simple proximity")
logger.info("making tables")
# sampling tables
grid.make_tables(
actuals_start=pd.Timestamp(config["tables"]["actuals_start"], tz="utc"),
actuals_end=pd.Timestamp(config["tables"]["actuals_end"], tz="utc"),
scenarios_start=pd.Timestamp(config["tables"]["scenarios_start"], tz="utc"),
scenarios_end=pd.Timestamp(config["tables"]["scenarios_end"], tz="utc"),
)
logger.info("generating scenarios")
# timespan for wanted scenarios
# time period for which to generate scenarios
    scenario_start = pd.Timestamp(config["scenario"]["start"], tz="utc")
'''
@lptMusketeers 2017.10.20
'''
import pandas as pd
import datetime
from functools import reduce
import codecs
import csv
from decimal import *
import numpy as np
class FeatureEngineering(object):
def nondrop_precent(self,source_path,target_path):
print("nondrop_precent...")
        df1 = pd.read_csv(source_path)
# Copyright <NAME>, 2021 Licensed under MIT License.
# See the LICENSE.txt for more information.
import unittest
import pandas as pd
from data.smodel_data_preprocessor import SeriesModelDataPreprocessor
class SeriesPreprocessTest(unittest.TestCase):
def test_preprocess(self):
d = {'Open': [1, 2], 'Date': [3, 4], 'End': [4, 6]}
        df = pd.DataFrame(data=d)
import logging
import pandas as pd
import pytest
from split_schedule.errors import NoScheduleError
from split_schedule.schedule_builder import ScheduleBuilder, SchedulingError
from tests.helpers import init_classes_check, reduce_classes_check, total_classes_check
@pytest.mark.parametrize("max_tries", [1, 2])
@pytest.mark.parametrize("verbose", [True, False])
def test_build_schedule_validated_classs_size(monkeypatch, tmp_path, caplog, max_tries, verbose):
test_file = str(tmp_path.joinpath("data1.xlsx"))
data_1 = {
"block": [1],
"class": ["test class 1"],
"student": ["<NAME>"],
}
df_1 = pd.DataFrame(data_1)
df_1.to_excel(test_file, index=False, engine="openpyxl")
def mock_return(*args, **kwargs):
return pd.DataFrame(
{
"student": ["<NAME>"],
"original": 3,
"scheduled": 2,
}
).set_index("student")
schedule_builder = ScheduleBuilder()
monkeypatch.setattr(ScheduleBuilder, "_validate_class_size", mock_return)
with pytest.raises(SchedulingError):
schedule_builder.build_schedule_from_file(
test_file, 0.2, max_tries=max_tries, verbose=verbose
)
if verbose:
assert "Classes contain too many students" in caplog.text
else:
assert "Classes contain too many students" not in caplog.text
@pytest.mark.parametrize("max_tries", [1, 2])
@pytest.mark.parametrize("verbose", [True, False])
def test_build_schedule_validated_classes_number(monkeypatch, tmp_path, caplog, max_tries, verbose):
test_file = str(tmp_path.joinpath("data1.xlsx"))
data_1 = {
"block": [1],
"class": ["test class 1"],
"student": ["test 1"],
}
df_1 = pd.DataFrame(data_1)
df_1.to_excel(test_file, index=False, engine="openpyxl")
data_2 = {
"block": [1, 2, 1, 2],
"class": ["test class 1", "test class 2", "test class 1", "test class 2"],
"student": ["test 1", "test 1", "test 2", "test 2"],
}
def mock_return_validated_classes(*args, **kwargs):
return pd.DataFrame(data_2)
schedule_builder = ScheduleBuilder()
monkeypatch.setattr(ScheduleBuilder, "_validate_classes", mock_return_validated_classes)
with pytest.raises(SchedulingError):
schedule_builder.build_schedule_from_file(
test_file, 0.2, max_tries=max_tries, verbose=verbose
)
if verbose:
assert "Student missing" in caplog.text
else:
assert "Student missing" not in caplog.text
@pytest.mark.parametrize("max_tries", [1, 2])
@pytest.mark.parametrize("verbose", [True, False])
def test_build_schedule_validated_same_day(monkeypatch, tmp_path, caplog, max_tries, verbose):
test_file = str(tmp_path.joinpath("data1.xlsx"))
data_1 = {
"block": [1],
"class": ["test class 1"],
"student": ["test 1"],
}
df_1 = pd.DataFrame(data_1)
df_1.to_excel(test_file, index=False, engine="openpyxl")
data_2 = {
"block": [
1,
1,
2,
],
"class": [
"test class 1",
"test class 1",
"test class 3",
],
"total_students": [
2,
2,
1,
],
"max_students": [
2,
2,
1,
],
"num_classes": [
1,
1,
1,
],
"day_number": [
1,
1,
2,
],
"student": [
"test 1",
"test 2",
"test 1",
],
}
def mock_return_validated_days(*args, **kwargs):
return pd.DataFrame(data_2)
schedule_builder = ScheduleBuilder()
monkeypatch.setattr(ScheduleBuilder, "_validate_same_day", mock_return_validated_days)
with pytest.raises(SchedulingError):
schedule_builder.build_schedule_from_file(
test_file, 0.2, max_tries=max_tries, verbose=verbose
)
if verbose:
assert "Student not on the same day" in caplog.text
else:
assert "Student not on the same day" not in caplog.text
@pytest.mark.parametrize("max_tries", [1, 2])
@pytest.mark.parametrize("verbose", [True, False])
def test_build_schedule_validated_students(monkeypatch, tmp_path, caplog, max_tries, verbose):
test_file = str(tmp_path.joinpath("data1.xlsx"))
data_1 = {
"block": [1],
"class": ["test class 1"],
"student": ["<NAME>"],
}
df_1 = pd.DataFrame(data_1)
df_1.to_excel(test_file, index=False, engine="openpyxl")
def mock_return_validated_students(*args, **kwargs):
return ["test 1"]
schedule_builder = ScheduleBuilder()
monkeypatch.setattr(ScheduleBuilder, "_validate_students", mock_return_validated_students)
with pytest.raises(SchedulingError):
schedule_builder.build_schedule_from_file(
test_file, 0.2, max_tries=max_tries, verbose=verbose
)
if verbose:
assert "Student original number" in caplog.text
else:
assert "Student original number" not in caplog.text
@pytest.mark.parametrize("verbose", [True, False])
def test_build_schedule_restart(monkeypatch, tmp_path, caplog, verbose):
test_file = str(tmp_path.joinpath("data1.xlsx"))
data_1 = {
"block": [1],
"class": ["test class 1"],
"student": ["test 1"],
}
df_1 = pd.DataFrame(data_1)
df_1.to_excel(test_file, index=False, engine="openpyxl")
def mock_return(*args, **kwargs):
return None
schedule_builder = ScheduleBuilder()
monkeypatch.setattr(ScheduleBuilder, "_fill_classes", mock_return)
with pytest.raises(SchedulingError):
schedule_builder.build_schedule_from_file(test_file, 0.2, max_tries=2, verbose=verbose)
if verbose:
assert "No schedule found. Retrying" in caplog.text
else:
assert "No schedule found. Retrying" not in caplog.text
def test_fill_classes_match_no_space(tmp_path):
test_file = str(tmp_path.joinpath("data1.xlsx"))
data = {
"block": [1, 1, 2, 2],
"class": [
"test class 1",
"test class 1",
"test class 2",
"test class 2",
],
"student": ["test 1", "test 2", "test 1", "test 2"],
}
df = pd.DataFrame(data)
df.to_excel(test_file, index=False, engine="openpyxl")
fill_classes = [
{
"block": 1,
"class_name": "test class 1",
"total_students": 2,
"max_students": 1,
"num_classes": 1,
"classes": [set()],
},
{
"block": 2,
"class_name": "test class 2",
"total_students": 2,
"max_students": 1,
"num_classes": 1,
"classes": [set()],
},
]
student_classes_grouped = {
"test 1": {"blocks": {1: "test class 1", 2: "test class 2"}},
"test 2": {"blocks": {1: "test class 1", 2: "test class 2"}},
}
schedule_builder = ScheduleBuilder()
schedule_builder.build_schedule_from_file(test_file)
fill_classes = schedule_builder._fill_classes(fill_classes, student_classes_grouped)
assert not fill_classes
def test_fill_classes_no_match_no_space(tmp_path):
test_file = str(tmp_path.joinpath("data1.xlsx"))
data = {
"block": [1],
"class": [
"test class 1",
],
"student": ["test 1"],
}
df = pd.DataFrame(data)
df.to_excel(test_file, index=False, engine="openpyxl")
fill_classes = [
{
"block": 1,
"class_name": "test class 1",
"total_students": 1,
"max_students": 0,
"num_classes": 1,
"classes": [set()],
},
]
student_classes_grouped = {
"test 1": {"blocks": {1: "test class 1", 2: "test class 2"}},
}
schedule_builder = ScheduleBuilder()
schedule_builder.build_schedule_from_file(test_file)
fill_classes = schedule_builder._fill_classes(fill_classes, student_classes_grouped)
assert not fill_classes
def test_fill_classes_match_move_day(tmp_path):
test_file = str(tmp_path.joinpath("data1.xlsx"))
data = {
"block": [1, 2, 1, 2, 1, 2],
"class": [
"test class 1",
"test class 2",
"test class 1",
"test class 2",
"test class 1",
"test class 2",
],
"student": ["test 1", "test 1", "test 2", "test 2", "test 3", "test 3"],
}
df = pd.DataFrame(data)
df.to_excel(test_file, index=False, engine="openpyxl")
fill_classes = [
{
"block": 1,
"class_name": "test class 1",
"total_students": 3,
"max_students": 2,
"num_classes": 2,
"classes": [set(), set()],
},
{
"block": 2,
"class_name": "test class 2",
"total_students": 3,
"max_students": 2,
"num_classes": 2,
"classes": [set(), set()],
},
]
student_classes_grouped = {
"test 1": {"blocks": {1: "test class 1", 2: "test class 2"}},
"test 2": {"blocks": {1: "test class 1", 2: "test class 2"}},
"test 3": {"blocks": {1: "test class 1", 2: "test class 2"}},
}
schedule_builder = ScheduleBuilder()
schedule_builder.build_schedule_from_file(test_file)
fill_classes = schedule_builder._fill_classes(fill_classes, student_classes_grouped)
class_size = [sorted([len(y) for y in x["classes"]]) for x in fill_classes]
expected = [[1, 2], [1, 2]]
assert expected == class_size
def test_find_matches(student_matches_check, test_schedule):
schedule_builder = ScheduleBuilder()
schedule_builder.build_schedule_from_file(test_schedule)
matches = schedule_builder._find_matches()
assert matches == student_matches_check
def test_find_matches_unused_order_found(tmp_path, caplog):
test_file = str(tmp_path.joinpath("data1.xlsx"))
data = {
"block": [1, 1, 2, 2],
"class": [
"test class 1",
"test class 1",
"test class 2",
"test class 2",
],
"student": ["test 1", "test 2", "test 1", "test 2"],
}
df = pd.DataFrame(data)
df.to_excel(test_file, index=False, engine="openpyxl")
class TestingScheduleBuilder(ScheduleBuilder):
def __init__(self, schedule_file_path):
self.final_schedule_df = None
self._schedule_df = self._load_data(schedule_file_path)
self._attempted_df = [df]
self._attempt = 1
self._verbose = True
logging.basicConfig(format="%(asctime)s: %(levelname)s: %(message)s")
logging.root.setLevel(level=logging.INFO)
self._logger = logging.getLogger()
schedule_builder = TestingScheduleBuilder(test_file)
schedule_builder._find_matches()
assert "Unused student order found" in caplog.text
def test_find_matches_unused_order_not_found(tmp_path, caplog):
test_file = str(tmp_path.joinpath("data1.xlsx"))
data_1 = {
"block": [1, 1, 2, 2],
"class": [
"test class 1",
"test class 1",
"test class 2",
"test class 2",
],
"student": ["test 1", "test 2", "test 1", "test 2"],
}
df_1 = pd.DataFrame(data_1)
df_1.to_excel(test_file, index=False, engine="openpyxl")
data_2 = {
"block": [1, 1, 2, 2],
"class": [
"test class 1",
"test class 1",
"test class 2",
"test class 2",
],
"student": ["test 2", "test 1", "test 2", "test 1"],
}
df_2 = pd.DataFrame(data_2)
class TestingScheduleBuilder(ScheduleBuilder):
def __init__(self, schedule_file_path):
self.final_schedule_df = None
self._schedule_df = self._load_data(schedule_file_path)
self._attempted_df = [df_1, df_2]
self._attempt = 1
self._verbose = True
logging.basicConfig(format="%(asctime)s: %(levelname)s: %(message)s")
logging.root.setLevel(level=logging.INFO)
self._logger = logging.getLogger()
schedule_builder = TestingScheduleBuilder(test_file)
schedule_builder._find_matches()
assert "No unused matches found" in caplog.text
def test_find_matches_retry(student_matches_check, test_schedule):
schedule_builder = ScheduleBuilder()
schedule_builder.build_schedule_from_file(test_schedule)
schedule_builder._find_matches()
matches = schedule_builder._find_matches()
m_keys = [x.keys() for x in matches]
s_keys = [x.keys() for x in student_matches_check]
m_vals = [[[sorted(z) for z in y] for y in (list(x.values()))] for x in matches]
s_vals = [[[sorted(z) for z in y] for y in (list(x.values()))] for x in student_matches_check]
assert m_keys == s_keys
assert m_vals == s_vals
def test_get_class_size(class_size_check, test_schedule):
schedule_builder = ScheduleBuilder()
schedule_builder.build_schedule_from_file(test_schedule)
class_size = schedule_builder._get_class_size()
assert class_size == class_size_check
def test_get_student_classes(student_classes_check, test_schedule):
schedule_builder = ScheduleBuilder()
schedule_builder.build_schedule_from_file(test_schedule)
student_classes = schedule_builder._get_student_classes()
assert student_classes == student_classes_check
@pytest.mark.parametrize("reduce_by", [0.1, 0.2, 0.5])
@pytest.mark.parametrize("smallest_allowed", [1, 5, 10])
def test_get_total_classes(class_size_check, reduce_by, smallest_allowed, test_schedule):
reduced_classes = reduce_classes_check(reduce_by, smallest_allowed, class_size_check)
check_total_classes = total_classes_check(reduced_classes)
schedule_builder = ScheduleBuilder()
schedule_builder.build_schedule_from_file(test_schedule)
total_classes = schedule_builder._get_total_classes(reduced_classes)
assert total_classes == check_total_classes
@pytest.mark.parametrize("reduce_by", [0.1, 0.2, 0.5])
@pytest.mark.parametrize("smallest_allowed", [1, 5, 10])
def test_init_classes(class_size_check, reduce_by, smallest_allowed, test_schedule):
expected = init_classes_check(class_size_check, reduce_by, smallest_allowed)
schedule_builder = ScheduleBuilder()
schedule_builder.build_schedule_from_file(test_schedule)
classes = schedule_builder._init_classes(reduce_by, smallest_allowed)
assert classes == expected
def test_init_schedule_builder(test_schedule):
schedule_builder = ScheduleBuilder()
schedule_builder.build_schedule_from_file(test_schedule)
test = pd.read_excel(str(test_schedule), engine="openpyxl")
assert test.equals(schedule_builder._schedule_df)
@pytest.mark.parametrize("reduce_by", [0.1, 0.2, 0.5])
@pytest.mark.parametrize("smallest_allowed", [1, 5, 10])
def test_reduce_class(class_size_check, reduce_by, smallest_allowed, test_schedule):
check_reduced = reduce_classes_check(reduce_by, smallest_allowed, class_size_check)
schedule_builder = ScheduleBuilder()
schedule_builder.build_schedule_from_file(test_schedule)
reduced_class = schedule_builder._reduce_class(class_size_check, reduce_by, smallest_allowed)
assert reduced_class == check_reduced
def test_save_schedule_to_file(tmp_path, test_schedule):
export_path = tmp_path.joinpath("schedule.xlsx")
schedule_builder = ScheduleBuilder()
schedule_builder.build_schedule_from_file(test_schedule)
schedule_builder.save_schedule(export_path)
assert export_path.exists()
def test_save_schedule_check_columns(tmp_path, test_schedule):
export_path = tmp_path.joinpath("schedule.xlsx")
schedule_builder = ScheduleBuilder()
schedule_builder.build_schedule_from_file(test_schedule)
schedule_builder.save_schedule(export_path)
df_saved = pd.read_excel(export_path, engine="openpyxl")
columns = df_saved.columns.values.tolist()
assert columns == [
"block",
"class",
"total_students",
"max_students",
"num_classes",
"day_number",
"student",
]
def test_validate_class_size_pass(tmp_path):
test_file = str(tmp_path.joinpath("test.xlsx"))
data = {
"block": [
1,
1,
],
"class": [
"test class 1",
"test class 1",
],
"total_students": [
2,
2,
],
"max_students": [
2,
2,
],
"num_classes": [
1,
1,
],
"day_number": [
1,
1,
],
"student": [
"test 1",
"test 2",
],
}
df = pd.DataFrame(data)
df.to_excel(test_file, index=False, engine="openpyxl")
schedule_builder = ScheduleBuilder()
validate_df = schedule_builder._validate_class_size(df)
assert not validate_df
def test_validate_class_size_fail(tmp_path):
test_file = str(tmp_path.joinpath("test.xlsx"))
data = {
"block": [
1,
1,
],
"class": [
"test class 1",
"test class 1",
],
"total_students": [
2,
2,
],
"max_students": [
1,
1,
],
"num_classes": [
1,
1,
],
"day_number": [
1,
1,
],
"student": [
"test 1",
"test 2",
],
}
df = pd.DataFrame(data)
df.to_excel(test_file, index=False, engine="openpyxl")
schedule_builder = ScheduleBuilder()
validate_df = schedule_builder._validate_class_size(df)
expected_df = pd.DataFrame(
{
"block": [1],
"class": ["test class 1"],
"max_students": [1],
"day_number": [1],
"class_size": [2],
}
)
assert expected_df.equals(validate_df)
def test_validate_classes_pass(tmp_path):
test_file = str(tmp_path.joinpath("test.xlsx"))
data_1 = {
"block": [1, 2, 3, 1, 2],
"class": ["test class 1", "test class 2", "test class 3", "test class 1", "test class 2"],
"student": ["test 1", "test 1", "test 1", "test 2", "test 2"],
}
df_1 = pd.DataFrame(data_1)
df_1.to_excel(test_file, index=False, engine="openpyxl")
    data_2 = {
"block": [1, 3, 2, 1, 2],
"class": [
"test class 1",
"test class 3",
"test class 2",
"test class 1",
"test class 2",
],
"student": ["test 1", "test 1", "test 1", "test 2", "test 2"],
}
df_2 = pd.DataFrame(data_2)
schedule_builder = ScheduleBuilder()
invalid_df = schedule_builder._validate_classes(df_2)
assert not invalid_df
def test_validate_classes_fail(tmp_path):
test_file = str(tmp_path.joinpath("test.xlsx"))
data_1 = {
"block": [1, 2, 3, 1, 2],
"class": ["test class 1", "test class 2", "test class 3", "test class 1", "test class 2"],
"student": ["test 1", "test 1", "test 1", "test 2", "test 2"],
}
df_1 = pd.DataFrame(data_1)
df_1.to_excel(test_file, index=False, engine="openpyxl")
data_2 = {
"block": [1, 2, 1, 2],
"class": ["test class 1", "test class 2", "test class 1", "test class 2"],
"student": ["test 1", "test 1", "test 2", "test 2"],
}
df_2 = pd.DataFrame(data_2)
expected_df = pd.DataFrame(
{
"student": ["test 1"],
"original": 3,
"scheduled": 2,
}
).set_index("student")
schedule_builder = ScheduleBuilder()
schedule_builder.build_schedule_from_file(test_file)
invalid_df = schedule_builder._validate_classes(df_2)
assert expected_df.equals(invalid_df)
def test_same_day_pass(tmp_path):
test_file = str(tmp_path.joinpath("test.xlsx"))
data = {
"block": [
1,
1,
2,
],
"class": [
"test class 1",
"test class 1",
"test class 3",
],
"total_students": [
2,
2,
1,
],
"max_students": [
2,
2,
1,
],
"num_classes": [
1,
1,
1,
],
"day_number": [
1,
1,
1,
],
"student": [
"test 1",
"test 2",
"test 1",
],
}
df = pd.DataFrame(data)
df.to_excel(test_file, index=False, engine="openpyxl")
schedule_builder = ScheduleBuilder()
validate = schedule_builder._validate_same_day(df)
assert not validate
def test_same_day_fail(tmp_path):
test_file = str(tmp_path.joinpath("test.xlsx"))
data = {
"block": [
1,
1,
2,
],
"class": [
"test class 1",
"test class 1",
"test class 3",
],
"total_students": [
2,
2,
1,
],
"max_students": [
2,
2,
1,
],
"num_classes": [
1,
1,
1,
],
"day_number": [
1,
1,
2,
],
"student": [
"test 1",
"test 2",
"test 1",
],
}
df = pd.DataFrame(data)
df.to_excel(test_file, index=False, engine="openpyxl")
schedule_builder = ScheduleBuilder()
validate = schedule_builder._validate_same_day(df)
    expected_df = pd.DataFrame({"student": ["test 1"], "count": ["2"]})
from __future__ import absolute_import, division, print_function
import re
import traceback
import warnings
from datetime import datetime
from functools import partial
import numpy as np
import pandas as pd
from ..core import indexing
from ..core.formatting import first_n_items, format_timestamp, last_item
from ..core.pycompat import PY3
from ..core.variable import Variable
from .variables import (
SerializationWarning, VariableCoder, lazy_elemwise_func, pop_to,
safe_setitem, unpack_for_decoding, unpack_for_encoding)
try:
from pandas.errors import OutOfBoundsDatetime
except ImportError:
# pandas < 0.20
from pandas.tslib import OutOfBoundsDatetime
# standard calendars recognized by netcdftime
_STANDARD_CALENDARS = set(['standard', 'gregorian', 'proleptic_gregorian'])
_NS_PER_TIME_DELTA = {'us': int(1e3),
'ms': int(1e6),
's': int(1e9),
'm': int(1e9) * 60,
'h': int(1e9) * 60 * 60,
'D': int(1e9) * 60 * 60 * 24}
TIME_UNITS = frozenset(['days', 'hours', 'minutes', 'seconds',
'milliseconds', 'microseconds'])
def _import_netcdftime():
'''
helper function handle the transition to netcdftime as a stand-alone
package
'''
try:
# Try importing netcdftime directly
import netcdftime as nctime
if not hasattr(nctime, 'num2date'):
# must have gotten an old version from netcdf4-python
raise ImportError
except ImportError:
# in netCDF4 the num2date/date2num function are top-level api
try:
import netCDF4 as nctime
except ImportError:
raise ImportError("Failed to import netcdftime")
return nctime
def _netcdf_to_numpy_timeunit(units):
units = units.lower()
if not units.endswith('s'):
units = '%ss' % units
return {'microseconds': 'us', 'milliseconds': 'ms', 'seconds': 's',
'minutes': 'm', 'hours': 'h', 'days': 'D'}[units]
def _unpack_netcdf_time_units(units):
# CF datetime units follow the format: "UNIT since DATE"
# this parses out the unit and date allowing for extraneous
# whitespace.
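    # e.g. "days since 2000-01-01" -> ("days", "2000-01-01")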
matches = re.match('(.+) since (.+)', units)
if not matches:
raise ValueError('invalid time units: %s' % units)
delta_units, ref_date = [s.strip() for s in matches.groups()]
return delta_units, ref_date
def _decode_datetime_with_netcdftime(num_dates, units, calendar):
nctime = _import_netcdftime()
dates = np.asarray(nctime.num2date(num_dates, units, calendar))
if (dates[np.nanargmin(num_dates)].year < 1678 or
dates[np.nanargmax(num_dates)].year >= 2262):
warnings.warn('Unable to decode time axis into full '
'numpy.datetime64 objects, continuing using dummy '
'netcdftime.datetime objects instead, reason: dates out'
' of range', SerializationWarning, stacklevel=3)
else:
try:
dates = nctime_to_nptime(dates)
except ValueError as e:
warnings.warn('Unable to decode time axis into full '
'numpy.datetime64 objects, continuing using '
'dummy netcdftime.datetime objects instead, reason:'
'{0}'.format(e), SerializationWarning, stacklevel=3)
return dates
def _decode_cf_datetime_dtype(data, units, calendar):
# Verify that at least the first and last date can be decoded
# successfully. Otherwise, tracebacks end up swallowed by
# Dataset.__repr__ when users try to view their lazily decoded array.
values = indexing.ImplicitToExplicitIndexingAdapter(
indexing.as_indexable(data))
example_value = np.concatenate([first_n_items(values, 1) or [0],
last_item(values) or [0]])
try:
result = decode_cf_datetime(example_value, units, calendar)
except Exception:
calendar_msg = ('the default calendar' if calendar is None
else 'calendar %r' % calendar)
msg = ('unable to decode time units %r with %s. Try '
'opening your dataset with decode_times=False.'
% (units, calendar_msg))
if not PY3:
msg += ' Full traceback:\n' + traceback.format_exc()
raise ValueError(msg)
else:
dtype = getattr(result, 'dtype', np.dtype('object'))
return dtype
def decode_cf_datetime(num_dates, units, calendar=None):
"""Given an array of numeric dates in netCDF format, convert it into a
numpy array of date time objects.
For standard (Gregorian) calendars, this function uses vectorized
operations, which makes it much faster than netcdftime.num2date. In such a
case, the returned array will be of type np.datetime64.
Note that time unit in `units` must not be smaller than microseconds and
not larger than days.
See also
--------
netcdftime.num2date
"""
num_dates = np.asarray(num_dates)
flat_num_dates = num_dates.ravel()
if calendar is None:
calendar = 'standard'
delta, ref_date = _unpack_netcdf_time_units(units)
try:
if calendar not in _STANDARD_CALENDARS:
raise OutOfBoundsDatetime
delta = _netcdf_to_numpy_timeunit(delta)
try:
ref_date = pd.Timestamp(ref_date)
except ValueError:
# ValueError is raised by pd.Timestamp for non-ISO timestamp
# strings, in which case we fall back to using netcdftime
raise OutOfBoundsDatetime
# fixes: https://github.com/pydata/pandas/issues/14068
        # these lines check if the lowest or the highest value in dates
        # causes an OutOfBoundsDatetime (Overflow) error
pd.to_timedelta(flat_num_dates.min(), delta) + ref_date
pd.to_timedelta(flat_num_dates.max(), delta) + ref_date
# Cast input dates to integers of nanoseconds because `pd.to_datetime`
# works much faster when dealing with integers
# make _NS_PER_TIME_DELTA an array to ensure type upcasting
flat_num_dates_ns_int = (flat_num_dates.astype(np.float64) *
_NS_PER_TIME_DELTA[delta]).astype(np.int64)
dates = (pd.to_timedelta(flat_num_dates_ns_int, 'ns') +
ref_date).values
except (OutOfBoundsDatetime, OverflowError):
dates = _decode_datetime_with_netcdftime(
flat_num_dates.astype(np.float), units, calendar)
return dates.reshape(num_dates.shape)
def decode_cf_timedelta(num_timedeltas, units):
"""Given an array of numeric timedeltas in netCDF format, convert it into a
numpy timedelta64[ns] array.
"""
num_timedeltas = np.asarray(num_timedeltas)
units = _netcdf_to_numpy_timeunit(units)
shape = num_timedeltas.shape
num_timedeltas = num_timedeltas.ravel()
result = pd.to_timedelta(num_timedeltas, unit=units, box=False)
# NaT is returned unboxed with wrong units; this should be fixed in pandas
if result.dtype != 'timedelta64[ns]':
result = result.astype('timedelta64[ns]')
return result.reshape(shape)
def _infer_time_units_from_diff(unique_timedeltas):
for time_unit in ['days', 'hours', 'minutes', 'seconds']:
delta_ns = _NS_PER_TIME_DELTA[_netcdf_to_numpy_timeunit(time_unit)]
unit_delta = np.timedelta64(delta_ns, 'ns')
diffs = unique_timedeltas / unit_delta
if np.all(diffs == diffs.astype(int)):
return time_unit
return 'seconds'
def infer_datetime_units(dates):
"""Given an array of datetimes, returns a CF compatible time-unit string of
the form "{time_unit} since {date[0]}", where `time_unit` is 'days',
'hours', 'minutes' or 'seconds' (the first one that can evenly divide all
unique time deltas in `dates`)
"""
dates = pd.to_datetime(np.asarray(dates).ravel(), box=False)
dates = dates[pd.notnull(dates)]
unique_timedeltas = np.unique(np.diff(dates))
units = _infer_time_units_from_diff(unique_timedeltas)
reference_date = dates[0] if len(dates) > 0 else '1970-01-01'
return '%s since %s' % (units, pd.Timestamp(reference_date))
def infer_timedelta_units(deltas):
"""Given an array of timedeltas, returns a CF compatible time-unit from
{'days', 'hours', 'minutes' 'seconds'} (the first one that can evenly
divide all unique time deltas in `deltas`)
"""
deltas = pd.to_timedelta(np.asarray(deltas).ravel(), box=False)
unique_timedeltas = np.unique(deltas[pd.notnull(deltas)])
units = _infer_time_units_from_diff(unique_timedeltas)
return units
def nctime_to_nptime(times):
"""Given an array of netcdftime.datetime objects, return an array of
numpy.datetime64 objects of the same size"""
times = np.asarray(times)
new = np.empty(times.shape, dtype='M8[ns]')
for i, t in np.ndenumerate(times):
dt = datetime(t.year, t.month, t.day, t.hour, t.minute, t.second)
new[i] = np.datetime64(dt)
return new
def _cleanup_netcdf_time_units(units):
delta, ref_date = _unpack_netcdf_time_units(units)
try:
units = '%s since %s' % (delta, format_timestamp(ref_date))
except OutOfBoundsDatetime:
# don't worry about reifying the units if they're out of bounds
pass
return units
def _encode_datetime_with_netcdftime(dates, units, calendar):
"""Fallback method for encoding dates using netcdftime.
This method is more flexible than xarray's parsing using datetime64[ns]
arrays but also slower because it loops over each element.
"""
nctime = _import_netcdftime()
if np.issubdtype(dates.dtype, np.datetime64):
# numpy's broken datetime conversion only works for us precision
dates = dates.astype('M8[us]').astype(datetime)
def encode_datetime(d):
return np.nan if d is None else nctime.date2num(d, units, calendar)
return np.vectorize(encode_datetime)(dates)
def cast_to_int_if_safe(num):
int_num = np.array(num, dtype=np.int64)
if (num == int_num).all():
num = int_num
return num
def encode_cf_datetime(dates, units=None, calendar=None):
"""Given an array of datetime objects, returns the tuple `(num, units,
calendar)` suitable for a CF compliant time variable.
Unlike `date2num`, this function can handle datetime64 arrays.
See also
--------
netcdftime.date2num
"""
dates = np.asarray(dates)
if units is None:
units = infer_datetime_units(dates)
else:
units = _cleanup_netcdf_time_units(units)
if calendar is None:
calendar = 'proleptic_gregorian'
delta, ref_date = _unpack_netcdf_time_units(units)
try:
if calendar not in _STANDARD_CALENDARS or dates.dtype.kind == 'O':
# parse with netcdftime instead
raise OutOfBoundsDatetime
assert dates.dtype == 'datetime64[ns]'
delta_units = _netcdf_to_numpy_timeunit(delta)
time_delta = np.timedelta64(1, delta_units).astype('timedelta64[ns]')
        ref_date = np.datetime64(pd.Timestamp(ref_date))
"""Tests for dynamic validator."""
from datetime import date, datetime
import numpy as np
import pandas as pd
from delphi_validator.report import ValidationReport
from delphi_validator.dynamic import DynamicValidator
class TestCheckRapidChange:
params = {"data_source": "", "span_length": 1,
"end_date": "2020-09-02", "expected_lag": {}}
def test_same_df(self):
validator = DynamicValidator(self.params)
report = ValidationReport([])
test_df = pd.DataFrame([date.today()] * 5, columns=["time_value"])
ref_df = pd.DataFrame([date.today()] * 5, columns=["time_value"])
validator.check_rapid_change_num_rows(
test_df, ref_df, date.today(), "geo", "signal", report)
assert len(report.raised_errors) == 0
def test_0_vs_many(self):
validator = DynamicValidator(self.params)
report = ValidationReport([])
time_value = datetime.combine(date.today(), datetime.min.time())
test_df = pd.DataFrame([time_value] * 5, columns=["time_value"])
ref_df = pd.DataFrame([time_value] * 1, columns=["time_value"])
validator.check_rapid_change_num_rows(
test_df, ref_df, time_value, "geo", "signal", report)
assert len(report.raised_errors) == 1
assert "check_rapid_change_num_rows" in [
err.check_data_id[0] for err in report.raised_errors]
class TestCheckAvgValDiffs:
params = {"data_source": "", "span_length": 1,
"end_date": "2020-09-02", "expected_lag": {}}
def test_same_val(self):
validator = DynamicValidator(self.params)
report = ValidationReport([])
data = {"val": [1, 1, 1, 2, 0, 1], "se": [np.nan] * 6,
"sample_size": [np.nan] * 6, "geo_id": ["1"] * 6}
test_df = pd.DataFrame(data)
ref_df = pd.DataFrame(data)
validator.check_avg_val_vs_reference(
test_df, ref_df, date.today(), "geo", "signal", report)
assert len(report.raised_errors) == 0
def test_same_se(self):
validator = DynamicValidator(self.params)
report = ValidationReport([])
data = {"val": [np.nan] * 6, "se": [1, 1, 1, 2, 0, 1],
"sample_size": [np.nan] * 6, "geo_id": ["1"] * 6}
test_df = pd.DataFrame(data)
ref_df = pd.DataFrame(data)
validator.check_avg_val_vs_reference(
test_df, ref_df, date.today(), "geo", "signal", report)
assert len(report.raised_errors) == 0
def test_same_n(self):
validator = DynamicValidator(self.params)
report = ValidationReport([])
data = {"val": [np.nan] * 6, "se": [np.nan] * 6,
"sample_size": [1, 1, 1, 2, 0, 1], "geo_id": ["1"] * 6}
test_df = pd.DataFrame(data)
ref_df = pd.DataFrame(data)
validator.check_avg_val_vs_reference(
test_df, ref_df, date.today(), "geo", "signal", report)
assert len(report.raised_errors) == 0
def test_same_val_se_n(self):
validator = DynamicValidator(self.params)
report = ValidationReport([])
data = {"val": [1, 1, 1, 2, 0, 1], "se": [1, 1, 1, 2, 0, 1],
"sample_size": [1, 1, 1, 2, 0, 1], "geo_id": ["1"] * 6}
test_df = pd.DataFrame(data)
ref_df = pd.DataFrame(data)
validator.check_avg_val_vs_reference(
test_df, ref_df, date.today(), "geo", "signal", report)
assert len(report.raised_errors) == 0
def test_10x_val(self):
validator = DynamicValidator(self.params)
report = ValidationReport([])
test_data = {"val": [1, 1, 1, 20, 0, 1], "se": [np.nan] * 6,
"sample_size": [np.nan] * 6, "geo_id": ["1"] * 6}
ref_data = {"val": [1, 1, 1, 2, 0, 1], "se": [np.nan] * 6,
"sample_size": [np.nan] * 6, "geo_id": ["1"] * 6}
test_df = pd.DataFrame(test_data)
ref_df = pd.DataFrame(ref_data)
validator.check_avg_val_vs_reference(
test_df, ref_df,
datetime.combine(date.today(), datetime.min.time()), "geo", "signal", report)
assert len(report.raised_errors) == 0
def test_100x_val(self):
validator = DynamicValidator(self.params)
report = ValidationReport([])
test_data = {"val": [1, 1, 1, 200, 0, 1], "se": [np.nan] * 6,
"sample_size": [np.nan] * 6, "geo_id": ["1"] * 6}
ref_data = {"val": [1, 1, 1, 2, 0, 1], "se": [np.nan] * 6,
"sample_size": [np.nan] * 6, "geo_id": ["1"] * 6}
test_df = pd.DataFrame(test_data)
ref_df = pd.DataFrame(ref_data)
validator.check_avg_val_vs_reference(
test_df, ref_df,
datetime.combine(date.today(), datetime.min.time()), "geo", "signal", report)
assert len(report.raised_errors) == 1
assert "check_test_vs_reference_avg_changed" in [
err.check_data_id[0] for err in report.raised_errors]
def test_1000x_val(self):
validator = DynamicValidator(self.params)
report = ValidationReport([])
test_data = {"val": [1, 1, 1, 2000, 0, 1], "se": [np.nan] * 6,
"sample_size": [np.nan] * 6, "geo_id": ["1"] * 6}
ref_data = {"val": [1, 1, 1, 2, 0, 1], "se": [np.nan] * 6,
"sample_size": [np.nan] * 6, "geo_id": ["1"] * 6}
test_df = pd.DataFrame(test_data)
ref_df = pd.DataFrame(ref_data)
validator.check_avg_val_vs_reference(
test_df, ref_df,
datetime.combine(date.today(), datetime.min.time()), "geo", "signal", report)
assert len(report.raised_errors) == 1
assert "check_test_vs_reference_avg_changed" in [
err.check_data_id[0] for err in report.raised_errors]
class TestDataOutlier:
params = {"data_source": "", "span_length": 1,
"end_date": "2020-09-02", "expected_lag": {}}
pd.set_option("display.max_rows", None, "display.max_columns", None)
# Test to determine outliers based on the row data, has lead and lag outlier
def test_pos_outlier(self):
validator = DynamicValidator(self.params)
report = ValidationReport([])
ref_val = [30, 30.28571429, 30.57142857, 30.85714286, 31.14285714,
31.42857143, 31.71428571, 32, 32, 32.14285714,
32.28571429, 32.42857143, 32.57142857, 32.71428571,
32.85714286, 33, 33, 33, 33, 33, 33, 33, 33,
33, 33, 33, 33.28571429, 33.57142857, 33.85714286, 34.14285714]
test_val = [100, 100, 100]
ref_data = {"val": ref_val , "se": [np.nan] * len(ref_val),
"sample_size": [np.nan] * len(ref_val), "geo_id": ["1"] * len(ref_val),
"time_value": pd.date_range(start="2020-09-24", end="2020-10-23")}
test_data = {"val": test_val , "se": [np.nan] * len(test_val),
"sample_size": [np.nan] * len(test_val), "geo_id": ["1"] * len(test_val),
"time_value": pd.date_range(start="2020-10-24", end="2020-10-26")}
ref_data2 = {"val": ref_val , "se": [np.nan] * len(ref_val),
"sample_size": [np.nan] * len(ref_val), "geo_id": ["2"] * len(ref_val),
"time_value": pd.date_range(start="2020-09-24", end="2020-10-23")}
test_data2 = {"val": test_val , "se": [np.nan] * len(test_val),
"sample_size": [np.nan] * len(test_val), "geo_id": ["2"] * len(test_val),
"time_value": pd.date_range(start="2020-10-24",end="2020-10-26")}
ref_df = pd.concat([pd.DataFrame(ref_data), pd.DataFrame(ref_data2)]).reset_index(drop=True)
test_df = pd.concat([pd.DataFrame(test_data), pd.DataFrame(test_data2)]). \
reset_index(drop=True)
validator.check_positive_negative_spikes(
test_df, ref_df, "state", "signal", report)
assert len(report.raised_errors) == 1
assert "check_positive_negative_spikes" in [
err.check_data_id[0] for err in report.raised_errors]
def test_neg_outlier(self):
validator = DynamicValidator(self.params)
report = ValidationReport([])
ref_val = [100, 101, 100, 101, 100,
100, 100, 100, 100, 100,
100, 102, 100, 100, 100,
100, 100, 101, 100, 100,
100, 100, 100, 99, 100,
100, 98, 100, 100, 100]
test_val = [10, 10, 10]
ref_data = {"val": ref_val , "se": [np.nan] * len(ref_val),
"sample_size": [np.nan] * len(ref_val), "geo_id": ["1"] * len(ref_val),
"time_value": pd.date_range(start="2020-09-24",end="2020-10-23")}
test_data = {"val": test_val , "se": [np.nan] * len(test_val),
"sample_size": [np.nan] * len(test_val), "geo_id": ["1"] * len(test_val),
"time_value": pd.date_range(start="2020-10-24",end="2020-10-26")}
ref_data2 = {"val": ref_val , "se": [np.nan] * len(ref_val),
"sample_size": [np.nan] * len(ref_val), "geo_id": ["2"] * len(ref_val),
"time_value": pd.date_range(start="2020-09-24",end="2020-10-23")}
test_data2 = {"val": test_val , "se": [np.nan] * len(test_val),
"sample_size": [np.nan] * len(test_val), "geo_id": ["2"] * len(test_val),
"time_value": pd.date_range(start="2020-10-24",end="2020-10-26")}
ref_df = pd.concat([pd.DataFrame(ref_data), pd.DataFrame(ref_data2)]). \
reset_index(drop=True)
test_df = pd.concat([pd.DataFrame(test_data), pd.DataFrame(test_data2)]). \
reset_index(drop=True)
validator.check_positive_negative_spikes(
test_df, ref_df, "state", "signal", report)
assert len(report.raised_errors) == 1
assert "check_positive_negative_spikes" in [
err.check_data_id[0] for err in report.raised_errors]
def test_zero_outlier(self):
validator = DynamicValidator(self.params)
report = ValidationReport([])
ref_val = [30, 30.28571429, 30.57142857, 30.85714286, 31.14285714,
31.42857143, 31.71428571, 32, 32, 32.14285714,
32.28571429, 32.42857143, 32.57142857, 32.71428571,
32.85714286, 33, 33, 33, 33, 33, 33, 33, 33,
33, 33, 33, 33.28571429, 33.57142857, 33.85714286, 34.14285714]
test_val = [0, 0, 0]
ref_data = {"val": ref_val , "se": [np.nan] * len(ref_val),
"sample_size": [np.nan] * len(ref_val), "geo_id": ["1"] * len(ref_val),
"time_value": pd.date_range(start="2020-09-24",end="2020-10-23")}
test_data = {"val": test_val , "se": [np.nan] * len(test_val),
"sample_size": [np.nan] * len(test_val), "geo_id": ["1"] * len(test_val),
"time_value": pd.date_range(start="2020-10-24",end="2020-10-26")}
ref_data2 = {"val": ref_val , "se": [np.nan] * len(ref_val),
"sample_size": [np.nan] * len(ref_val), "geo_id": ["2"] * len(ref_val),
"time_value": pd.date_range(start="2020-09-24",end="2020-10-23")}
test_data2 = {"val": test_val , "se": [np.nan] * len(test_val),
"sample_size": [np.nan] * len(test_val), "geo_id": ["2"] * len(test_val),
"time_value": pd.date_range(start="2020-10-24",end="2020-10-26")}
ref_df = pd.concat([pd.DataFrame(ref_data), pd.DataFrame(ref_data2)]). \
reset_index(drop=True)
test_df = pd.concat([pd.DataFrame(test_data), pd.DataFrame(test_data2)]).reset_index(drop=True)
# load_ext autoreload
# autoreload 2
# experiment, parsing and math
import cmfg
from Parser import Parser
from sys import argv
import numpy as np
import pickle
import math as m
import pandas as pd
ind_min = 12
ind_max = 22
inifile = '../set/config_TRNT_' + str(ind_min).zfill(3) + '.ini'
config = Parser(inifile)
cnames = config.p._fields
dfa = pd.DataFrame(columns=cnames)
# This file is part of GridCal.
#
# GridCal is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GridCal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GridCal. If not, see <http://www.gnu.org/licenses/>.
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
from GridCal.Engine.Devices.bus import Bus
from GridCal.Engine.Devices.enumerations import BranchType, ConverterControlType
from GridCal.Engine.Devices.editable_device import EditableDevice, DeviceType, GCProp
class UPFC(EditableDevice):
def __init__(self, bus_from: Bus = None, bus_to: Bus = None, name='UPFC', code='', idtag=None, active=True,
rs=0.0, xs=0.00001, rl=0.0, xl=0.0, bl=0.0, rp=0.0, xp=0.0, vp=1.0, Pset = 0.0, Qset=0.0, rate=9999,
mttf=0, mttr=0, cost=100, cost_prof=None, rate_prof=None, active_prof=None, contingency_factor=1.0,
contingency_enabled=True, monitor_loading=True):
"""
Unified Power Flow Converter (UPFC)
:param bus_from:
:param bus_to:
:param name:
:param idtag:
:param active:
:param rs: series resistance (p.u.)
:param xs: series reactance (p.u.)
:param rl: line resistance (p.u.)
:param xl: line reactance (p.u.)
:param bl: line shunt susceptance (p.u.)
:param rp: shunt resistance (p.u.)
:param xp: shunt reactance (p.u.)
:param vp: shunt voltage set point (p.u.)
:param rate: Power rating (MVA)
:param Pset: Power set point (MW)
:param mttf:
:param mttr:
:param cost:
:param cost_prof:
:param rate_prof:
:param active_prof:
"""
EditableDevice.__init__(self,
name=name,
idtag=idtag,
active=active,
code=code,
device_type=DeviceType.UpfcDevice,
editable_headers={'name': GCProp('', str, 'Name of the branch.'),
'idtag': GCProp('', str, 'Unique ID'),
'code': GCProp('', str, 'Secondary ID'),
'bus_from': GCProp('', DeviceType.BusDevice,
'Name of the bus at the "from" side of the branch.'),
'bus_to': GCProp('', DeviceType.BusDevice,
'Name of the bus at the "to" side of the branch.'),
'active': GCProp('', bool, 'Is the branch active?'),
'rate': GCProp('MVA', float, 'Thermal rating power of the branch.'),
'contingency_factor': GCProp('p.u.', float,
'Rating multiplier for contingencies.'),
'contingency_enabled': GCProp('', bool,
'Consider this UPFC for contingencies.'),
'monitor_loading': GCProp('', bool,
'Monitor this device loading for optimization, NTC or contingency studies.'),
'mttf': GCProp('h', float, 'Mean time to failure, '
'used in reliability studies.'),
'mttr': GCProp('h', float, 'Mean time to recovery, '
'used in reliability studies.'),
'Rl': GCProp('p.u.', float, 'Line resistance.'),
'Xl': GCProp('p.u.', float, 'Line reactance.'),
'Bl': GCProp('p.u.', float, 'Line susceptance.'),
'Rs': GCProp('p.u.', float, 'Series resistance.'),
'Xs': GCProp('p.u.', float, 'Series reactance.'),
'Rsh': GCProp('p.u.', float, 'Shunt resistance.'),
'Xsh': GCProp('p.u.', float, 'Shunt reactance.'),
'Vsh': GCProp('p.u.', float, 'Shunt voltage set point.'),
'Pfset': GCProp('MW', float, 'Active power set point.'),
'Qfset': GCProp('MVAr', float, 'Reactive power set point.'),
'Cost': GCProp('e/MWh', float, 'Cost of overloads. Used in OPF.'),
},
non_editable_attributes=['bus_from', 'bus_to', 'idtag'],
properties_with_profile={'active': 'active_prof',
'rate': 'rate_prof',
'Cost': 'Cost_prof'})
self.bus_from = bus_from
self.bus_to = bus_to
# List of measurements
self.measurements = list()
# total impedance and admittance in p.u.
self.Rl = rl
self.Xl = xl
self.Bl = bl
self.Rs = rs
self.Xs = xs
self.Rsh = rp
self.Xsh = xp
self.Vsh = vp
self.Pfset = Pset
self.Qfset = Qset
self.Cost = cost
self.Cost_prof = cost_prof
self.mttf = mttf
self.mttr = mttr
self.active = active
self.active_prof = active_prof
# branch rating in MVA
self.rate = rate
self.contingency_factor = contingency_factor
self.contingency_enabled: bool = contingency_enabled
self.monitor_loading: bool = monitor_loading
self.rate_prof = rate_prof
# branch type: Line, Transformer, etc...
self.branch_type = BranchType.UPFC
def get_properties_dict(self, version=3):
"""
Get json dictionary
:return:
"""
if version == 2:
return {'id': self.idtag,
'type': 'upfc',
'phases': 'ps',
'name': self.name,
'name_code': self.code,
'bus_from': self.bus_from.idtag,
'bus_to': self.bus_to.idtag,
'active': self.active,
'rate': self.rate,
'rl': self.Rl,
'xl': self.Xl,
'bl': self.Bl,
'rs': self.Rs,
'xs': self.Xs,
'rsh': self.Rsh,
'xsh': self.Xsh,
'vsh': self.Vsh,
'Pfset': self.Pfset,
'Qfset': self.Qfset
}
elif version == 3:
return {'id': self.idtag,
'type': 'upfc',
'phases': 'ps',
'name': self.name,
'name_code': self.code,
'bus_from': self.bus_from.idtag,
'bus_to': self.bus_to.idtag,
'active': self.active,
'rate': self.rate,
'contingency_factor1': self.contingency_factor,
'contingency_factor2': self.contingency_factor,
'contingency_factor3': self.contingency_factor,
'rl': self.Rl,
'xl': self.Xl,
'bl': self.Bl,
'rs': self.Rs,
'xs': self.Xs,
'rsh': self.Rsh,
'xsh': self.Xsh,
'vsh': self.Vsh,
'Pfset': self.Pfset,
'Qfset': self.Qfset
}
else:
return dict()
def get_profiles_dict(self, version=3):
"""
:return:
"""
if self.active_prof is not None:
active_prof = self.active_prof.tolist()
rate_prof = self.rate_prof.tolist()
else:
active_prof = list()
rate_prof = list()
return {'id': self.idtag,
'active': active_prof,
'rate': rate_prof}
def get_units_dict(self, version=3):
"""
Get units of the values
"""
return {'rate': 'MW',
'r': 'p.u.',
'x': 'p.u.',
'b': 'p.u.',
'g': 'p.u.'}
def plot_profiles(self, time_series=None, my_index=0, show_fig=True):
"""
Plot the time series results of this object
:param time_series: TimeSeries Instance
:param my_index: index of this object in the simulation
:param show_fig: Show the figure?
"""
if time_series is not None:
fig = plt.figure(figsize=(12, 8))
ax_1 = fig.add_subplot(211)
ax_2 = fig.add_subplot(212, sharex=ax_1)
x = time_series.results.time
# loading
y = time_series.results.loading.real * 100.0
df = pd.DataFrame(data=y[:, my_index], index=x, columns=[self.name])
ax_1.set_title('Loading', fontsize=14)
ax_1.set_ylabel('Loading [%]', fontsize=11)
df.plot(ax=ax_1)
# losses
y = np.abs(time_series.results.losses)
df = pd.DataFrame(data=y[:, my_index], index=x, columns=[self.name])
import os
import cv2
import glob
import pickle
import pandas as pd
import xml.etree.ElementTree as ET
from .utils import cache_result
from . import config as cfg
@cache_result(lambda path: 'cache/cached-' + os.path.basename(path) + '.pkl')
def get_ground_truth(path):
df = pd.read_csv(path)
'''
MIT License
Copyright (c) 2020 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
"""
The products generated from the daily report are:
3
4
5
7
8
9
10
11
12
13
14
17
20
23
24
26
27
30
36
44
"""
import pandas as pd
from utils import *
from shutil import copyfile
from os import listdir
from os.path import isfile, join
from datetime import datetime, timedelta
import numpy as np
def prod4(fte, producto):
print('Generando producto 4')
now = datetime.now()
today = now.strftime("%Y-%m-%d")
output = producto + today + '-CasosConfirmados-totalRegional.csv'
df = pd.read_csv(fte, quotechar='"', sep=',', thousands=r'.', decimal=",")
df.rename(columns={'Unnamed: 0': 'Region'}, inplace=True)
if 'Unnamed: 7' in df.columns:
df.drop(columns=['Unnamed: 7'], inplace=True)
df_obj = df.select_dtypes(['object'])
df[df_obj.columns] = df_obj.apply(lambda x: x.str.strip())
regionName(df)
df.at[16, 'Region'] = 'Total'
# textract recognizes 0 as the letter o
df.replace({'O': 0}, inplace=True)
numeric_columns = [x for x in df.columns if x != 'Region']
for i in numeric_columns:
df[i] = df[i].astype(str)
#df[i] = df[i].replace({r'\.': ''}, regex=True)
df[i] = df[i].replace({r'\,': '.'}, regex=True)
df.to_csv(output, index=False)
def prod5(fte, producto):
print('Generando producto 5')
# we need national-level series by date:
# Casos nuevos con sintomas
# Casos totales
# Casos recuperados  # no longer reported
# Fallecidos
# Casos activos
# Casos nuevos sin sintomas
# Casos nuevos sin notificar
# Casos activos confirmados
now = datetime.now()
timestamp = now.strftime("%Y-%m-%d")
df_input_file = pd.read_csv(fte + 'CasosConfirmadosTotales.csv')
df_input_file['Fecha'] = pd.to_datetime(df_input_file['Fecha'], format='%d-%m-%Y')
#print(df_input_file.to_string())
#las columnas son :
# Casos totales acumulados Casos nuevos totales Casos nuevos con sintomas Casos nuevos sin sintomas* Casos nuevos sin notificar Fallecidos totales Casos activos confirmados
df_input_file.rename(columns={'Casos totales acumulados': 'Casos totales',
'Casos nuevos totales': 'Casos nuevos totales',
'Casos nuevos con sintomas': 'Casos nuevos con sintomas',
'Casos nuevos sin sintomas*': 'Casos nuevos sin sintomas',
'Fallecidos totales': 'Fallecidos'}, inplace=True)
#print(timestamp)
last_row = df_input_file[df_input_file['Fecha'] == timestamp]
#print(last_row.to_string())
df_output_file = pd.read_csv(producto)
df_output_file = df_output_file.T
df_output_file.columns = df_output_file.iloc[0]
df_output_file.drop(df_output_file.index[0], inplace=True)
df_output_file.index = pd.to_datetime(df_output_file.index, format='%Y-%m-%d')
df_output_file.index.name = 'Fecha'
#print(df_output_file.index)
#print(last_row['Fecha'].values[0])
if last_row['Fecha'].values[0] in df_output_file.index:
print('Fecha was there, overwriting it')
df_output_file.drop(last_row['Fecha'].values[0], axis=0, inplace=True)
#print(df_output_file.to_string())
last_row.index = last_row['Fecha']
last_row.drop(columns=['Fecha'], inplace=True)
df_output_file = df_output_file.append(last_row)
#print(df_output_file.to_string())
else:
print('new date, adding row')
last_row.index = last_row['Fecha']
last_row.drop(columns=['Fecha'], inplace=True)
df_output_file = df_output_file.append(last_row)
################################## <NAME> Demian
# Recovered-by-FIS figures are still missing
# on June 2 there was a change: active and recovered cases by FIS and FD are computed from that day onward.
# before that they are None
fecha_de_corte = datetime(2020, 6, 2)
correccion_2020_06_17 = datetime(2020, 6, 17)
for i in df_output_file.index:
if i >= fecha_de_corte:
#print(str(i))
# Casos activos por FIS start on June 2 by definition and correspond to the active cases in the daily report
df_output_file.loc[i, 'Casos activos por FIS'] = df_output_file.loc[i, 'Casos activos']
# Recovered by FIS is computed by subtracting deaths and active FIS cases
df_output_file.loc[i, 'Casos recuperados por FIS'] = \
df_output_file.loc[i, 'Casos totales'] - \
df_output_file.loc[i, 'Casos activos'] - \
df_output_file.loc[i, 'Fallecidos']
# Still need active and recovered cases by FD: use the older figures to compute them
fourteen_days = timedelta(days=14)
# Casos activos por FD = active cases up to June 2. Before that, active cases are copied over
#df.loc[i, 'C'] = df.loc[i - 1, 'C'] * df.loc[i, 'A'] + df.loc[i, 'B']
#print(i)
if (i - fourteen_days) in df_output_file.index:
#print('14 days ago is on the df')
df_output_file.loc[i, 'Casos activos por FD'] = df_output_file.loc[i, 'Casos totales'] - \
df_output_file.loc[i - fourteen_days, 'Casos totales']
if i == correccion_2020_06_17:
# today's active = yesterday's active + today's cases - cases (t-14) - today's deaths. That would give them 75346 ...
print('Corrigiendo el 2020-06-17')
df_output_file.loc[i, 'Casos activos por FD'] = \
df_output_file.loc[i - timedelta(days=1), 'Casos activos por FD'] +\
df_output_file.loc[i, 'Casos nuevos totales'] -\
df_output_file.loc[i - fourteen_days, 'Casos nuevos totales'] -\
df_output_file.loc[i, 'Fallecidos'] + \
df_output_file.loc[i - timedelta(days=1), 'Fallecidos']
print('Casos activos por FD hoy: ' + str(df_output_file.loc[i, 'Casos activos por FD']))
print('Casos activos ayer: ' + str(df_output_file.loc[i - timedelta(days=1), 'Casos activos por FD']))
print('Casoso nuevos totales hoy : ' + str(df_output_file.loc[i, 'Casos nuevos totales']))
print('Casos totales 14 dias atras: ' + str(df_output_file.loc[i - fourteen_days, 'Casos nuevos totales']))
print('Fallecidos hoy: ' + str(df_output_file.loc[i, 'Fallecidos']))
print('Fallecidos ayer: ' + str(df_output_file.loc[i - timedelta(days=1), 'Fallecidos']))
else:
print(str(i) + ' has no data 14 days ago')
#df_output_file.loc[i, 'Casos activos por FD'] = df_output_file['Casos totales'] - \
# df_output_file.loc[i - fourteen_days_ago, 'Casos totales']
# Equal to recovered cases up to June 1 (inclusive); from June 2 onward it is computed
# Recovered by FD is computed by subtracting deaths and active FD cases
df_output_file.loc[i, 'Casos recuperados por FD'] = (
df_output_file.loc[i, 'Casos totales'] -
df_output_file.loc[i, 'Casos activos por FD'] -
df_output_file.loc[i, 'Fallecidos'])
# what happens before the cutoff date
else:
# Casos activos por FD = active cases up to June 2. Before that, active cases are copied over
df_output_file.loc[i, 'Casos activos por FD'] = df_output_file.loc[i, 'Casos activos']
df_output_file.loc[i, 'Casos activos por FIS'] = np.NaN
df_output_file.loc[i, 'Casos recuperados por FIS'] = np.NaN
df_output_file.loc[i, 'Casos recuperados por FD'] = df_output_file.loc[i, 'Casos recuperados']
################################## Demian's section
df_output_file.sort_index(inplace=True)
totales = df_output_file.T
#print(totales.to_string())
#print(totales.columns[1:])
## this is standard
#totales = pd.read_csv(producto)
#print(totales.columns.dtype)
totales.columns = totales.columns.astype(str)
#print(totales.to_string())
totales.to_csv(producto, index_label='Fecha')
totales_t = totales.transpose()
totales_t.to_csv(producto.replace('.csv', '_T.csv'))
#print(totales.to_string())
df_std = pd.melt(totales.reset_index(), id_vars='index', value_vars=totales.columns)
#df_std = pd.read_csv(producto.replace('.csv', '_T.csv'))
df_std.rename(columns={'index': 'Dato', 'value': 'Total'}, inplace=True)
#print(df_std.to_string())
df_std.to_csv(producto.replace('.csv', '_std.csv'), index=False)
def prod3_13_14_26_27(fte):
onlyfiles = [f for f in listdir(fte) if isfile(join(fte, f))]
cumulativoCasosNuevos = pd.DataFrame({'Region': [],
'Casos nuevos': []})
cumulativoCasosTotales = pd.DataFrame({'Region': [],
'Casos totales': []})
cumulativoFallecidos = pd.DataFrame({'Region': [],
'Fallecidos': []})
casosNuevosConSintomas = pd.DataFrame({'Region': [],
'Fecha': []})
casosNuevosSinSintomas = pd.DataFrame({'Region': [],
'Fecha': []})
casosNuevosSinNotificar = pd.DataFrame({'Region': [],
'Fecha': []})
onlyfiles.sort()
onlyfiles.remove('README.md')
for eachfile in onlyfiles:
print('processing ' + eachfile)
date = eachfile.replace("-CasosConfirmados-totalRegional", "").replace(".csv", "")
dataframe = pd.read_csv(fte + eachfile)
# sanitize headers
#print(eachfile)
dataframe.rename(columns={'Región': 'Region'}, inplace=True)
dataframe.rename(columns={'Casos nuevos': 'Casos nuevos'}, inplace=True)
dataframe.rename(columns={' Casos nuevos': 'Casos nuevos'}, inplace=True)
dataframe.rename(columns={'Casos nuevos totales': 'Casos nuevos'}, inplace=True)
dataframe.rename(columns={'Casos nuevos totales ': 'Casos nuevos'}, inplace=True)
dataframe.rename(columns={'Casos nuevos totales': 'Casos nuevos'}, inplace=True)
dataframe.rename(columns={'Casos totales': 'Casos totales'}, inplace=True)
dataframe.rename(columns={' Casos totales': 'Casos totales'}, inplace=True)
dataframe.rename(columns={'Casos totales acumulados': 'Casos totales'}, inplace=True)
dataframe.rename(columns={'Casos totales acumulados ': 'Casos totales'}, inplace=True)
dataframe.rename(columns={'Casos totales acumulados': 'Casos totales'}, inplace=True)
dataframe.rename(columns={' Casos fallecidos': 'Fallecidos'}, inplace=True)
dataframe.rename(columns={'Fallecidos totales ': 'Fallecidos'}, inplace=True)
dataframe.rename(columns={'Fallecidos totales': 'Fallecidos'}, inplace=True)
dataframe.rename(columns={'Casos nuevos con síntomas': 'Casos nuevos con sintomas'}, inplace=True)
dataframe.rename(columns={' Casos nuevos con síntomas': 'Casos nuevos con sintomas'}, inplace=True)
dataframe.rename(columns={'Casos nuevos con síntomas': 'Casos nuevos con sintomas'}, inplace=True)
dataframe.rename(columns={'Casos nuevos con sintomas': 'Casos nuevos con sintomas'}, inplace=True)
dataframe.rename(columns={' Casos nuevos con sintomas': 'Casos nuevos con sintomas'}, inplace=True)
dataframe.rename(columns={'Casos nuevos con sintomas': 'Casos nuevos con sintomas'}, inplace=True)
dataframe.rename(columns={'Casos nuevos con sintomas ': 'Casos nuevos con sintomas'}, inplace=True)
dataframe.rename(columns={'Casos nuevos sin síntomas': 'Casos nuevos sin sintomas'}, inplace=True)
dataframe.rename(columns={' Casos nuevos sin síntomas': 'Casos nuevos sin sintomas'}, inplace=True)
dataframe.rename(columns={'Casos nuevos sin síntomas': 'Casos nuevos sin sintomas'}, inplace=True)
dataframe.rename(columns={'Casos nuevos sin síntomas*': 'Casos nuevos sin sintomas'}, inplace=True)
dataframe.rename(columns={' Casos nuevos sin síntomas*': 'Casos nuevos sin sintomas'}, inplace=True)
dataframe.rename(columns={'Casos nuevos sin síntomas*': 'Casos nuevos sin sintomas'}, inplace=True)
dataframe.rename(columns={'Casos nuevos sin sintomas': 'Casos nuevos sin sintomas'}, inplace=True)
dataframe.rename(columns={' Casos nuevos sin sintomas': 'Casos nuevos sin sintomas'}, inplace=True)
dataframe.rename(columns={'Casos nuevos sin sintomas': 'Casos nuevos sin sintomas'}, inplace=True)
dataframe.rename(columns={'Casos nuevos sin sintomas*': 'Casos nuevos sin sintomas'}, inplace=True)
dataframe.rename(columns={' Casos nuevos sin sintomas*': 'Casos nuevos sin sintomas'}, inplace=True)
dataframe.rename(columns={'Casos nuevos sin sintomas*': 'Casos nuevos sin sintomas'}, inplace=True)
dataframe.rename(columns={'Casos nuevos sin sintomas* ': 'Casos nuevos sin sintomas'}, inplace=True)
dataframe.rename(columns={'Casos nuevos sin notificar': 'Casos nuevos sin notificar'}, inplace=True)
dataframe.rename(columns={' Casos nuevos sin notificar': 'Casos nuevos sin notificar'}, inplace=True)
dataframe.rename(columns={'Casos nuevos sin notificar': 'Casos nuevos sin notificar'}, inplace=True)
dataframe.rename(columns={'Casos nuevos sin notificar**': 'Casos nuevos sin notificar'}, inplace=True)
dataframe.rename(columns={' Casos nuevos sin notificar**': 'Casos nuevos sin notificar'}, inplace=True)
dataframe.rename(columns={'Casos nuevos sin notificar**': 'Casos nuevos sin notificar'}, inplace=True)
if cumulativoCasosNuevos['Region'].empty:
cumulativoCasosNuevos[['Region', 'Casos nuevos']] = dataframe[['Region', 'Casos nuevos']]
cumulativoCasosNuevos.rename(columns={'Casos nuevos': date}, inplace=True)
cumulativoCasosTotales[['Region', 'Casos totales']] = dataframe[['Region', 'Casos totales']]
cumulativoCasosTotales.rename(columns={'Casos totales': date}, inplace=True)
else:
#print(dataframe.columns)
cumulativoCasosNuevos[date] = dataframe['Casos nuevos']
cumulativoCasosTotales[date] = dataframe['Casos totales']
if 'Fallecidos' in dataframe.columns:
if cumulativoFallecidos['Region'].empty:
cumulativoFallecidos[['Region', 'Fallecidos']] = dataframe[['Region', 'Fallecidos']]
cumulativoFallecidos.rename(columns={'Fallecidos': date}, inplace=True)
else:
cumulativoFallecidos[date] = dataframe['Fallecidos']
if 'Casos nuevos con sintomas' in dataframe.columns:
if casosNuevosConSintomas['Region'].empty:
casosNuevosConSintomas[['Region', 'Fecha']] = dataframe[['Region', 'Casos nuevos con sintomas']]
casosNuevosConSintomas.rename(columns={'Fecha': date}, inplace=True)
else:
casosNuevosConSintomas[date] = dataframe['Casos nuevos con sintomas']
else:
date2 = (pd.to_datetime(date)).strftime('%Y-%m-%d')
if date2 < '2020-04-29':
if casosNuevosConSintomas['Region'].empty:
casosNuevosConSintomas[['Region', 'Fecha']] = dataframe[['Region','Casos nuevos']]
casosNuevosConSintomas.rename(columns={'Fecha': date}, inplace=True)
else:
casosNuevosConSintomas[date] = dataframe['Casos nuevos']
if 'Casos nuevos sin sintomas' in dataframe.columns:
if casosNuevosSinSintomas['Region'].empty:
casosNuevosSinSintomas[['Region', 'Fecha']] = dataframe[['Region', 'Casos nuevos sin sintomas']]
casosNuevosSinSintomas.rename(columns={'Fecha': date}, inplace=True)
else:
casosNuevosSinSintomas[date] = dataframe['Casos nuevos sin sintomas']
if 'Casos nuevos sin notificar' in dataframe.columns:
if casosNuevosSinNotificar['Region'].empty:
casosNuevosSinNotificar[['Region', 'Fecha']] = dataframe[['Region', 'Casos nuevos sin notificar']]
casosNuevosSinNotificar.rename(columns={'Fecha': date}, inplace=True)
else:
casosNuevosSinNotificar[date] = dataframe['Casos nuevos sin notificar']
# standardize region names
regionName(cumulativoCasosNuevos)
regionName(cumulativoCasosTotales)
regionName(cumulativoFallecidos)
regionName(casosNuevosConSintomas)
regionName(casosNuevosSinSintomas)
regionName(casosNuevosSinNotificar)
cumulativoCasosNuevos_T = cumulativoCasosNuevos.transpose()
cumulativoCasosTotales_T = cumulativoCasosTotales.transpose()
cumulativoFallecidos_T = cumulativoFallecidos.transpose()
casosNuevosConSintomas_T = casosNuevosConSintomas.transpose()
casosNuevosSinSintomas_T = casosNuevosSinSintomas.transpose()
casosNuevosSinNotificar_T = casosNuevosSinNotificar.transpose()
#### PRODUCTO 3
names = ['Casos acumulados','Casos nuevos totales','Casos nuevos con sintomas','Casos nuevos sin sintomas',
'Casos nuevos sin notificar','Fallecidos totales']
frames = [cumulativoCasosTotales,cumulativoCasosNuevos,casosNuevosConSintomas,casosNuevosSinSintomas,
casosNuevosSinNotificar,cumulativoFallecidos]
for i in range(len(names)):
list = [names[i]]*len(cumulativoCasosTotales)
temp = pd.DataFrame.copy(frames[i])
temp.insert(1,'Categoria',list)
if i == 0: TotalesPorRegion = temp
if i > 0:
TotalesPorRegion = pd.concat([TotalesPorRegion,temp],axis=0)
TotalesPorRegion = TotalesPorRegion.fillna('')
TotalesPorRegion_T = TotalesPorRegion.transpose()
TotalesPorRegion.to_csv('../output/producto3/TotalesPorRegion.csv', index=False)
TotalesPorRegion_T.to_csv('../output/producto3/TotalesPorRegion_T.csv', header=False)
identifiers = ['Region','Categoria']
variables = [x for x in TotalesPorRegion.columns if x not in identifiers]
df_std = pd.melt(TotalesPorRegion, id_vars=identifiers, value_vars=variables, var_name='Fecha',
value_name='Total')
df_std.to_csv('../output/producto3/TotalesPorRegion_std.csv', index=False)
cumulativoCasosTotales.to_csv('../output/producto3/CasosTotalesCumulativo.csv', index=False)
cumulativoCasosTotales_T.to_csv('../output/producto3/CasosTotalesCumulativo_T.csv', header=False)
identifiers = ['Region']
variables = [x for x in cumulativoCasosTotales.columns if x not in identifiers]
df_std = pd.melt(cumulativoCasosTotales, id_vars=identifiers, value_vars=variables, var_name='Fecha',
value_name='Total')
df_std.to_csv('../output/producto3/CasosTotalesCumulativo_std.csv', index=False)
#### PRODUCTO 13
cumulativoCasosNuevos.to_csv('../output/producto13/CasosNuevosCumulativo.csv', index=False)
cumulativoCasosNuevos_T.to_csv('../output/producto13/CasosNuevosCumulativo_T.csv', header=False)
identifiers = ['Region']
variables = [x for x in cumulativoCasosTotales.columns if x not in identifiers]
df_std = pd.melt(cumulativoCasosNuevos, id_vars=identifiers, value_vars=variables, var_name='Fecha',
value_name='Total')
df_std.to_csv('../output/producto13/CasosNuevosCumulativo_std.csv', index=False)
#### PRODUCTO 14
cumulativoFallecidos.to_csv('../output/producto14/FallecidosCumulativo.csv', index=False)
cumulativoFallecidos_T.to_csv('../output/producto14/FallecidosCumulativo_T.csv', header=False)
identifiers = ['Region']
variables = [x for x in cumulativoFallecidos.columns if x not in identifiers]
df_std = pd.melt(cumulativoFallecidos, id_vars=identifiers, value_vars=variables, var_name='Fecha',
value_name='Total')
df_std.to_csv('../output/producto14/FallecidosCumulativo_std.csv', index=False)
#### PRODUCTO 26
casosNuevosConSintomas.to_csv('../output/producto26/CasosNuevosConSintomas.csv', index=False)
casosNuevosConSintomas_T.to_csv('../output/producto26/CasosNuevosConSintomas_T.csv', header=False)
identifiers = ['Region']
variables = [x for x in casosNuevosConSintomas.columns if x not in identifiers]
df_std = pd.melt(casosNuevosConSintomas, id_vars=identifiers, value_vars=variables, var_name='Fecha',
value_name='Casos confirmados')
df_std.to_csv('../output/producto26/CasosNuevosConSintomas_std.csv', index=False)
#### PRODUCTO 27
casosNuevosSinSintomas.to_csv('../output/producto27/CasosNuevosSinSintomas.csv', index=False)
casosNuevosSinSintomas_T.to_csv('../output/producto27/CasosNuevosSinSintomas_T.csv', header=False)
identifiers = ['Region']
variables = [x for x in casosNuevosSinSintomas.columns if x not in identifiers]
df_std = pd.melt(casosNuevosSinSintomas, id_vars=identifiers, value_vars=variables, var_name='Fecha',
value_name='Casos confirmados')
df_std.to_csv('../output/producto27/CasosNuevosSinSintomas_std.csv', index=False)
def prod7_8(fte, producto):
df = pd.read_csv(fte, dtype={'Codigo region': object})
regionName(df)
df = df.replace('-', '', regex=True)
df_t = df.T
df.to_csv(producto + '.csv', index=False)
df_t.to_csv(producto + '_T.csv', header=False)
identifiers = ['Region', 'Codigo region', 'Poblacion']
variables = [x for x in df.columns if x not in identifiers]
df_std = pd.melt(df, id_vars=identifiers, value_vars=variables, var_name='fecha', value_name='numero')
df_std.to_csv(producto + '_std.csv', index=False)
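# Small illustration (added, with made-up demo values): the wide-to-long reshape
# used by prod7_8 and the other prod* functions turns one column per date into
# (Region, fecha, numero) rows via pd.melt.
def _example_melt_std_format():
    demo = pd.DataFrame({'Region': ['Tarapaca', 'Antofagasta'],
                         'Codigo region': ['01', '02'],
                         'Poblacion': [100, 200],
                         '2020-04-13': [10, 12],
                         '2020-04-14': [11, 15]})
    identifiers = ['Region', 'Codigo region', 'Poblacion']
    variables = [x for x in demo.columns if x not in identifiers]
    return pd.melt(demo, id_vars=identifiers, value_vars=variables,
                   var_name='fecha', value_name='numero')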
def prod9_10(fte, producto):
copyfile(fte, producto + '.csv')
HospitalizadosUCIEtario_T = transpone_csv(producto + '.csv')
HospitalizadosUCIEtario_T.to_csv(producto + '_T.csv', header=False)
df = pd.read_csv(fte)
identifiers = ['Grupo de edad']
variables = [x for x in df.columns if x not in identifiers]
df_std = pd.melt(df, id_vars=identifiers, value_vars=variables, var_name='fecha', value_name='Casos confirmados')
df_std.to_csv(producto + '_std.csv', index=False)
def prod17(fte, producto):
copyfile(fte, producto + '.csv')
df = pd.read_csv(fte)
df_t = df.T
df_t.to_csv(producto + '_T.csv', header=False)
identifiers = ['Establecimiento', 'Examenes']
variables = [x for x in df.columns if x not in identifiers]
df_std = pd.melt(df, id_vars=identifiers, value_vars=variables, var_name='fecha', value_name='Numero de PCR')
df_std.to_csv(producto + '_std.csv', index=False)
def prod20(fte, producto):
copyfile(fte, producto + '.csv')
df = pd.read_csv(fte)
df_t = df.T
df_t.to_csv(producto + '_T.csv', header=False)
identifiers = ['Ventiladores']
variables = [x for x in df.columns if x not in identifiers]
df_std = pd.melt(df, id_vars=identifiers, value_vars=variables, var_name='fecha', value_name='numero')
df_std.to_csv(producto + '_std.csv', index=False)
def prod23(fte, producto):
copyfile(fte, producto + '.csv')
df = pd.read_csv(fte)
df_t = df.T
df_t.to_csv(producto + '_T.csv', header=False)
identifiers = ['Casos']
variables = [x for x in df.columns if x not in identifiers]
df_std = pd.melt(df, id_vars=identifiers, value_vars=variables, var_name='fecha', value_name='Casos confirmados')
df_std.to_csv(producto + '_std.csv', index=False)
def prod24(fte, producto):
copyfile(fte, producto + '.csv')
df = pd.read_csv(fte)
df_t = df.T
df_t.to_csv(producto + '_T.csv', header=False)
identifiers = ['Tipo de cama']
variables = [x for x in df.columns if x not in identifiers]
df_std = pd.melt(df, id_vars=identifiers, value_vars=variables, var_name='fecha', value_name='Casos confirmados')
df_std.to_csv(producto + '_std.csv', index=False)
def prod30(fte, producto):
copyfile(fte, producto + '.csv')
df = pd.read_csv(fte)
df_t = df.T
df_t.to_csv(producto + '_T.csv', header=False)
identifiers = ['Casos']
variables = [x for x in df.columns if x not in identifiers]
df_std = pd.melt(df, id_vars=identifiers, value_vars=variables, var_name='Fecha', value_name='Casos confirmados')
df_std.to_csv(producto + '_std.csv', index=False)
def prod36(fte, producto):
copyfile(fte, producto + '.csv')
df = pd.read_csv(fte)
# AUTOGENERATED! DO NOT EDIT! File to edit: src/01_Clustering.ipynb (unless otherwise specified).
__all__ = ['module_path', 'get_alpha_shape', 'set_colinear', 'collinear', 'get_segments', 'get_polygons_buf',
'labels_filtra', 'levels_from_strings', 'get_tag_level_df_labels', 'level_tag', 'get_dics_labels',
'get_label_clusters_df', 'get_mini_jaccars', 'jaccard_distance', 'mod_cid_label', 'retag_originals',
'clustering', 'recursive_clustering', 'recursive_clustering_tree', 'compute_dbscan', 'adaptative_DBSCAN',
'compute_hdbscan', 'compute_OPTICS', 'compute_Natural_cities', 'SSM', 'get_tree_from_clustering',
'generate_tree_clusterize_form']
# Cell
#export
import os
import sys
import numpy as np
import pandas as pd
import kneed
import itertools
import shapely
import random
import time
import re
from CGAL.CGAL_Alpha_shape_2 import *
from CGAL.CGAL_Kernel import Point_2
from sklearn.cluster import DBSCAN, OPTICS
from sklearn.preprocessing import StandardScaler
from shapely.geometry import LineString
from shapely.ops import polygonize, cascaded_union
from shapely.geometry import box
from shapely.geometry import Point, Polygon, MultiPolygon
from shapely.ops import polygonize_full, linemerge, unary_union
from scipy.spatial import cKDTree, Delaunay
import hdbscan
module_path = os.path.abspath(os.path.join('..'))
if module_path not in sys.path:
sys.path.append(module_path)
from .TreeClusters import *
# Cell
#export
def get_alpha_shape(point_list):
"""
Returns a polygon representing the hull of the points sample.
:param list point_list: list of tuples with sample coordinates.
:returns shapely.Polygon: concave hull shapely polygon
"""
uni_po = np.unique(point_list, axis=0)
if len(uni_po) < 3:
raise ValueError('Alpha Shape needs at least 3 points')
if set_colinear(uni_po) == True:
raise ValueError('The set of points cannot be collinear')
list_of_points = [Point_2(l[0], l[1]) for l in point_list]
a = Alpha_shape_2()
a.make_alpha_shape(list_of_points)
a.set_mode(REGULARIZED)
alpha = a.find_optimal_alpha(1).next()
a.set_alpha(alpha)
edges = []
for it in a.alpha_shape_edges():
edges.append(a.segment(it))
lines = []
for e in edges:
source_p = (e.source().x(), e.source().y())
target_p = (e.target().x(), e.target().y())
lines.append(LineString([source_p, target_p]))
return cascaded_union(list(polygonize(lines)))
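# Usage sketch (added): illustrative only, since it requires the CGAL bindings
# imported above to be installed. For a unit square with one interior point the
# optimal alpha shape is expected to be (close to) the square itself.
def _example_get_alpha_shape():
    pts = [(0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0), (0.5, 0.5)]
    hull = get_alpha_shape(pts)
    return hull.area  # expected to be close to 1.0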
# Cell
#export
def set_colinear(list_points):
"""
Check whether all the points in the list are collinear,
i.e. whether every triplet of points is collinear
:param list list_points: list of point coordinates
:returns bool: True if every triplet is collinear
"""
for i in itertools.combinations(list_points, 3):
if collinear(i[0], i[1], i[2]) == False:
return False
return True
# Cell
#export
def collinear(p1, p2, p3):
"""
Check if the three points are collinear
:param p1: point (x, y) to check
:param p2: point (x, y) to check
:param p3: point (x, y) to check
:return bool: True if the points are collinear
"""
return (p1[1]-p2[1]) * (p1[0]-p3[0]) == (p1[1]-p3[1])*(p1[0]-p2[0])
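# Quick check (added): three points on the line y = x are collinear, a bent
# triple is not.
def _example_collinear():
    assert collinear((0, 0), (1, 1), (2, 2))
    assert not collinear((0, 0), (1, 1), (2, 3))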
# Cell
#export
def get_segments(points):
"""
Get the edge segments from a Delaunay triangulation
:param points: points used to build the Delaunay triangulation and extract the edges
:return list edges: list of LineString edges
"""
TIN = Delaunay(points)
# list of coordinates for each edge
edges = []
for tr in TIN.simplices:
for i in range(3):
edge_idx0 = tr[i]
edge_idx1 = tr[(i+1) % 3]
edges.append(LineString((Point(TIN.points[edge_idx0]),
Point(TIN.points[edge_idx1]))))
return edges
# Cell
#export
def get_polygons_buf(lines):
"""
Obtain the polygons from the lines
:param list lines: List of lines
:returns shapely geometry: the union of the polygons built from the
edges (Polygon or MultiPolygon), slightly buffered
"""
linework = linemerge(lines)
linework = unary_union(linework)
result, _, _, _ = polygonize_full(linework)
result = unary_union(result)
result = result.buffer(0.0000001)
return result
# Cell
#export
def labels_filtra(point_points, multy_pol):
"""
Labels each point with the index of the polygon in multy_pol that
contains it; if no polygon contains a point it is labeled -1
:param point_points: points to check
:param multy_pol: shapely Polygon or MultiPolygon to test against
:returns np.array: label array with -1 for points not contained
in any polygon
"""
point_Po = [Point(i) for i in point_points]
labels_p=[]
if type(multy_pol)==shapely.geometry.MultiPolygon :
for po in point_Po:
if multy_pol.contains(po):
for num_pol, poly in enumerate( multy_pol):
if poly.contains(po):
labels_p.append(num_pol)
break
else:
labels_p.append(-1)
elif type(multy_pol)==shapely.geometry.Polygon :
for po in point_Po:
if multy_pol.contains(po):
labels_p.append(0)
else:
labels_p.append(-1)
else:
raise ValueError('The input is not MultiPolygon or Polygon type')
return np.array(labels_p)
# Cell
#export
def levels_from_strings(
string_tag,
level_str='l_',
node_str = 'n_',
**kwargs
):
"""
Returns the levels and the node id using the expected strings
that identify the level id and node id
:param str level_str: string for the level
:param str node_str: string for the nodes
:returns tuple (levels, nodeid):
"""
positions = [i.start() for i in re.finditer( level_str, string_tag )]
levels = [string_tag[i+len(level_str)] for i in positions ]
nodeid_positions = [i.start() for i in re.finditer( node_str, string_tag )]
nodeid = [string_tag[i+len(node_str)] for i in nodeid_positions ]
return levels, nodeid
# Cell
#export
def get_tag_level_df_labels(df, levels_int ):
"""
Get the tag for the cluster
:param Pandas.DataFrame df:
:param int levels_int:
:returns None:
"""
for i in range(levels_int):
df['level_'+ str(i) +'_cluster']= df['cluster_id'].apply(lambda l: level_tag(l,i))
# Cell
#export
def level_tag(list_tags, level_int ):
"""
Returns the tag of list_tags at position level_int, or 'noise' if the list is empty or has no tag at that level
"""
if len(list_tags)==0:
return 'noise'
try:
return list_tags[level_int]
except:
return 'noise'
# Cell
#export
def get_dics_labels(tree_or, tree_res, level_get):
"""
Obtains a list of dictionaries to retag the original tree labels with their
correspondence in tree_res at level level_get + 1
:param tree_or:
:param tree_res:
:param level_get:
:returns list:
"""
dic_list_levels= []
for i in range(level_get):
dic_level_df = get_label_clusters_df(tree_or, tree_res, i)
## Eliminate the clusters with nan
dic_level_df.dropna(axis=0, subset=['Sim_cluster'], inplace=True)
dic_lev = dic_level_df['Sim_cluster'].to_dict()
dic_list_levels.append({'level_ori':'level_'+str(i)+'_cluster', 'dict': dic_lev})
return dic_list_levels
# Cell
#export
def get_label_clusters_df(tree_1, tree_2, level_int):
"""
Obtains the dataframe of the clusters of tree_1 at level_int together with
their most similar cluster in tree_2
:param TreeClusters tree_1:
:param TreeClusters tree_2:
:param int level_int:
:returns Pandas.DataFrame df_level_clus:
"""
level_all = tree_1.get_level(level_int)
df_level_clus = pd.DataFrame(level_all, columns=['Clusters'])
df_level_clus['Area'] = df_level_clus['Clusters'].apply(lambda l: l.polygon_cluster.area)
df_level_clus['Name'] = df_level_clus['Clusters'].apply(lambda l: l.name)
df_level_clus['Sim_cluster'] = df_level_clus['Clusters'].apply(lambda l: get_mini_jaccars(l, tree_2,level_int+1))  ###### Because of how the clusterings are built, the next level must be used
#print('', df_level_clus['Sim_cluster'].dtype)
df_level_clus= df_level_clus.sort_values(by ='Area', ascending=False)
df_level_clus['Sim_cluster'] = (df_level_clus['Sim_cluster']
.where(~df_level_clus.duplicated(subset=['Sim_cluster']), None))
#print(df_level_clus['Sim_cluster'].dtype)
level_2= tree_2.get_level(level_int+1)
df_level_clus['Sim_cluster_name'] =(df_level_clus['Sim_cluster']
.astype('int32', errors='ignore')
.replace({np.nan: ''})
.apply(lambda l: level_2[int(l)].name if l !='' else None) )
return df_level_clus
# Cell
#export
def get_mini_jaccars(cluster, tree_2, level_int):
"""
Finds the most similar cluster in tree_2 at level level_int
:returns int: the index of the most similar polygon in the level
"""
tree_2_level= tree_2.get_level(level_int)
Jaccard_i= [jaccard_distance(cluster.polygon_cluster, j.polygon_cluster) for j in tree_2_level]
valu_min = Jaccard_i.index( min(Jaccard_i))
return valu_min
# Cell
#export
def jaccard_distance(p1, p2):
"""
Computes the Jaccard distance between two polygons.
:param p1: shapely Polygon
:param p2: shapely Polygon
:return float: Jaccard distance
"""
intersection_area = p1.intersection(p2).area
#print(intersection_area)
jacc= 1 - (intersection_area)/(p1.area + p2.area - intersection_area)
return jacc
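# Worked example (added): two unit squares overlapping on half their area have
# intersection 0.5 and union 1.5, so the Jaccard distance is 1 - 0.5/1.5 = 2/3.
def _example_jaccard_distance():
    a = box(0.0, 0.0, 1.0, 1.0)
    b = box(0.5, 0.0, 1.5, 1.0)
    return jaccard_distance(a, b)  # approximately 0.6667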
# Cell
#export
def mod_cid_label(dic_label):
"""
"""
dic_label={str(k):str(v) for k,v in dic_label.items()}
dic_label['noise'] = 'noise'
return dic_label
# Cell
#export
def retag_originals(df_fram_or , df_results, tag_original, tag_results, dic_tag_or_res):
"""
Retags the labels in df_fram_or using the dictionary dic_tag_or_res to match
the tags with the corresponding tag in df_results; labels that are
not in the dictionary get a new tag generated for them.
:param Pandas.DataFrame df_fram_or
:param Pandas.DataFrame df_results
:param tag_original
:param tag_results
:param Pandas.DataFrame dic_tag_or_res
"""
tag_plus= len(df_results[tag_results].unique()) +100 - len(df_results[tag_results].unique())%100
df_fram_or['re_tag_'+str(df_results.name)+'_'+tag_original] = df_fram_or[tag_original].apply(lambda l: dic_tag_or_res[l] if l in dic_tag_or_res.keys() else str(int(l) +tag_plus) )
# Cell
def clustering(
t_next_level_2,
level=None,
algorithm='dbscan',
**kwargs
):
"""Function to get the clusters for single group by
:param t_next_level_2: Dictionary with the points to compute the
cluster
:param level: None Level to compute (Default None)
:param str algorithm : Algorithm type is supported (Default= 'dbscan')
:param int min_points_cluster: minimun number of point to consider a cluster(Default 50)
:param double eps: Epsilon parameter In case is needed
:param bool return_noise: To return the noise (Default False)
:param bool verbose: Printing (Dafault False)
:returns list t_next_level_n: A list with dictionaries with the points, the parent, and nois
"""
verbose= kwargs.get('verbose',False)
min_points = kwargs.get( 'min_points_cluster', 50)
ret_noise= kwargs.get('return_noise', True)
eps = kwargs.get('eps',0.8) # Epsilon value to dbscan
t_next_level_n = []
if level == None:
level = 0
for li_num, cluster_list_D in enumerate(t_next_level_2):
cluster_list = cluster_list_D['points']
cluster_list_pa = cluster_list_D['parent']
if verbose:
print("Size cluster list: ", len(cluster_list))
for c_num, cluster in enumerate(cluster_list):
if verbose:
print("Size cluster: ", len(cluster))
print('Algorithm: ', algorithm)
if len(cluster) > 5:
if algorithm == 'dbscan':
if verbose:
print("Epsilon Value: ", eps)
tmp = compute_dbscan(cluster,
eps_DBSCAN = eps,
debugg=verbose,
**kwargs)
if ret_noise:
noise_points = tmp[1]
tmp = tmp[0]
elif algorithm == 'hdbscan':
tmp = compute_hdbscan(cluster,
**kwargs)
if ret_noise:
noise_points = tmp[1]
tmp = tmp[0]
##########
elif algorithm == 'adaptative_DBSCAN':
#### If the number of cluster is too small
tmp = adaptative_DBSCAN(cluster, **kwargs)
if ret_noise:
noise_points = tmp[1]
tmp = tmp[0]
elif algorithm == 'optics':
tmp = compute_OPTICS(cluster,
eps_OPTICS = eps,
**kwargs)
if ret_noise:
noise_points = tmp[1]
tmp = tmp[0]
##########
elif algorithm == 'natural_cities':
tmp = compute_Natural_cities(cluster,
**kwargs)
if ret_noise:
noise_points = tmp[1]
tmp = tmp[0]
##########
else:
raise ValueError('Algorithm must be dbscan or hdbscan')
# sys.exit("1")
if verbose:
print("The number of resulting clusters is : ", len(tmp))
if ret_noise:
dic_clos = {'points': tmp,
'parent': cluster_list_pa + '_L_'+str(level) +
'_l_' + str(li_num) + '_c_'+str(c_num),
'noise_points':noise_points
}
else:
dic_clos = {'points': tmp, 'parent': cluster_list_pa +
'_L_'+str(level) + '_l_' + str(li_num) + '_c_'+str(c_num)}
t_next_level_n.append(dic_clos)
else:
if ret_noise:
dic_clos = {'points': [],
'parent': cluster_list_pa + '_L_'+str(level) +
'_l_' + str(li_num) + '_c_'+str(c_num),
'noise_points':cluster
}
else:
dic_clos = {'points': [], 'parent': cluster_list_pa +
'_L_'+str(level) + '_l_' + str(li_num) + '_c_'+str(c_num)}
t_next_level_n.append(dic_clos)
return t_next_level_n
# Cell
def recursive_clustering(
this_level, # Dictionary with Points
to_process, # levels to process
cluster_tree, # to store the clusters
level = 0, # current level
**kwargs
):
"""
Performs the recursive clustering.
Calls the clustering step for each
list of clusters, keeping the structure, and then calls itself
until no more clusters satisfy the condition
:param dict this_level: the current level (list of cluster dictionaries)
:param int to_process: the max level to process
:param double eps: The epsilon distance parameter to pass to the chosen algorithm
:param list cluster_tree: list of lists where the levels are stored
:param bool verbose: To print progress
:param double decay: When using dbscan, the decay factor used to reduce eps at each level
:param int min_points_cluster: The min point for each cluster to pass to algorithm
:param str algorithm: The string of the algorithm name to use
"""
algorithm= kwargs.get('algorithm' ,'dbscan') # Algorithm to use
verbose= kwargs.get('verbose',False)
min_points = kwargs.get( 'min_points_cluster', 50)
decay = kwargs.get('decay', 0.7)
eps = kwargs.get('eps' ,0.8) # Epsilon distance to DBSCAN parameter
max_k_increase = kwargs.get('max_k_increase', None)
tmp = None
if level == 0:
kwargs['eps'] = eps
else:
kwargs['eps'] = eps * decay
if max_k_increase != None:
if level == 0:
kwargs['max_k_percent'] = 0.1
else:
kwargs['max_k_percent'] = kwargs['max_k_percent'] * max_k_increase
cluster_result_polygons = []
if level > to_process:
if verbose:
print('Done clustering')
return
######## Get the clusters for the current list of points
all_l = clustering(
this_level,
level=level,
**kwargs
)
##########
cluster_tree.append(all_l)
cluster_n = 0
for i in all_l:
cluster_n += len(i['points'])
if verbose:
print('At level ', level, ' the number of lists are ',
len(all_l), ' with ', cluster_n, 'clusters')
level += 1
if len(all_l) > 0:
return recursive_clustering(all_l,
to_process=to_process,
cluster_tree=cluster_tree,
level= level,
**kwargs
)
else:
if verbose:
print('done clustering')
return
# Cell
def recursive_clustering_tree(dic_points_ori, **kwargs):
"""
Obtains the recursive clustering tree using the specified algorithm
"""
levels_clustering= kwargs.get('levels_clustering',4)
cluster_tree = []
recursive_clustering([dic_points_ori], # Dictionary with Points
levels_clustering, # levels to process
cluster_tree, # to store the clusters
level=0, # current level
**kwargs
)
tree_clus= get_tree_from_clustering(cluster_tree)
tree_from_clus= TreeClusters()
tree_from_clus.levels_nodes = tree_clus
tree_from_clus.root= tree_from_clus.levels_nodes[0][0]
return tree_from_clus
# Cell
def compute_dbscan(cluster, **kwargs):
"""
Sklearn DBSCAN wrapper.
:param cluster: a (N, 2) numpy array containing the observations
:returns list with numpy arrays for all the clusters obtained
"""
eps = kwargs.get( 'eps_DBSCAN',.04)
debugg= kwargs.get( 'debugg',False)
min_samples= kwargs.get( 'min_samples',50)
ret_noise = kwargs.get('return_noise', False)
# Standarize sample
scaler = StandardScaler()
cluster = scaler.fit_transform(cluster)
if debugg:
print('epsilon distance to DBSCAN: ', eps)
print("min_samples to DBScan: ", min_samples )
print("Number of points to fit the DBScan: ",cluster.shape[0])
db = DBSCAN(eps=eps, min_samples=min_samples).fit(cluster) # Check if can be run with n_jobs = -1
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
l_unique_labels = len(set(labels)) - (1 if -1 in labels else 0)
unique_labels = set(labels)
cluster = scaler.inverse_transform(cluster)
clusters = []
if debugg:
print('Number of clusters:' ,l_unique_labels)
for l in unique_labels:
if l != -1:
class_member_mask = (labels == l)
clusters.append(cluster[class_member_mask])
elif l == -1 and debugg == True:
class_member_mask = (labels == l)
print("Muestras consideradas ruido: ", sum(class_member_mask))
if ret_noise == True:
class_member_mask = (labels == -1)
return clusters, cluster[class_member_mask]
return clusters
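# Usage sketch (added, synthetic data): two well separated Gaussian blobs should
# come back as two clusters; note that eps_DBSCAN is in standardized units
# because compute_dbscan scales its input internally.
def _example_compute_dbscan():
    rng = np.random.RandomState(0)
    blob_a = rng.normal(loc=(0.0, 0.0), scale=0.05, size=(200, 2))
    blob_b = rng.normal(loc=(5.0, 5.0), scale=0.05, size=(200, 2))
    points = np.vstack([blob_a, blob_b])
    clusters, noise = compute_dbscan(points, eps_DBSCAN=0.3, min_samples=20,
                                     return_noise=True)
    return len(clusters), len(noise)  # expected (2, 0) for this configuration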
# Cell
def adaptative_DBSCAN(points2_clusters ,
**kwargs):
"""
The function uses the knee of the averaged k-nearest-neighbor distances to obtain a good
value for epsilon and uses DBSCAN to obtain the clusters
:param list points2_clusters: Points to clusterize
:param int max_k: (Default = len(points2_clusters)*.1)
:param int min_k: (Default =50)
:param int step_k: (Default = 50)
:param int leaf_size: (Default = 50)
:param bool scale_points: (Default = True)
:param bool debugg: (Default = False)
:param bool ret_noise: (Default = True)
:returns list: list of clusters. If ret_noise = True, returns a tuple (clusters, noise)
"""
max_k = kwargs.get('max_k', int(len(points2_clusters)*.1))
max_k_percent = kwargs.get('max_k_percent', None)
min_k = kwargs.get('min_k', 50)
step_k = kwargs.get('step_k', 50)
leaf_size = kwargs.get('leaf_size',50)
scale_points= kwargs.get('scale_points',True)
debugg = kwargs.get('verbose',False)
ret_noise = kwargs.get('return_noise', True)
###### Some changes are needed for when
# the clusters are smaller than the previously established minimums
##### Set the feasible minimum and maximum values
if max_k > len(points2_clusters):
raise ValueError('The max_k value is too large for the number of points')
if max_k_percent != None:
max_k = int(len(points2_clusters)*max_k_percent)
if min_k > len(points2_clusters):
print('The min_k value is too large for the number of points returns empty clusters')
if ret_noise == True:
return [] , points2_clusters
else:
return []
if step_k > len(points2_clusters):
raise ValueError('The step_k value is too large for the number of points')
if min_k == max_k:
print('min_k reset to obtain at least 1 value')
min_k = max_k-1
if scale_points ==True:
scaler = StandardScaler()
points_arr = scaler.fit_transform(points2_clusters)
else:
points_arr = points2_clusters
kdt= cKDTree(points_arr, leafsize=leaf_size)
lits_appe_all_aver=[]
for j in range( min_k, max_k, step_k ):
dist_va, ind = kdt.query(points_arr, k=j, n_jobs =-1)
non_zero = dist_va[:, 1:]
non_zero = np.ndarray.flatten(non_zero)
non_zero = np.sort(non_zero)
lis_aver_k=[]
for i in range(int(non_zero.shape[0]/(j-1)) -1):
lis_aver_k.append(np.average(non_zero[i*(j-1):(i+1)*(j-1)]))
average_arr= np.array(lis_aver_k)
kneedle_1_average = kneed.KneeLocator(
range(average_arr.shape[0]),
average_arr,
curve="convex",## This should be the case since the values are sorted
direction="increasing", ## This should be the case since the values are sorted incresing
online=True, ### To find the correct knee the false returns the first find
)
epsilon= kneedle_1_average.knee_y
min_point = kneedle_1_average.knee
        #### We take the average nevertheless
lits_appe_all_aver.append({ 'k':j,
'Epsilon':epsilon,
'value':min_point})
#### Check if the list is empty
if len(lits_appe_all_aver) ==0:
if debugg:
print('DBSCAN')
print('Using 0.6 as epsilon and 20 as Minpoints')
db_scan= DBSCAN(eps=0.6, min_samples=20).fit(points_arr)
else:
        df_all_average = pd.DataFrame(lits_appe_all_aver)
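# --- Added illustration (not part of the original module) --------------------
# The loop above picks epsilon as the knee of the averaged k-NN distance curve.
# A standalone sketch of that idea (names are illustrative; it relies on the
# same kneed / scipy / numpy imports already used by this module):
def _example_knee_epsilon(points, k=50):
    """Estimate a DBSCAN epsilon from the sorted average k-NN distances."""
    kdt = cKDTree(points)
    dist, _ = kdt.query(points, k=k)               # distances to the k nearest neighbours
    avg_dist = np.sort(dist[:, 1:].mean(axis=1))   # drop self-distance, average per point, sort
    knee = kneed.KneeLocator(range(avg_dist.shape[0]), avg_dist,
                             curve="convex", direction="increasing", online=True)
    return knee.knee_y                             # distance value at the knee ~ epsilon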
import os
import shutil
from attrdict import AttrDict
import numpy as np
import pandas as pd
from scipy.stats import gmean
from deepsense import neptune
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split, KFold, StratifiedKFold
from . import pipeline_config as cfg
from .pipelines import PIPELINES
from .hyperparameter_tuning import RandomSearchTuner, HyperoptTuner, SkoptTuner, set_params
from .utils import init_logger, read_params, set_seed, create_submission, verify_submission, calculate_rank, \
read_oof_predictions, parameter_eval
set_seed(cfg.RANDOM_SEED)
logger = init_logger()
ctx = neptune.Context()
params = read_params(ctx, fallback_file='./configs/neptune.yaml')
class PipelineManager:
def train(self, pipeline_name, dev_mode):
train(pipeline_name, dev_mode)
def evaluate(self, pipeline_name, dev_mode):
evaluate(pipeline_name, dev_mode)
def predict(self, pipeline_name, dev_mode, submit_predictions):
predict(pipeline_name, dev_mode, submit_predictions)
def train_evaluate_cv(self, pipeline_name, model_level, dev_mode):
train_evaluate_cv(pipeline_name, model_level, dev_mode)
def train_evaluate_predict_cv(self, pipeline_name, model_level, dev_mode, submit_predictions):
train_evaluate_predict_cv(pipeline_name, model_level, dev_mode, submit_predictions)
def train(pipeline_name, dev_mode):
logger.info('TRAINING')
if bool(params.clean_experiment_directory_before_training) and os.path.isdir(params.experiment_directory):
logger.info('Cleaning experiment_directory...')
shutil.rmtree(params.experiment_directory)
tables = _read_data(dev_mode)
logger.info('Shuffling and splitting into train and test...')
train_data_split, valid_data_split = train_test_split(tables.train_set,
test_size=params.validation_size,
random_state=cfg.RANDOM_SEED,
shuffle=params.shuffle)
logger.info('Target mean in train: {}'.format(train_data_split[cfg.TARGET_COLUMNS].mean()))
logger.info('Target mean in valid: {}'.format(valid_data_split[cfg.TARGET_COLUMNS].mean()))
logger.info('Train shape: {}'.format(train_data_split.shape))
logger.info('Valid shape: {}'.format(valid_data_split.shape))
train_data = {'main_table': {'X': train_data_split.drop(cfg.TARGET_COLUMNS, axis=1),
'y': train_data_split[cfg.TARGET_COLUMNS].values.reshape(-1),
                                 'X_valid': valid_data_split.drop(cfg.TARGET_COLUMNS, axis=1),
'y_valid': valid_data_split[cfg.TARGET_COLUMNS].values.reshape(-1),
},
'application': {'X': tables.application},
'bureau_balance': {'X': tables.bureau_balance},
'bureau': {'X': tables.bureau},
'credit_card_balance': {'X': tables.credit_card_balance},
'installments_payments': {'X': tables.installments_payments},
'pos_cash_balance': {'X': tables.pos_cash_balance},
'previous_application': {'X': tables.previous_application},
}
pipeline = PIPELINES[pipeline_name](config=cfg.SOLUTION_CONFIG, train_mode=True)
pipeline.clean_cache()
logger.info('Start pipeline fit and transform')
pipeline.fit_transform(train_data)
pipeline.clean_cache()
def evaluate(pipeline_name, dev_mode):
logger.info('EVALUATION')
logger.info('Reading data...')
tables = _read_data(dev_mode)
logger.info('Shuffling and splitting to get validation split...')
_, valid_data_split = train_test_split(tables.train_set,
test_size=params.validation_size,
random_state=cfg.RANDOM_SEED,
shuffle=params.shuffle)
logger.info('Target mean in valid: {}'.format(valid_data_split[cfg.TARGET_COLUMNS].mean()))
logger.info('Valid shape: {}'.format(valid_data_split.shape))
y_true = valid_data_split[cfg.TARGET_COLUMNS].values
eval_data = {'main_table': {'X': valid_data_split.drop(cfg.TARGET_COLUMNS, axis=1),
'y': None,
},
'application': {'X': tables.application},
'bureau_balance': {'X': tables.bureau_balance},
'bureau': {'X': tables.bureau},
'credit_card_balance': {'X': tables.credit_card_balance},
'installments_payments': {'X': tables.installments_payments},
'pos_cash_balance': {'X': tables.pos_cash_balance},
'previous_application': {'X': tables.previous_application},
}
pipeline = PIPELINES[pipeline_name](config=cfg.SOLUTION_CONFIG, train_mode=False)
pipeline.clean_cache()
logger.info('Start pipeline transform')
output = pipeline.transform(eval_data)
pipeline.clean_cache()
y_pred = output['prediction']
logger.info('Calculating ROC_AUC on validation set')
score = roc_auc_score(y_true, y_pred)
logger.info('ROC_AUC score on validation is {}'.format(score))
ctx.channel_send('ROC_AUC', 0, score)
def predict(pipeline_name, dev_mode, submit_predictions):
logger.info('PREDICTION')
tables = _read_data(dev_mode)
test_data = {'main_table': {'X': tables.test_set,
'y': None,
},
'application': {'X': tables.application},
'bureau_balance': {'X': tables.bureau_balance},
'bureau': {'X': tables.bureau},
'credit_card_balance': {'X': tables.credit_card_balance},
'installments_payments': {'X': tables.installments_payments},
'pos_cash_balance': {'X': tables.pos_cash_balance},
'previous_application': {'X': tables.previous_application},
}
pipeline = PIPELINES[pipeline_name](config=cfg.SOLUTION_CONFIG, train_mode=False)
pipeline.clean_cache()
logger.info('Start pipeline transform')
output = pipeline.transform(test_data)
pipeline.clean_cache()
y_pred = output['prediction']
if not dev_mode:
logger.info('creating submission file...')
submission = create_submission(tables.test_set, y_pred)
logger.info('verifying submission...')
sample_submission = pd.read_csv(params.sample_submission_filepath)
verify_submission(submission, sample_submission)
submission_filepath = os.path.join(params.experiment_directory, 'submission.csv')
submission.to_csv(submission_filepath, index=None, encoding='utf-8')
logger.info('submission persisted to {}'.format(submission_filepath))
logger.info('submission head \n\n{}'.format(submission.head()))
if submit_predictions and params.kaggle_api:
make_submission(submission_filepath)
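# --- Added illustration (not part of this module) -----------------------------
# `create_submission` is imported from .utils and not shown in this excerpt.
# A minimal sketch of what it plausibly does (an assumption, not the actual
# implementation): pair each test-set ID with its predicted probability.
def _example_create_submission(test_set, predictions):
    return pd.DataFrame({cfg.ID_COLUMNS[0]: test_set[cfg.ID_COLUMNS[0]].values,
                         cfg.TARGET_COLUMNS[0]: predictions})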
def train_evaluate_cv(pipeline_name, model_level, dev_mode):
if parameter_eval(params.hyperparameter_search__method) is not None:
score_mean, score_std = train_evaluate_cv_tuning(pipeline_name, model_level, dev_mode)
else:
score_mean, score_std = train_evaluate_cv_one_run(pipeline_name, model_level, cfg.SOLUTION_CONFIG, dev_mode)
logger.info('ROC_AUC mean {}, ROC_AUC std {}'.format(score_mean, score_std))
ctx.channel_send('ROC_AUC', 0, score_mean)
ctx.channel_send('ROC_AUC STD', 0, score_std)
def train_evaluate_cv_tuning(pipeline_name, model_level, dev_mode):
config = cfg.SOLUTION_CONFIG
searchable_config = cfg.SOLUTION_CONFIG.tuner
if params.hyperparameter_search__method == 'random':
tuner = RandomSearchTuner(config=searchable_config,
runs=params.hyperparameter_search__runs)
elif params.hyperparameter_search__method == 'skopt':
tuner = SkoptTuner(config=searchable_config,
runs=params.hyperparameter_search__runs,
maximize=True)
elif params.hyperparameter_search__method == 'hyperopt':
tuner = HyperoptTuner(config=searchable_config,
runs=params.hyperparameter_search__runs,
maximize=True)
else:
raise NotImplementedError
results = []
while tuner.in_progress:
if tuner.run_id == 0:
proposed_config = tuner.next(None)
else:
proposed_config = tuner.next(score_mean)
config = set_params(config, proposed_config)
score_mean, score_std = train_evaluate_cv_one_run(pipeline_name, model_level, config, dev_mode,
tunable_mode=True)
logger.info('Run {} ROC_AUC mean {}, ROC_AUC std {}'.format(tuner.run_id, score_mean, score_std))
ctx.channel_send('Tuning CONFIG', tuner.run_id, proposed_config)
ctx.channel_send('Tuning ROC_AUC', tuner.run_id, score_mean)
ctx.channel_send('Tuning ROC_AUC STD', tuner.run_id, score_std)
results.append((score_mean, score_std, proposed_config))
best_score_mean, best_score_std, best_config = sorted(results, key=lambda x: x[0])[-1]
logger.info('ROC_AUC mean {}, ROC_AUC std {}'.format(best_score_mean, best_score_std))
    logger.info('Best Params {}'.format(best_config))
ctx.channel_send('BEST_CONFIG', str(best_config))
return best_score_mean, best_score_std
def train_evaluate_cv_one_run(pipeline_name, model_level, config, dev_mode, tunable_mode=False):
if bool(params.clean_experiment_directory_before_training) and os.path.isdir(params.experiment_directory):
logger.info('Cleaning experiment_directory...')
shutil.rmtree(params.experiment_directory)
if model_level == 'first':
tables = _read_data(dev_mode)
main_table_train = tables.train_set
elif model_level == 'second':
tables = _read_data(dev_mode=False)
main_table_train, main_table_test = read_oof_predictions(params.first_level_oof_predictions_dir,
params.train_filepath,
id_column=cfg.ID_COLUMNS[0],
target_column=cfg.TARGET_COLUMNS[0])
else:
raise NotImplementedError
target_values = main_table_train[cfg.TARGET_COLUMNS].values.reshape(-1)
fold_generator = _get_fold_generator(target_values)
fold_scores = []
for fold_id, (train_idx, valid_idx) in enumerate(fold_generator):
train_data_split, valid_data_split = main_table_train.iloc[train_idx], main_table_train.iloc[valid_idx]
logger.info('Started fold {}'.format(fold_id))
logger.info('Target mean in train: {}'.format(train_data_split[cfg.TARGET_COLUMNS].mean()))
logger.info('Target mean in valid: {}'.format(valid_data_split[cfg.TARGET_COLUMNS].mean()))
logger.info('Train shape: {}'.format(train_data_split.shape))
logger.info('Valid shape: {}'.format(valid_data_split.shape))
score, _, _ = _fold_fit_evaluate_loop(train_data_split,
valid_data_split,
tables,
fold_id, pipeline_name, config, model_level)
logger.info('Fold {} ROC_AUC {}'.format(fold_id, score))
if not tunable_mode:
ctx.channel_send('Fold {} ROC_AUC'.format(fold_id), 0, score)
fold_scores.append(score)
score_mean, score_std = np.mean(fold_scores), np.std(fold_scores)
return score_mean, score_std
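# --- Added illustration (not part of this module) -----------------------------
# `_get_fold_generator` is referenced above but defined outside this excerpt.
# A plausible sketch given the KFold / StratifiedKFold imports at the top of the
# file (the n_splits / stratification switches are assumptions):
def _example_get_fold_generator(target_values, n_splits=5, stratified=True):
    if stratified:
        cv = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=cfg.RANDOM_SEED)
        return cv.split(target_values, target_values)
    cv = KFold(n_splits=n_splits, shuffle=True, random_state=cfg.RANDOM_SEED)
    return cv.split(target_values)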
def train_evaluate_predict_cv(pipeline_name, model_level, dev_mode, submit_predictions):
if bool(params.clean_experiment_directory_before_training) and os.path.isdir(params.experiment_directory):
logger.info('Cleaning experiment_directory...')
shutil.rmtree(params.experiment_directory)
if model_level == 'first':
tables = _read_data(dev_mode)
main_table_train = tables.train_set
main_table_test = tables.test_set
elif model_level == 'second':
tables = _read_data(dev_mode=False)
main_table_train, main_table_test = read_oof_predictions(params.first_level_oof_predictions_dir,
params.train_filepath,
id_column=cfg.ID_COLUMNS[0],
target_column=cfg.TARGET_COLUMNS[0])
main_table_test = main_table_test.groupby(cfg.ID_COLUMNS).mean().reset_index()
else:
raise NotImplementedError
target_values = main_table_train[cfg.TARGET_COLUMNS].values.reshape(-1)
fold_generator = _get_fold_generator(target_values)
fold_scores, out_of_fold_train_predictions, out_of_fold_test_predictions = [], [], []
for fold_id, (train_idx, valid_idx) in enumerate(fold_generator):
train_data_split, valid_data_split = main_table_train.iloc[train_idx], main_table_train.iloc[valid_idx]
logger.info('Started fold {}'.format(fold_id))
logger.info('Target mean in train: {}'.format(train_data_split[cfg.TARGET_COLUMNS].mean()))
logger.info('Target mean in valid: {}'.format(valid_data_split[cfg.TARGET_COLUMNS].mean()))
logger.info('Train shape: {}'.format(train_data_split.shape))
logger.info('Valid shape: {}'.format(valid_data_split.shape))
score, out_of_fold_prediction, test_prediction = _fold_fit_evaluate_predict_loop(train_data_split,
valid_data_split,
main_table_test,
tables,
fold_id,
pipeline_name,
model_level)
logger.info('Fold {} ROC_AUC {}'.format(fold_id, score))
ctx.channel_send('Fold {} ROC_AUC'.format(fold_id), 0, score)
out_of_fold_train_predictions.append(out_of_fold_prediction)
out_of_fold_test_predictions.append(test_prediction)
fold_scores.append(score)
out_of_fold_train_predictions = pd.concat(out_of_fold_train_predictions, axis=0)
out_of_fold_test_predictions = pd.concat(out_of_fold_test_predictions, axis=0)
test_prediction_aggregated = _aggregate_test_prediction(out_of_fold_test_predictions)
score_mean, score_std = np.mean(fold_scores), np.std(fold_scores)
logger.info('ROC_AUC mean {}, ROC_AUC std {}'.format(score_mean, score_std))
ctx.channel_send('ROC_AUC', 0, score_mean)
ctx.channel_send('ROC_AUC STD', 0, score_std)
logger.info('Saving predictions')
out_of_fold_train_predictions.to_csv(os.path.join(params.experiment_directory,
'{}_out_of_fold_train_predictions.csv'.format(pipeline_name)),
index=None)
out_of_fold_test_predictions.to_csv(os.path.join(params.experiment_directory,
'{}_out_of_fold_test_predictions.csv'.format(pipeline_name)),
index=None)
test_aggregated_file_path = os.path.join(params.experiment_directory,
'{}_test_predictions_{}.csv'.format(pipeline_name,
params.aggregation_method))
test_prediction_aggregated.to_csv(test_aggregated_file_path, index=None)
if not dev_mode:
logger.info('verifying submission...')
sample_submission = pd.read_csv(params.sample_submission_filepath)
verify_submission(test_prediction_aggregated, sample_submission)
if submit_predictions and params.kaggle_api:
make_submission(test_aggregated_file_path)
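# --- Added illustration (not part of this module) -----------------------------
# `_aggregate_test_prediction` is referenced above but defined outside this
# excerpt. One plausible way to combine the per-fold test predictions, using
# the gmean import at the top of the file (column names are assumptions):
def _example_aggregate_test_prediction(out_of_fold_test_predictions, method='mean'):
    grouped = out_of_fold_test_predictions.groupby(cfg.ID_COLUMNS[0])[cfg.TARGET_COLUMNS[0]]
    aggregated = grouped.apply(gmean) if method == 'gmean' else grouped.mean()
    return aggregated.reset_index()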
def make_submission(submission_filepath):
logger.info('making Kaggle submit...')
os.system('kaggle competitions submit -c home-credit-default-risk -f {} -m {}'
.format(submission_filepath, params.kaggle_message))
def _read_data(dev_mode):
logger.info('Reading data...')
if dev_mode:
nrows = cfg.DEV_SAMPLE_SIZE
logger.info('running in "dev-mode". Sample size is: {}'.format(cfg.DEV_SAMPLE_SIZE))
else:
nrows = None
if any([parameter_eval(params.use_bureau),
parameter_eval(params.use_bureau_aggregations)]):
nrows_bureau = nrows
else:
nrows_bureau = 1
if parameter_eval(params.use_bureau_balance):
nrows_bureau_balance = nrows
else:
nrows_bureau_balance = 1
if any([parameter_eval(params.use_credit_card_balance),
parameter_eval(params.use_credit_card_balance_aggregations)]):
nrows_credit_card_balance = nrows
else:
nrows_credit_card_balance = 1
if any([parameter_eval(params.use_installments_payments),
parameter_eval(params.use_installments_payments_aggregations)]):
nrows_installments_payments = nrows
else:
nrows_installments_payments = 1
if any([parameter_eval(params.use_pos_cash_balance),
parameter_eval(params.use_pos_cash_balance_aggregations)]):
nrows_pos_cash_balance = nrows
else:
nrows_pos_cash_balance = 1
if any([parameter_eval(params.use_previous_applications),
parameter_eval(params.use_previous_applications_aggregations),
parameter_eval(params.use_previous_application_categorical_features),
parameter_eval(params.use_application_previous_application_categorical_features)]):
nrows_previous_applications = nrows
else:
nrows_previous_applications = 1
raw_data = {}
logger.info('Reading application_train ...')
    application_train = pd.read_csv(params.train_filepath, nrows=nrows)
__author__ = 'brendan'
import main
import pandas as pd
import numpy as np
from datetime import datetime as dt
from matplotlib import pyplot as plt
import random
import itertools
import time
import dateutil
from datetime import timedelta
cols = ['BoP FA Net', 'BoP FA OI Net', 'BoP FA PI Net', 'CA % GDP']
raw_data = pd.read_csv('raw_data/BoP_UK.csv', index_col=0, parse_dates=True)
data = pd.DataFrame(raw_data.iloc[:240, :4].fillna(0)).astype(float)
data.columns = cols
data.index = pd.date_range('1955-01-01', '2014-12-31', freq='Q')
raw_eur = pd.read_csv('raw_data/EUR_CA.csv', index_col=0, parse_dates=True)
raw_eur = raw_eur[::-1]
raw_eur.index = pd.date_range('1999-01-01', '2015-03-01', freq='M')
raw_eur.index.name = 'Date'
raw_eur = raw_eur.resample('Q', how='sum')
data_eur_gdp_q = pd.read_csv('raw_data/MACRO_DATA.csv', index_col=0, parse_dates=True)['EUR_GDP_Q'].dropna()
data_eur_gdp_q.columns = ['EUR_GDP_Q']
data_eur_gdp_q.index.name = 'Date'
data_eur_gdp_q = data_eur_gdp_q.loc['1999-03-31':]
end_gdp = pd.DataFrame(data=[data_eur_gdp_q.iloc[-1], data_eur_gdp_q.iloc[-1],
data_eur_gdp_q.iloc[-1], data_eur_gdp_q.iloc[-1]],
index=pd.date_range('2014-06-30', '2015-03-31', freq='Q'))
eur_gdp = pd.concat([data_eur_gdp_q, end_gdp])
eur_gdp.columns = ['EUR_CA']
eur_ca = raw_eur.div(eur_gdp)
eur_ca.columns = ['EUR CA']
uk_ca = data['CA % GDP'] / 100.0
uk_ca.columns = ['UK CA']
uk_fa = pd.DataFrame(data.iloc[:, :3])
from arche.rules.coverage import check_fields_coverage, compare_fields_counts
from arche.rules.result import Level
from conftest import create_result, Job
import pandas as pd
import pytest
@pytest.mark.parametrize(
"df, expected_messages",
[
(
pd.DataFrame([{"_key": 0}]),
{
Level.INFO: [
(
"0 totally empty field(s)",
(
pd.DataFrame(
[(1, 100)],
columns=["Values Count", "Percent"],
index=["_key"],
)
.rename_axis("Field")
.to_string()
),
)
]
},
),
(
pd.DataFrame([(0, None)], columns=["_key", "Field"]),
{
Level.ERROR: [
(
"1 totally empty field(s)",
(
pd.DataFrame(
[(0, 0), (1, 100)],
columns=["Values Count", "Percent"],
index=["Field", "_key"],
)
.rename_axis("Field")
.to_string()
),
)
]
},
),
(
pd.DataFrame([(0, "")], columns=["_key", "Field"]),
{
Level.INFO: [
(
"0 totally empty field(s)",
(
pd.DataFrame(
[(1, 100), (1, 100)],
columns=["Values Count", "Percent"],
index=["Field", "_key"],
)
.rename_axis("Field")
.to_string()
),
)
]
},
),
],
)
def test_check_fields_coverage(df, expected_messages):
result = check_fields_coverage(df)
assert result == create_result("Fields Coverage", expected_messages)
@pytest.mark.parametrize(
"source_stats, target_stats, expected_messages",
[
(
{"counts": {"f1": 100, "f2": 150}, "totals": {"input_values": 100}},
{"counts": {"f2": 100, "f3": 150}, "totals": {"input_values": 100}},
{
Level.ERROR: [
(
"Coverage difference is greater than 10% for 3 field(s)",
(
pd.DataFrame([100, 33, 100], columns=["Difference, %"])
.rename(index={0: "f1", 1: "f2", 2: "f3"})
.to_string()
),
)
]
},
),
(
{"counts": {"f1": 100, "f2": 150}, "totals": {"input_values": 100}},
{"counts": {"f1": 106, "f2": 200}, "totals": {"input_values": 100}},
{
Level.ERROR: [
(
("Coverage difference is greater than 10% for 1 field(s)"),
(
                            pd.DataFrame([25], columns=["Difference, %"])
import pandas as pd
import numpy as np
import re
import math
import sys
def top_extract(s):
top = []
    for i in range(1, len(s)):
if s[i-1].lower() == 'top':
top.append(s[i])
return top
def base_extract(s):
base = []
    for i in range(1, len(s)):
if s[i-1].lower() == 'base':
base.append(s[i])
return base
def middle_extract(s):
middle = []
    for i in range(1, len(s)):
if s[i-1].lower() == 'middle':
middle.append(s[i])
return middle
def note_extract(s):
result = []
location = ['top', 'middle', 'base']
    for i in range(1, len(s)):
for ll in location:
if s[i-1].lower() == ll:
result.append(s[i])
return result
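# --- Added illustration (not part of the original script) ---------------------
# The extractors above walk a flat token list in which a position tag ('top',
# 'middle', 'base') is immediately followed by the note name, e.g.:
#   tokens = ['top', 'Bergamot', 'middle', 'Jasmine', 'base', 'Musk']
#   top_extract(tokens)    -> ['Bergamot']
#   middle_extract(tokens) -> ['Jasmine']
#   base_extract(tokens)   -> ['Musk']
#   note_extract(tokens)   -> ['Bergamot', 'Jasmine', 'Musk']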
# Strip the tag in front of every note, as well as the 'top'/'middle'/'base' markers
# If nothing is left, set it to NaN
def delete_note_tag(s):
s = re.split('-1|0|1|2|3|4|5|6|7|8|9|10|11|12|13|14|15|16|17|18|19|20',s)[1]
if len(s)==0:
s = np.nan
return s
def notes_table(ori_data):
data = ori_data.loc[:, ['title', 'notes_1', 'notes_2', 'notes_3', 'notes_4', 'notes_5',
'notes_6', 'notes_7', 'notes_8', 'notes_9', 'notes_10',
'notes_11', 'notes_12', 'notes_13', 'notes_14', 'notes_15',
'notes_16', 'notes_17', 'notes_18', 'notes_19', 'notes_20']]
split_data = data
split_data.fillna('-1', inplace=True)
for i in range(1, 21):
split_data['notes_{}'.format(str(i))] = data['notes_{}'.format(str(i))].apply(
lambda s: delete_note_tag(s))
note_in_perfume = pd.DataFrame(columns=['perfume_name', 'note_name'])
rows, cols = data.shape
    # Process all the notes and record the note-to-perfume correspondence
for row in range(0, rows):
cur_perfume = split_data['title'][row]
i = 1
while i < 21:
if pd.isnull(data['notes_{}'.format(str(i))][row]):
i = 21
else:
new = pd.DataFrame({'perfume_name': cur_perfume,
'note_name': data['notes_{}'.format(str(i))][row]}, index=[1])
note_in_perfume = note_in_perfume.append(new, ignore_index=True)
i += 1
    # Put all the notes into a set -> produces a table of unique notes
note_list = list(set(note_in_perfume['note_name'].tolist()))
note_table = pd.DataFrame(note_list, columns=['note_name'])
note_table.to_csv('nnnnew_note.csv', index=False)
note_in_perfume.to_csv('note_in_perfume.csv', index=False)
'''
data = ori_data['title']
for i in range(1, 21):
data['notes_{}'.format(str(i))] = data['notes_{}'.format(str(i))]
data = ori_data
split_data = data
split_data.fillna('-1', inplace=True)
for i in range(1, 21):
split_data['notes_{}'.format(str(i))] = data['notes_{}'.format(str(i))].apply(
lambda s: re.split('-1|0|1|2|3|4|5|6|7|8|9|10|11|12|13|14|15|16|17|18|19|20', s))
notes = split_data['notes_1']
for i in range(2, 21):
notes = notes + split_data['notes_{}'.format(str(i))]
notes = notes.apply(lambda s: list(filter(lambda x: x != '', s)))
    # Extract all the notes -> consolidate them into a single column
test_notes = notes.apply(note_extract)
#top_notes = notes.apply(top_extract)
#middle_notes = notes.apply(middle_extract)
#base_notes = notes.apply(base_extract)
'''
return
def perfume_table(original_data):
rows, cols = original_data.shape
data = pd.DataFrame(columns=['title', 'brand', 'date', 'image', 'description', 'target'])
data['title'] = data['title'].astype(np.str)
data['brand'] = original_data['brand']
data['date'] = original_data['date']
data['image'] = data['image'].astype(np.str)
data['description'] = data['description'].astype(np.str)
data['target'] = 0
# perfume_name, brand, date, image, description, target
    # Process the title: strip the 'for women/men' suffix and map it to target
target_dict = {'for women': 0,
'for men': 1,
'for women and men': 2}
for r in range(0, rows):
item = original_data['title'][r]
if 'for men' in item:
tt = target_dict['for men']
title = item[0:(item.find('for') - 1)]
data.loc[r, 'title'] = title
elif 'for women' in item:
if 'for women and men' in item:
tt = target_dict['for women and men']
else:
tt = target_dict['for women']
title = item[0:(item.find('for') - 1)]
data.loc[r, 'title'] = title
else:
tt = 3
            data.loc[r, 'title'] = item
data.loc[r, 'target'] = tt
data['target'] = data['target'].astype(dtype=int)
data.rename(columns={'title': 'perfume_name'}, inplace=True)
data.to_csv('nnnnew_perfume.csv', index = False)
return
# Turn all the CSV data into SQL INSERT statements
def insert_perfume_data_into_sql():
pp_index = pd.read_csv('/Users/woody/UNSW-MIT/21T2-COMP9900/pp_index.csv')
pp_df = pp_index[['Unnamed: 0', 'perfume_name', 'brand', 'date', 'target']]
d = pp_df.values.tolist()
k_list = [0, 10000, 20000, 30000, 40000, 51212]
k = 0
while k in range(0, 5):
k_1 = k_list[k]
k_2 = k_list[k + 1]
result = 'INSERT INTO ttperfume(ttperfume_id, ttperfume_name, ttbrand, ttdate, tttarget) VALUES'
i = k_1
while i in range(k_1, k_2):
if pd.isna(d[i][1]):
d[i][1] = d[i][2]
if "'" in d[i][1]:
d[i][1] = d[i][1].replace("'", "''")
if "'" in d[i][2]:
d[i][2] = d[i][2].replace("'", "''")
if i != k_2 - 1:
dd = '(' + str(d[i][0]) + ", '" + str(d[i][1]) + "', '" + str(d[i][2]) + "', " + str(
d[i][3]) + ", " + str(d[i][4]) + '),'
else:
dd = '(' + str(d[i][0]) + ", '" + str(d[i][1]) + "', '" + str(d[i][2]) + "', " + str(
d[i][3]) + ", " + str(d[i][4]) + ');'
result = result + dd
i += 1
# result = result.replace('"',"'",10086)
name = 'ttttpp_index_' + str(k_1) + '_' + str(k_2) + 'k.txt'
fh = open(name, 'w')
fh.write(result)
fh.close()
k += 1
return
# note_in_perfume processing
# Deduplicate -> turn the CSV into INSERT statements (str)
def process_n_in_p():
note_df = pd.read_csv('/Users/woody/UNSW-MIT/21T2-COMP9900/note_index.csv')
nn = note_df.set_index('note_name')
note_dic = nn.to_dict()['Unnamed: 0']
# key: perfume_name value:perfume_id
pp1 = pd.read_csv('/Users/woody/UNSW-MIT/21T2-COMP9900/perfume_for_index.csv')
pp12 = pp1.set_index('title')
pp_dic = pp12.to_dict()['Unnamed: 0']
    # key: perfume_id, value: perfume_name (used for sanity checks)
pp22 = pp1.set_index('Unnamed: 0')
p2_dic = pp22.to_dict()['title']
    n_in_p = pd.read_csv('/Users/woody/UNSW-MIT/21T2-COMP9900/note_in_perfume.csv')
import pandas as pd
from business_rules.operators import (DataframeType, StringType,
NumericType, BooleanType, SelectType,
SelectMultipleType, GenericType)
from . import TestCase
from decimal import Decimal
import sys
import pandas
class StringOperatorTests(TestCase):
def test_operator_decorator(self):
self.assertTrue(StringType("foo").equal_to.is_operator)
def test_string_equal_to(self):
self.assertTrue(StringType("foo").equal_to("foo"))
self.assertFalse(StringType("foo").equal_to("Foo"))
def test_string_not_equal_to(self):
self.assertTrue(StringType("foo").not_equal_to("Foo"))
self.assertTrue(StringType("foo").not_equal_to("boo"))
self.assertFalse(StringType("foo").not_equal_to("foo"))
def test_string_equal_to_case_insensitive(self):
self.assertTrue(StringType("foo").equal_to_case_insensitive("FOo"))
self.assertTrue(StringType("foo").equal_to_case_insensitive("foo"))
self.assertFalse(StringType("foo").equal_to_case_insensitive("blah"))
def test_string_starts_with(self):
self.assertTrue(StringType("hello").starts_with("he"))
self.assertFalse(StringType("hello").starts_with("hey"))
self.assertFalse(StringType("hello").starts_with("He"))
def test_string_ends_with(self):
self.assertTrue(StringType("hello").ends_with("lo"))
self.assertFalse(StringType("hello").ends_with("boom"))
self.assertFalse(StringType("hello").ends_with("Lo"))
def test_string_contains(self):
self.assertTrue(StringType("hello").contains("ell"))
self.assertTrue(StringType("hello").contains("he"))
self.assertTrue(StringType("hello").contains("lo"))
self.assertFalse(StringType("hello").contains("asdf"))
self.assertFalse(StringType("hello").contains("ElL"))
def test_string_matches_regex(self):
self.assertTrue(StringType("hello").matches_regex(r"^h"))
self.assertFalse(StringType("hello").matches_regex(r"^sh"))
def test_non_empty(self):
self.assertTrue(StringType("hello").non_empty())
self.assertFalse(StringType("").non_empty())
self.assertFalse(StringType(None).non_empty())
class NumericOperatorTests(TestCase):
def test_instantiate(self):
err_string = "foo is not a valid numeric type"
with self.assertRaisesRegexp(AssertionError, err_string):
NumericType("foo")
def test_numeric_type_validates_and_casts_decimal(self):
ten_dec = Decimal(10)
ten_int = 10
ten_float = 10.0
if sys.version_info[0] == 2:
ten_long = long(10)
else:
ten_long = int(10) # long and int are same in python3
ten_var_dec = NumericType(ten_dec) # this should not throw an exception
ten_var_int = NumericType(ten_int)
ten_var_float = NumericType(ten_float)
ten_var_long = NumericType(ten_long)
self.assertTrue(isinstance(ten_var_dec.value, Decimal))
self.assertTrue(isinstance(ten_var_int.value, Decimal))
self.assertTrue(isinstance(ten_var_float.value, Decimal))
self.assertTrue(isinstance(ten_var_long.value, Decimal))
def test_numeric_equal_to(self):
self.assertTrue(NumericType(10).equal_to(10))
self.assertTrue(NumericType(10).equal_to(10.0))
self.assertTrue(NumericType(10).equal_to(10.000001))
self.assertTrue(NumericType(10.000001).equal_to(10))
self.assertTrue(NumericType(Decimal('10.0')).equal_to(10))
self.assertTrue(NumericType(10).equal_to(Decimal('10.0')))
self.assertFalse(NumericType(10).equal_to(10.00001))
self.assertFalse(NumericType(10).equal_to(11))
def test_numeric_not_equal_to(self):
self.assertTrue(NumericType(10).not_equal_to(10.00001))
self.assertTrue(NumericType(10).not_equal_to(11))
self.assertTrue(NumericType(Decimal('10.0')).not_equal_to(Decimal('10.1')))
self.assertFalse(NumericType(10).not_equal_to(10))
self.assertFalse(NumericType(10).not_equal_to(10.0))
self.assertFalse(NumericType(Decimal('10.0')).not_equal_to(Decimal('10.0')))
def test_other_value_not_numeric(self):
error_string = "10 is not a valid numeric type"
with self.assertRaisesRegexp(AssertionError, error_string):
NumericType(10).equal_to("10")
def test_numeric_greater_than(self):
self.assertTrue(NumericType(10).greater_than(1))
self.assertFalse(NumericType(10).greater_than(11))
self.assertTrue(NumericType(10.1).greater_than(10))
self.assertFalse(NumericType(10.000001).greater_than(10))
self.assertTrue(NumericType(10.000002).greater_than(10))
def test_numeric_greater_than_or_equal_to(self):
self.assertTrue(NumericType(10).greater_than_or_equal_to(1))
self.assertFalse(NumericType(10).greater_than_or_equal_to(11))
self.assertTrue(NumericType(10.1).greater_than_or_equal_to(10))
self.assertTrue(NumericType(10.000001).greater_than_or_equal_to(10))
self.assertTrue(NumericType(10.000002).greater_than_or_equal_to(10))
self.assertTrue(NumericType(10).greater_than_or_equal_to(10))
def test_numeric_less_than(self):
self.assertTrue(NumericType(1).less_than(10))
self.assertFalse(NumericType(11).less_than(10))
self.assertTrue(NumericType(10).less_than(10.1))
self.assertFalse(NumericType(10).less_than(10.000001))
self.assertTrue(NumericType(10).less_than(10.000002))
def test_numeric_less_than_or_equal_to(self):
self.assertTrue(NumericType(1).less_than_or_equal_to(10))
self.assertFalse(NumericType(11).less_than_or_equal_to(10))
self.assertTrue(NumericType(10).less_than_or_equal_to(10.1))
self.assertTrue(NumericType(10).less_than_or_equal_to(10.000001))
self.assertTrue(NumericType(10).less_than_or_equal_to(10.000002))
self.assertTrue(NumericType(10).less_than_or_equal_to(10))
class BooleanOperatorTests(TestCase):
def test_instantiate(self):
err_string = "foo is not a valid boolean type"
with self.assertRaisesRegexp(AssertionError, err_string):
BooleanType("foo")
err_string = "None is not a valid boolean type"
with self.assertRaisesRegexp(AssertionError, err_string):
BooleanType(None)
def test_boolean_is_true_and_is_false(self):
self.assertTrue(BooleanType(True).is_true())
self.assertFalse(BooleanType(True).is_false())
self.assertFalse(BooleanType(False).is_true())
self.assertTrue(BooleanType(False).is_false())
class SelectOperatorTests(TestCase):
def test_contains(self):
self.assertTrue(SelectType([1, 2]).contains(2))
self.assertFalse(SelectType([1, 2]).contains(3))
self.assertTrue(SelectType([1, 2, "a"]).contains("A"))
def test_does_not_contain(self):
self.assertTrue(SelectType([1, 2]).does_not_contain(3))
self.assertFalse(SelectType([1, 2]).does_not_contain(2))
self.assertFalse(SelectType([1, 2, "a"]).does_not_contain("A"))
class SelectMultipleOperatorTests(TestCase):
def test_contains_all(self):
self.assertTrue(SelectMultipleType([1, 2]).
contains_all([2, 1]))
self.assertFalse(SelectMultipleType([1, 2]).
contains_all([2, 3]))
self.assertTrue(SelectMultipleType([1, 2, "a"]).
contains_all([2, 1, "A"]))
def test_is_contained_by(self):
self.assertTrue(SelectMultipleType([1, 2]).
is_contained_by([2, 1, 3]))
self.assertFalse(SelectMultipleType([1, 2]).
is_contained_by([2, 3, 4]))
self.assertTrue(SelectMultipleType([1, 2, "a"]).
is_contained_by([2, 1, "A"]))
def test_shares_at_least_one_element_with(self):
self.assertTrue(SelectMultipleType([1, 2]).
shares_at_least_one_element_with([2, 3]))
self.assertFalse(SelectMultipleType([1, 2]).
shares_at_least_one_element_with([4, 3]))
self.assertTrue(SelectMultipleType([1, 2, "a"]).
shares_at_least_one_element_with([4, "A"]))
def test_shares_exactly_one_element_with(self):
self.assertTrue(SelectMultipleType([1, 2]).
shares_exactly_one_element_with([2, 3]))
self.assertFalse(SelectMultipleType([1, 2]).
shares_exactly_one_element_with([4, 3]))
self.assertTrue(SelectMultipleType([1, 2, "a"]).
shares_exactly_one_element_with([4, "A"]))
self.assertFalse(SelectMultipleType([1, 2, 3]).
shares_exactly_one_element_with([2, 3, "a"]))
def test_shares_no_elements_with(self):
self.assertTrue(SelectMultipleType([1, 2]).
shares_no_elements_with([4, 3]))
self.assertFalse(SelectMultipleType([1, 2]).
shares_no_elements_with([2, 3]))
self.assertFalse(SelectMultipleType([1, 2, "a"]).
shares_no_elements_with([4, "A"]))
class DataframeOperatorTests(TestCase):
def test_exists(self):
df = pandas.DataFrame.from_dict({
"var1": [1, 2, 4, ],
"var2": [3, 5, 6, ],
})
result: pd.Series = DataframeType({"value": df}).exists({"target": "var1"})
self.assertTrue(result.equals(pd.Series([True, True, True, ])))
result: pd.Series = DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).exists({"target": "--r1"})
self.assertTrue(result.equals(pd.Series([True, True, True, ])))
result: pd.Series = DataframeType({"value": df}).exists({"target": "invalid"})
self.assertTrue(result.equals(pd.Series([False, False, False, ])))
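    # Note (added): throughout these tests, passing column_prefix_map={"--": "va"}
    # makes DataframeType resolve a prefixed target such as "--r1" to the concrete
    # column "var1" before the comparison runs.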
def test_not_exists(self):
df = pandas.DataFrame.from_dict({
"var1": [1, 2, 4, ],
"var2": [3, 5, 6, ]
})
result: pd.Series = DataframeType({"value": df}).not_exists({"target": "invalid"})
self.assertTrue(result.equals(pd.Series([True, True, True, ])))
result: pd.Series = DataframeType({"value": df}).not_exists({"target": "var1"})
self.assertTrue(result.equals(pd.Series([False, False, False, ])))
result: pd.Series = DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_exists({"target": "--r1"})
self.assertTrue(result.equals(pd.Series([False, False, False, ])))
def test_equal_to(self):
df = pandas.DataFrame.from_dict({
"var1": [1, 2, 4, "", 7, ],
"var2": [3, 5, 6, "", 2, ],
"var3": [1, 3, 8, "", 7, ],
"var4": ["test", "issue", "one", "", "two", ]
})
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": ""
}).equals(pandas.Series([False, False, False, False, False, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": 2
}).equals(pandas.Series([False, True, False, False, False, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([True, False, False, False, True, ])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).equal_to({
"target": "--r1",
"comparator": "--r3"
}).equals(pandas.Series([True, False, False, False, True, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([False, False, False, False, False, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": 20
}).equals(pandas.Series([False, False, False, False, False, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var4",
"comparator": "var1",
"value_is_literal": True
}).equals(pandas.Series([False, False, False, False, False, ])))
def test_not_equal_to(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).not_equal_to({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).not_equal_to({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_equal_to({
"target": "--r1",
"comparator": "--r2"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_equal_to({
"target": "--r1",
"comparator": 20
}).equals(pandas.Series([True, True, True])))
def test_equal_to_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["word", "", "new", "val"],
"var2": ["WORD", "", "test", "VAL"],
"var3": ["LET", "", "GO", "read"]
})
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": "NEW"
}).equals(pandas.Series([False, False, True, False])))
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": ""
}).equals(pandas.Series([False, False, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).equal_to_case_insensitive({
"target": "--r1",
"comparator": "--r2"
}).equals(pandas.Series([True, False, False, True])))
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([True, False, False, True])))
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": "var1",
"value_is_literal": True
}).equals(pandas.Series([False, False, False, False])))
def test_not_equal_to_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["word", "new", "val"],
"var2": ["WORD", "test", "VAL"],
"var3": ["LET", "GO", "read"],
"var4": ["WORD", "NEW", "VAL"]
})
self.assertTrue(DataframeType({"value": df}).not_equal_to_case_insensitive({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).not_equal_to_case_insensitive({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([False, True, False])))
self.assertTrue(DataframeType({"value": df}).not_equal_to_case_insensitive({
"target": "var1",
"comparator": "var1",
"value_is_literal": True
}).equals(pandas.Series([True, True, True])))
def test_less_than(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).less_than({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).less_than({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).less_than({
"target": "--r1",
"comparator": "var3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df}).less_than({
"target": "var2",
"comparator": 2
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).less_than({
"target": "var1",
"comparator": 3
}).equals(pandas.Series([True, True, False])))
another_df = pandas.DataFrame.from_dict(
{
"LBDY": [4, None, None, None, None]
}
)
self.assertTrue(DataframeType({"value": another_df}).less_than({
"target": "LBDY",
"comparator": 5
}).equals(pandas.Series([True, False, False, False, False, ])))
def test_less_than_or_equal_to(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).less_than_or_equal_to({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).less_than_or_equal_to({
"target": "--r1",
"comparator": "var4"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).less_than_or_equal_to({
"target": "var2",
"comparator": "var1"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).less_than_or_equal_to({
"target": "var2",
"comparator": 2
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).less_than_or_equal_to({
"target": "var2",
"comparator": "var3"
}).equals(pandas.Series([False, False, True])))
another_df = pandas.DataFrame.from_dict(
{
"LBDY": [4, 5, None, None, None]
}
)
self.assertTrue(DataframeType({"value": another_df}).less_than_or_equal_to({
"target": "LBDY",
"comparator": 5
}).equals(pandas.Series([True, True, False, False, False, ])))
def test_greater_than(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).greater_than({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).greater_than({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).greater_than({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).greater_than({
"target": "var2",
"comparator": 2
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).greater_than({
"target": "var1",
"comparator": 5000
}).equals(pandas.Series([False, False, False])))
another_df = pandas.DataFrame.from_dict(
{
"LBDY": [4, None, None, None, None]
}
)
self.assertTrue(DataframeType({"value": another_df}).greater_than({
"target": "LBDY",
"comparator": 3
}).equals(pandas.Series([True, False, False, False, False, ])))
def test_greater_than_or_equal_to(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).greater_than_or_equal_to({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).greater_than_or_equal_to({
"target": "var1",
"comparator": "--r4"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).greater_than_or_equal_to({
"target": "var2",
"comparator": "var3"
}).equals(pandas.Series([True, True, False])))
self.assertTrue(DataframeType({"value": df}).greater_than_or_equal_to({
"target": "var2",
"comparator": 2
}).equals(pandas.Series([True, True, True])))
another_df = pandas.DataFrame.from_dict(
{
"LBDY": [4, 3, None, None, None]
}
)
self.assertTrue(DataframeType({"value": another_df}).greater_than_or_equal_to({
"target": "LBDY",
"comparator": 3
}).equals(pandas.Series([True, True, False, False, False, ])))
def test_contains(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4],
"string_var": ["hj", "word", "c"],
"var5": [[1,3,5],[1,3,5], [1,3,5]]
})
self.assertTrue(DataframeType({"value": df}).contains({
"target": "var1",
"comparator": 2
}).equals(pandas.Series([False, True, False])))
self.assertTrue(DataframeType({"value": df}).contains({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).contains({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df}).contains({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).contains({
"target": "string_var",
"comparator": "string_var"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).contains({
"target": "string_var",
"comparator": "string_var",
"value_is_literal": True
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).contains({
"target": "var5",
"comparator": "var1"
}).equals(pandas.Series([True, False, False])))
def test_does_not_contain(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4],
"string_var": ["hj", "word", "c"],
"var5": [[1,3,5],[1,3,5], [1,3,5]]
})
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "var1",
"comparator": 5
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).does_not_contain({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "string_var",
"comparator": "string_var",
"value_is_literal": True
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "string_var",
"comparator": "string_var"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "var5",
"comparator": "var1"
}).equals(pandas.Series([False, True, True])))
def test_contains_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["pikachu", "charmander", "squirtle"],
"var2": ["PIKACHU", "CHARIZARD", "BULBASAUR"],
"var3": ["POKEMON", "CHARIZARD", "BULBASAUR"],
"var4": [
["pikachu", "charizard", "bulbasaur"],
["chikorita", "cyndaquil", "totodile"],
["chikorita", "cyndaquil", "totodile"]
]
})
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var1",
"comparator": "PIKACHU"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).contains_case_insensitive({
"target": "--r1",
"comparator": "--r2"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var3",
"comparator": "var3"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var3",
"comparator": "var3",
"value_is_literal": True
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var4",
"comparator": "var2"
}).equals(pandas.Series([True, False, False])))
def test_does_not_contain_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["pikachu", "charmander", "squirtle"],
"var2": ["PIKACHU", "CHARIZARD", "BULBASAUR"],
"var3": ["pikachu", "charizard", "bulbasaur"],
"var4": [
["pikachu", "charizard", "bulbasaur"],
["chikorita", "cyndaquil", "totodile"],
["chikorita", "cyndaquil", "totodile"]
]
})
self.assertTrue(DataframeType({"value": df}).does_not_contain_case_insensitive({
"target": "var1",
"comparator": "IVYSAUR"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain_case_insensitive({
"target": "var3",
"comparator": "var2"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).does_not_contain_case_insensitive({
"target": "var3",
"comparator": "var3",
"value_is_literal": True
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain_case_insensitive({
"target": "var3",
"comparator": "var3"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).does_not_contain_case_insensitive({
"target": "var4",
"comparator": "var2"
}).equals(pandas.Series([False, True, True])))
def test_is_contained_by(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4],
"var5": [[1,2,3], [1,2], [17]]
})
self.assertTrue(DataframeType({"value": df}).is_contained_by({
"target": "var1",
"comparator": [4,5,6]
}).equals(pandas.Series([False, False, True])))
self.assertTrue(DataframeType({"value": df}).is_contained_by({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).is_contained_by({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df}).is_contained_by({
"target": "var1",
"comparator": [9, 10, 11]
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).is_contained_by({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).is_contained_by({
"target": "var1",
"comparator": "var5"
}).equals(pandas.Series([True, True, False])))
def test_is_not_contained_by(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4],
"var5": [[1,2,3], [1,2], [17]]
})
self.assertTrue(DataframeType({"value": df}).is_not_contained_by({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).is_not_contained_by({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by({
"target": "var1",
"comparator": [9, 10, 11]
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by({
"target": "var1",
"comparator": "var1"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by({
"target": "var1",
"comparator": "var5"
}).equals(pandas.Series([False, False, True])))
def test_is_contained_by_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"],
"var4": [set(["word"]), set(["test"])]
})
self.assertTrue(DataframeType({"value": df}).is_contained_by_case_insensitive({
"target": "var1",
"comparator": ["word", "TEST"]
}).equals(pandas.Series([True, True])))
self.assertTrue(DataframeType({"value": df}).is_contained_by_case_insensitive({
"target": "var1",
"comparator": "var1"
}).equals(pandas.Series([True, True])))
self.assertTrue(DataframeType({"value": df}).is_contained_by_case_insensitive({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).is_contained_by_case_insensitive({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df}).is_contained_by_case_insensitive({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([True, True])))
self.assertTrue(DataframeType({"value": df}).is_contained_by_case_insensitive({
"target": "var3",
"comparator": "var4"
}).equals(pandas.Series([False, False])))
def test_is_not_contained_by_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"],
"var4": [set(["word"]), set(["test"])]
})
self.assertTrue(DataframeType({"value": df}).is_not_contained_by_case_insensitive({
"target": "var1",
"comparator": ["word", "TEST"]
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by_case_insensitive({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).is_not_contained_by_case_insensitive({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([True, True])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by_case_insensitive({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by_case_insensitive({
"target": "var3",
"comparator": "var4"
}).equals(pandas.Series([True, True])))
def test_prefix_matches_regex(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).prefix_matches_regex({
"target": "--r2",
"comparator": "w.*",
"prefix": 2
}).equals(pandas.Series([True, False])))
self.assertTrue(DataframeType({"value": df}).prefix_matches_regex({
"target": "var2",
"comparator": "[0-9].*",
"prefix": 2
}).equals(pandas.Series([False, False])))
def test_suffix_matches_regex(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).suffix_matches_regex({
"target": "--r1",
"comparator": "es.*",
"suffix": 3
}).equals(pandas.Series([False, True])))
self.assertTrue(DataframeType({"value": df}).suffix_matches_regex({
"target": "var1",
"comparator": "[0-9].*",
"suffix": 3
}).equals(pandas.Series([False, False])))
    def test_not_prefix_matches_regex(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_prefix_matches_regex({
"target": "--r1",
"comparator": ".*",
"prefix": 2
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df}).not_prefix_matches_regex({
"target": "var2",
"comparator": "[0-9].*",
"prefix": 2
}).equals(pandas.Series([True, True])))
def test_not_suffix_matches_regex(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df}).not_suffix_matches_regex({
"target": "var1",
"comparator": ".*",
"suffix": 3
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_suffix_matches_regex({
"target": "--r1",
"comparator": "[0-9].*",
"suffix": 3
}).equals(pandas.Series([True, True])))
    def test_matches_regex(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).matches_regex({
"target": "--r1",
"comparator": ".*",
}).equals(pandas.Series([True, True])))
self.assertTrue(DataframeType({"value": df}).matches_regex({
"target": "var2",
"comparator": "[0-9].*",
}).equals(pandas.Series([False, False])))
def test_not_matches_regex(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df}).not_matches_regex({
"target": "var1",
"comparator": ".*",
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_matches_regex({
"target": "--r1",
"comparator": "[0-9].*",
}).equals(pandas.Series([True, True])))
def test_starts_with(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).starts_with({
"target": "--r1",
"comparator": "WO",
}).equals(pandas.Series([True, False])))
self.assertTrue(DataframeType({"value": df}).starts_with({
"target": "var2",
"comparator": "ABC",
}).equals(pandas.Series([False, False])))
def test_ends_with(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).ends_with({
"target": "--r1",
"comparator": "abc",
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df}).ends_with({
"target": "var1",
"comparator": "est",
}).equals(pandas.Series([False, True])))
def test_has_equal_length(self):
df = pandas.DataFrame.from_dict(
{
"var_1": ['test', 'value']
}
)
df_operator = DataframeType({"value": df, "column_prefix_map": {"--": "va"}})
result = df_operator.has_equal_length({"target": "--r_1", "comparator": 4})
self.assertTrue(result.equals(pandas.Series([True, False])))
def test_has_not_equal_length(self):
df = pandas.DataFrame.from_dict(
{
"var_1": ['test', 'value']
}
)
df_operator = DataframeType({"value": df, "column_prefix_map": {"--": "va"}})
result = df_operator.has_not_equal_length({"target": "--r_1", "comparator": 4})
self.assertTrue(result.equals(pandas.Series([False, True])))
def test_longer_than(self):
df = pandas.DataFrame.from_dict(
{
"var_1": ['test', 'value']
}
)
df_operator = DataframeType({"value": df, "column_prefix_map": {"--": "va"}})
self.assertTrue(df_operator.longer_than({"target": "--r_1", "comparator": 3}).equals(pandas.Series([True, True])))
def test_longer_than_or_equal_to(self):
df = pandas.DataFrame.from_dict(
{
"var_1": ['test', 'alex']
}
)
df_operator = DataframeType({"value": df, "column_prefix_map": {"--": "va"}})
self.assertTrue(df_operator.longer_than_or_equal_to({"target": "--r_1", "comparator": 3}).equals(pandas.Series([True, True])))
self.assertTrue(df_operator.longer_than_or_equal_to({"target": "var_1", "comparator": 4}).equals(pandas.Series([True, True])))
def test_shorter_than(self):
df = pandas.DataFrame.from_dict(
{
"var_1": ['test', 'val']
}
)
df_operator = DataframeType({"value": df, "column_prefix_map": {"--": "va"}})
self.assertTrue(df_operator.shorter_than({"target": "--r_1", "comparator": 5}).equals(pandas.Series([True, True])))
def test_shorter_than_or_equal_to(self):
df = pandas.DataFrame.from_dict(
{
"var_1": ['test', 'alex']
}
)
df_operator = DataframeType({"value": df, "column_prefix_map": {"--": "va"}})
self.assertTrue(df_operator.shorter_than_or_equal_to({"target": "--r_1", "comparator": 5}).equals(pandas.Series([True, True])))
self.assertTrue(df_operator.shorter_than_or_equal_to({"target": "var_1", "comparator": 4}).equals(pandas.Series([True, True])))
def test_contains_all(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['test', 'value', 'word'],
"var2": ["test", "value", "test"]
}
)
self.assertTrue(DataframeType({"value": df}).contains_all({
"target": "var1",
"comparator": "var2",
}))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).contains_all({
"target": "--r1",
"comparator": "--r2",
}))
self.assertFalse(DataframeType({"value": df}).contains_all({
"target": "var2",
"comparator": "var1",
}))
self.assertTrue(DataframeType({"value": df}).contains_all({
"target": "var2",
"comparator": ["test", "value"],
}))
def test_not_contains_all(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['test', 'value', 'word'],
"var2": ["test", "value", "test"]
}
)
self.assertTrue(DataframeType({"value": df}).contains_all({
"target": "var1",
"comparator": "var2",
}))
self.assertFalse(DataframeType({"value": df}).contains_all({
"target": "var2",
"comparator": "var1",
}))
self.assertTrue(DataframeType({"value": df}).contains_all({
"target": "var2",
"comparator": ["test", "value"],
}))
def test_invalid_date(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['2021', '2021', '2021', '2021', '2099'],
"var2": ["2099", "2022", "2034", "90999", "20999"],
"var3": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
}
)
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).invalid_date({"target": "--r1"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).invalid_date({"target": "var3"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).invalid_date({"target": "var2"})
.equals(pandas.Series([False, False, False, True, True])))
def test_date_equal_to(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['2021', '2021', '2021', '2021', '2021'],
"var2": ["2099", "2022", "2034", "90999", "20999"],
"var3": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var4": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var5": ["1997-08", "1997-08-16", "1997-08-16T19:20:30.45+01:00", "1997-08-16T19:20:30+01:00", "1997-08-16T19:20+01:00"],
"var6": ["1998-08", "1998-08-11", "1998-08-17T20:21:31.46+01:00", "1998-08-17T20:21:31+01:00", "1998-08-17T20:21+01:00"],
"var7": ["", None, "", "", ""]
}
)
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var1", "comparator": '2021'})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "1997-07-16T19:20:30.45+01:00"})
.equals(pandas.Series([False, False, True, False, False])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var4"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).date_equal_to({"target": "--r3", "comparator": "--r4", "date_component": "year"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var5", "date_component": "hour"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var5", "date_component": "minute"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var5", "date_component": "second"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var5", "date_component": "microsecond"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var5", "date_component": "year"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var5", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var6", "date_component": "year"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var6", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var7", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
def test_date_not_equal_to(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['2021', '2021', '2021', '2021', '2021'],
"var2": ["2099", "2022", "2034", "90999", "20999"],
"var3": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var4": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var5": ["1997-08", "1997-08-16", "1997-08-16T19:20:30.45+01:00", "1997-08-16T19:20:30+01:00", "1997-08-16T19:20+01:00"],
"var6": ["1998-08", "1998-08-11", "1998-08-17T20:21:31.46+01:00", "1998-08-17T20:21:31+01:00", "1998-08-17T20:21+01:00"],
"var7": ["", None, "", "", ""]
}
)
self.assertTrue(DataframeType({"value": df}).date_not_equal_to({"target": "var1", "comparator": '2022'})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_not_equal_to({"target": "var3", "comparator": "1998-07-16T19:20:30.45+01:00"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_not_equal_to({"target": "var3", "comparator": "var4"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_not_equal_to({"target": "var3", "comparator": "var4", "date_component": "year"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_not_equal_to({"target": "var3", "comparator": "var6", "date_component": "hour"})
.equals(pandas.Series([False, False, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_not_equal_to({"target": "var3", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_not_equal_to({"target": "var7", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
def test_date_less_than(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['2021', '2021', '2021', '2021', '2021'],
"var2": ["2099", "2022", "2034", "90999", "20999"],
"var3": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var4": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var5": ["1997-08", "1997-08-16", "1997-08-16T19:20:30.45+01:00", "1997-08-16T19:20:30+01:00", "1997-08-16T19:20+01:00"],
"var6": ["1998-08", "1998-08-11", "1998-08-17T20:21:31.46+01:00", "1998-08-17T20:21:31+01:00", "1998-08-17T20:21+01:00"],
"var7": ["", None, "", "", ""]
}
)
self.assertTrue(DataframeType({"value": df}).date_less_than({"target": "var1", "comparator": '2022'})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_less_than({"target": "var3", "comparator": "1998-07-16T19:20:30.45+01:00"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_less_than({"target": "var3", "comparator": "var4"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_less_than({"target": "var3", "comparator": "var4", "date_component": "year"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_less_than({"target": "var3", "comparator": "var6", "date_component": "hour"})
                        .equals(pandas.Series([False, False, True, True, True])))
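# ---------------------------------------------------------------------------
# Illustrative sketch only: this is NOT the real DataframeType implementation,
# just a minimal reproduction of what the suffix-regex assertions earlier in
# this test class appear to exercise, using plain pandas string slicing plus
# re.fullmatch. Function and variable names below are made up for illustration.
import re
import pandas

def suffix_matches_regex_sketch(df, target, comparator, suffix):
    """True where the last `suffix` characters of df[target] match the regex."""
    return df[target].str[-suffix:].apply(
        lambda value: bool(re.fullmatch(comparator, value))
    )

_example = pandas.DataFrame({"var1": ["WORD", "test"]})
# Mirrors the expectations asserted above: ".*" matches any 3-character suffix,
# while "[0-9].*" matches none of them.
assert suffix_matches_regex_sketch(_example, "var1", ".*", 3).tolist() == [True, True]
assert suffix_matches_regex_sketch(_example, "var1", "[0-9].*", 3).tolist() == [False, False]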
#Cluster-then-predict for classification tasks
#https://towardsdatascience.com/cluster-then-predict-for-classification-tasks-142fdfdc87d6
# author :<NAME>
from sklearn.datasets import make_classification
# Dataset
X, y = make_classification(n_samples=1000, n_features=8, n_informative=5, n_classes=4)
import pandas as pd
df = pd.DataFrame(X, columns=['f{}'.format(i) for i in range(8)])
#Divide into Train/Test
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(df, y, test_size=0.25, random_state=90210)
# Applying K-means
import numpy as np
from sklearn.cluster import KMeans
from typing import Tuple
def get_clusters(X_train: pd.DataFrame, X_test: pd.DataFrame, n_clusters: int) -> Tuple[pd.DataFrame, pd.DataFrame]:
"""
applies k-means clustering to training data to find clusters and predicts them for the test set
"""
clustering = KMeans(n_clusters=n_clusters, random_state=8675309,n_jobs=-1)
clustering.fit(X_train)
# apply the labels
train_labels = clustering.labels_
X_train_clstrs = X_train.copy()
X_train_clstrs['clusters'] = train_labels
# predict labels on the test set
test_labels = clustering.predict(X_test)
X_test_clstrs = X_test.copy()
X_test_clstrs['clusters'] = test_labels
return X_train_clstrs, X_test_clstrs
X_train_clstrs, X_test_clstrs = get_clusters(X_train, X_test, 2)
# Scaling
from sklearn.preprocessing import StandardScaler
def scale_features(X_train: pd.DataFrame, X_test: pd.DataFrame) -> Tuple[pd.DataFrame, pd.DataFrame]:
"""
applies standard scaler (z-scores) to training data and predicts z-scores for the test set
"""
scaler = StandardScaler()
to_scale = [col for col in X_train.columns.values]
scaler.fit(X_train[to_scale])
X_train[to_scale] = scaler.transform(X_train[to_scale])
# predict z-scores on the test set
X_test[to_scale] = scaler.transform(X_test[to_scale])
return X_train, X_test
X_train_scaled, X_test_scaled = scale_features(X_train_clstrs, X_test_clstrs)
# Experimentation
# to divide the df by cluster, we need to ensure we use the correct class labels; we'll use pandas to do that
train_clusters = X_train_scaled.copy()
test_clusters = X_test_scaled.copy()
train_clusters['y'] = y_train
test_clusters['y'] = y_test
# locate the "0" cluster
train_0 = train_clusters.loc[train_clusters.clusters < 0] # after scaling, cluster label 0 went negative
test_0 = test_clusters.loc[test_clusters.clusters < 0]
y_train_0 = train_0.y.values
y_test_0 = test_0.y.values
# locate the "1" cluster
train_1 = train_clusters.loc[train_clusters.clusters > 0] # after scaling, 1 dropped slightly
test_1 = test_clusters.loc[test_clusters.clusters > 0]
y_train_1 = train_1.y.values
y_test_1 = test_1.y.values
# the base dataset has no "clusters" feature
X_train_base = X_train_scaled.drop(columns=['clusters'])
X_test_base = X_test_scaled.drop(columns=['clusters'])
# drop the targets from the training set
X_train_0 = train_0.drop(columns=['y'])
X_test_0 = test_0.drop(columns=['y'])
X_train_1 = train_1.drop(columns=['y'])
X_test_1 = test_1.drop(columns=['y'])
datasets = {
'base': (X_train_base, y_train, X_test_base, y_test),
'cluster-feature': (X_train_scaled, y_train, X_test_scaled, y_test),
'cluster-0': (X_train_0, y_train_0, X_test_0, y_test_0),
'cluster-1': (X_train_1, y_train_1, X_test_1, y_test_1),
}
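# Quick shape check on the four experimental datasets (an addition for clarity,
# not part of the original walkthrough):
for name, (X_tr, y_tr, X_te, y_te) in datasets.items():
    print(f"{name}: train {X_tr.shape}, {len(y_tr)} labels / test {X_te.shape}, {len(y_te)} labels")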
from sklearn.linear_model import LogisticRegression
from sklearn import model_selection
from sklearn.metrics import classification_report
def run_exps(datasets: dict) -> pd.DataFrame:
'''
runs experiments on a dict of datasets
'''
# initialize a logistic regression classifier
model = LogisticRegression(class_weight='balanced', solver='lbfgs', random_state=999, max_iter=250)
dfs = []
results = []
conditions = []
scoring = ['accuracy','precision_weighted','recall_weighted','f1_weighted']
for condition, splits in datasets.items():
X_train = splits[0]
y_train = splits[1]
X_test = splits[2]
y_test = splits[3]
kfold = model_selection.KFold(n_splits=5, shuffle=True, random_state=90210)
cv_results = model_selection.cross_validate(model, X_train, y_train, cv=kfold, scoring=scoring)
clf = model.fit(X_train, y_train)
y_pred = clf.predict(X_test)
print(condition)
print(classification_report(y_test, y_pred))
results.append(cv_results)
conditions.append(condition)
this_df = pd.DataFrame(cv_results)
this_df['condition'] = condition
dfs.append(this_df)
final = pd.concat(dfs, ignore_index=True)
    # We have wide-format data; let's use pd.melt to fix this
results_long = pd.melt(final,id_vars=['condition'],var_name='metrics', value_name='values')
# fit time metrics, we don't need these
time_metrics = ['fit_time','score_time']
results = results_long[~results_long['metrics'].isin(time_metrics)] # get df without fit data
results = results.sort_values(by='values')
return results
df = run_exps(datasets)
# results
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
plt.figure(figsize=(20, 12))
sns.set(font_scale=2.5)
g = sns.boxplot(x="condition", y="values", hue="metrics", data=df, palette="Set3")
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.title('Comparison of Dataset by Classification Metric')
pd.pivot_table(df, index='condition', columns=['metrics'], values=['values'], aggfunc='mean')
#!/usr/bin/env python
'''
Define tools to read and analyze crush data.
Written by <NAME>
For CIGITI at the Hospital for Sick Children Toronto
'''
# IMPORTS
import pandas as pd
import numpy as np
import scipy.signal as signal
import matplotlib.pyplot as plt
import os
import platform
from pathlib import Path
import glob
import re
from pdb import set_trace
# CONSTANTS
PATH = Path('/Users/mattmacdonald/Data/RAWDATA_CRUSH_PAPER2/')
PIN_DIAM = 5.0 # mm
# IMPORT FUNCTIONS
def study_outline(root_folder=None):
"""
Reads study patients and associated details from a single csv file
in the root folder containing study outline details, at minimum:
Patient Code,Procedure Date,Gender,DOB,Procedure,Tissue,Surgeon,
Notes,Issues,Histology,Classification
File must be named "*MASTERLIST.csv"
Assumes all data is kept in sub folders in root folder
Returns dataframe with Test ID as index
"""
def get_folder_name(row):
date_fmt = "%Y%m%d"
fmt = "{date} - {code} - {clsf}"
date = row['Procedure Date'].strftime(date_fmt)
code = row['Patient Code'].upper()
clsf = row['Classification'].upper()
return fmt.format(**locals())
# Read outline file
if root_folder is None:
root_folder = Path.cwd()
files = glob.glob(str(root_folder / '*MASTERLIST.csv'))
assert len(files) == 1, ('Root data folder must contain one master '
'csv file.')
study = pd.read_csv(root_folder / files[0])
# Cleanup and organize information, including data subfolders
study = study.fillna('N/A')
study['Procedure Date'] = pd.to_datetime(study['Procedure Date'],
format='%m/%d/%Y')
study['DOB'] = pd.to_datetime(study['DOB'],
format='%m/%d/%Y')
study['Age'] = study['Procedure Date'] - study['DOB']
study['Folder Name'] = study.apply(get_folder_name, axis=1)
study.index = study.index + 1 # one indexed
study = study.rename_axis('Test ID')
return study
def study_targets(root_folder=None):
"""
Reads targets for the dataset from a csv file in the root folder
File must be named "*TARGETS.csv"
Returns dataframe
"""
# Read targets file
if root_folder is None:
root_folder = Path.cwd()
files = glob.glob(str(root_folder / '*TARGETS.csv'))
assert len(files) == 1, ('Root data folder must contain one targets '
'csv file.')
    targets = pd.read_csv(root_folder / files[0])
from coralquant.models.odl_model import BS_SZ50_Stocks
import baostock as bs
import pandas as pd
from sqlalchemy import String
from coralquant.database import engine
from coralquant.settings import CQ_Config
def get_sz50_stocks():
"""
    Fetch the constituent stock data of the SSE 50 index (上证50)
"""
    # Delete existing data
BS_SZ50_Stocks.del_all_data()
    # Log in to the system
lg = bs.login()
    # Print the login response info
print('login respond error_code:' + lg.error_code)
print('login respond error_msg:' + lg.error_msg)
    # Query the SSE 50 constituent stocks
rs = bs.query_sz50_stocks()
print('query_sz50 error_code:'+rs.error_code)
print('query_sz50 error_msg:'+rs.error_msg)
    # Collect the result set
sz50_stocks = []
while (rs.error_code == '0') & rs.next():
        # Fetch one record at a time and merge the records together
sz50_stocks.append(rs.get_row_data())
    result = pd.DataFrame(sz50_stocks, columns=rs.fields)
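    # The original fragment ends above. The lines below are a hedged sketch of
    # the usual final steps (persist the frame and log out); the table name is
    # assumed from the BS_SZ50_Stocks model imported at the top and is not
    # taken from the original source.
    result.to_sql(BS_SZ50_Stocks.__tablename__, engine, if_exists="append", index=False)
    bs.logout()
    return result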
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 21 14:16:53 2018
@author: deborahkhider
This routine takes the output of the weather
generator and puts it back into FLDAS-like data.
"""
import xarray as xr
import numpy as np
import os
import pandas as pd
import sys
def csv_to_FLDAS(wgen_out, wgen_in, path_out, file_prefix):
'''Transform the output WGEN csv file into NetCDF files
This method transforms the output csv file from the weather generator
WGEN into a netCDF file following FLDAS format. The netCDF files are
output to path_out following the same file structure. All subfolders
are created automatically.
Args:
wgen_out: The csv file output from the weather generator
wgen_in: The csv file input to the weather generator
path_out: an existing directory in which to write out the netCDF files
file_prefix: A prefix for the netcdf files
'''
# Get the csv files into pandas dataframe
data_out = pd.read_table(wgen_out, delimiter = ',',index_col=False)
data_in = pd.read_table(wgen_in, delimiter = ',',index_col=False)
#Density of water
rho_w = 997
# Get the lat/lon bounds
min_lat = np.min(data_in['lat'])
max_lat = np.max(data_in['lat'])
min_lon = np.min(data_in['lon'])
max_lon = np.max(data_in['lon'])
# Make the coordinates
step = 0.1
lats = np.arange(min_lat,max_lat,step)
# carefully handle the end
if abs(max_lat-lats[-1])>step:
lats = np.append(lats,max_lat)
lons = np.arange(min_lon,max_lon,step)
# carefully handle the end
if abs(max_lon-lons[-1])>step:
        lons = np.append(lons,max_lon)
# Add latitude/longitude to data_out
data_out['lat'] =np.nan
data_out['lon'] =np.nan
for idx, row in data_out.iterrows():
station_id = (row['id'])
data_out.at[idx,'lat'] = data_in.loc[data_in['station id'] == station_id]['lat']
data_out.at[idx,'lon'] = data_in.loc[data_in['station id'] == station_id]['lon']
# Get the time bounds
timestamp = pd.to_datetime(data_out[['year','month','day']])
# Add to dataframe
data_out['timestamp']=timestamp
# Calculate mean T
data_out['meanT'] =np.nan
for idx, row in data_out.iterrows():
data_out.at[idx,'meanT'] = np.mean([data_out['tmin'][idx],data_out['tmax'][idx]])
# Find the unique values in timestamp
timestamp_unique = timestamp.unique()
for ts in timestamp_unique:
data_out_time = data_out.loc[data_out['timestamp'] == ts]
        # Transform to pandas datetime to make xarray happy.
        ts = pd.to_datetime([ts])
import os
os.system("pip3 install hyperopt")
os.system("pip3 install lightgbm")
os.system("pip3 install pandas==0.24.2")
os.system("pip3 install --upgrade scikit-learn==0.20")
os.system('pip3 install tensorflow')
os.system('pip3 install keras')
#os.system("pip3 install fastFM")
import copy
import numpy as np
import pandas as pd
import time
from automl import predict, train, validate,oneHotEncoding, oneHotEncodingCSRMatrix, train_and_predict, train_fm_keras, train_fm_keras_batch
from CONSTANT import MAIN_TABLE_NAME
import CONSTANT
from merge import merge_table
from preprocess import clean_df, clean_tables, feature_engineer
from util import Config, log, show_dataframe, timeit
import tensorflow as tf
from sklearn import metrics
from feature_expansion import cat_value_counts
class Model:
def __init__(self, info):
#print("Time before init: %s"%str(time.time()))
self.config = Config(info)
#print(self.config["start_time"])
#print("Time after init: %s"%str(time.time()))
#print(self.config.time)
#input()
self.tables = None
self.y = None
@timeit
def fit(self, Xs, y, time_remain):
self.tables = copy.deepcopy(Xs)
self.y = y
# clean_tables(Xs)
# X = merge_table(Xs, self.config)
# clean_df(X)
# feature_engineer(X, self.config)
#train(X, y, self.config)
@timeit
def predict(self, X_test, time_remain):
### calculate time range
'''Xs = self.tables
main_table = Xs[MAIN_TABLE_NAME]
print(main_table.columns)
input()
min_train_time = np.min(main_table[[c for c in main_table.columns if c.startswith(CONSTANT.TIME_PREFIX)]])
max_train_time = np.max(main_table[[c for c in main_table.columns if c.startswith(CONSTANT.TIME_PREFIX)]])
min_test_time = np.min(X_test[[c for c in X_test.columns if c.startswith(CONSTANT.TIME_PREFIX)]])
max_test_time = np.max(X_test[[c for c in X_test.columns if c.startswith(CONSTANT.TIME_PREFIX)]])
print("minimum time in training dataset %s"%str(min_train_time))
print("maximum time in training dataset %s"%str(max_train_time))
print("minimum time in testing dataset %s"%str(min_test_time))
print("maximum time in testing dataset %s"%str(max_test_time))
return None'''
### test concept drift
'''Xs = self.tables
main_table = Xs[MAIN_TABLE_NAME]
#main_table = pd.concat([main_table, X_test], keys=['train', 'test'])
#main_table.index = main_table.index.map(lambda x: f"{x[0]}_{x[1]}")
main_table = pd.concat([main_table, self.y], axis=1)
time_feature = [c for c in main_table.columns if c.startswith(CONSTANT.TIME_PREFIX)]
main_table = main_table.sort_values(time_feature)
number_test = int(main_table.shape[0]*0.2)
X_test = main_table.tail(number_test)
X_test.index = range(X_test.shape[0])
main_table = main_table.head(main_table.shape[0] - number_test)
main_table.index = range(main_table.shape[0])
min_train_time = np.min(main_table[time_feature])
max_train_time = np.max(main_table[time_feature])
min_test_time = np.min(X_test[time_feature])
max_test_time = np.max(X_test[time_feature])
print("minimum time in training dataset %s"%str(min_train_time))
print("maximum time in training dataset %s"%str(max_train_time))
print("minimum time in testing dataset %s"%str(min_test_time))
print("maximum time in testing dataset %s"%str(max_test_time))
y_test = X_test[X_test.columns[-1]]
X_test = X_test[X_test.columns[0:-1]]
y_train = main_table[main_table.columns[-1]]
main_table = main_table[main_table.columns[0:-1]]
main_table = pd.concat([main_table, X_test], keys=['train', 'test'])
main_table.index = main_table.index.map(lambda x: f"{x[0]}_{x[1]}")
Xs[MAIN_TABLE_NAME] = main_table
clean_tables(Xs)
X = merge_table(Xs, self.config)
clean_df(X)
feature_engineer(X, self.config)
cat_features = []
for col in X.columns:
if "c_" in col and "ROLLING" not in col and "cnt" not in col:
cat_features.append(col)
X, _ = cat_value_counts(X, cat_features)
X_train = X[X.index.str.startswith("train")]
X_test = X[X.index.str.startswith("test")]
train(X_train, y_train, self.config)
result = predict(X_test, self.config)
fpr, tpr, thresholds=metrics.roc_curve(y_test.values, result, pos_label=1)
print("test auc is %.4f"%(metrics.auc(fpr, tpr)))
return None'''
Xs = self.tables
main_table = Xs[MAIN_TABLE_NAME]
main_table = pd.concat([main_table, X_test], keys=['train', 'test'])
main_table.index = main_table.index.map(lambda x: f"{x[0]}_{x[1]}")
Xs[MAIN_TABLE_NAME] = main_table
clean_tables(Xs)
X = merge_table(Xs, self.config)
clean_df(X)
feature_engineer(X, self.config)
diff = X.max() - X.min()
threshold = 1e-6
X = X[X.columns[diff>threshold]]
print("There are %d columns of trivial features"%(diff.shape[0] - X.shape[1]))
'''cat_features = []
for col in X.columns:
if "c_" in col and "ROLLING" not in col and "cnt" not in col:
cat_features.append(col)'''
#X, _ = cat_value_counts(X, cat_features)
#X = pd.get_dummies(X, columns = X.columns, sparse=True)
#cumulative_shift, X = oneHotEncoding(X)
#self.config["cumulative_shift"] = cumulative_shift
X_train, X, one_hot_features, all_features = oneHotEncodingCSRMatrix(X)
#cumulative_shift = X.shape[1]
self.config["cumulative_shift"] = all_features
y = self.y.values
result=None
#X_train = X[X.index.str.startswith("train")]
#train(X_train, y, self.config)
#X = X[X.index.str.startswith("test")]
#X.index = X.index.map(lambda x: int(x.split('_')[1]))
#X.sort_index(inplace=True)
#result = predict(X, self.config)
#result = train_fm_keras(X_train, X, y, self.config, one_hot_features)
#input()
result = train_fm_keras_batch(X_train, X, y, self.config, one_hot_features)
#result = train_and_predict(X_train, y, X, self.config, one_hot_features)
'''tf.reset_default_graph()
from tensorflow.python.summary.writer import writer_cache
#print(writer_cache.FileWriterCache.get('./models/eval'))
writer_cache.FileWriterCache.clear()
input()
os.system("rm -r ./models/*")'''
'''os.system("rm -r ./models/model.*")
os.system("rm -r ./models/check*")
os.system("rm -r ./models/graph.*")
os.system("rm -r ./models/eval/*")'''
        return pd.Series(result)
import nose
import warnings
import os
import datetime
import numpy as np
import sys
from distutils.version import LooseVersion
from pandas import compat
from pandas.compat import u, PY3
from pandas import (Series, DataFrame, Panel, MultiIndex, bdate_range,
date_range, period_range, Index, Categorical)
from pandas.core.common import PerformanceWarning
from pandas.io.packers import to_msgpack, read_msgpack
import pandas.util.testing as tm
from pandas.util.testing import (ensure_clean,
assert_categorical_equal,
assert_frame_equal,
assert_index_equal,
assert_series_equal,
patch)
from pandas.tests.test_panel import assert_panel_equal
import pandas
from pandas import Timestamp, NaT, tslib
nan = np.nan
try:
import blosc # NOQA
except ImportError:
_BLOSC_INSTALLED = False
else:
_BLOSC_INSTALLED = True
try:
import zlib # NOQA
except ImportError:
_ZLIB_INSTALLED = False
else:
_ZLIB_INSTALLED = True
_multiprocess_can_split_ = False
def check_arbitrary(a, b):
if isinstance(a, (list, tuple)) and isinstance(b, (list, tuple)):
assert(len(a) == len(b))
for a_, b_ in zip(a, b):
check_arbitrary(a_, b_)
elif isinstance(a, Panel):
assert_panel_equal(a, b)
elif isinstance(a, DataFrame):
assert_frame_equal(a, b)
elif isinstance(a, Series):
assert_series_equal(a, b)
elif isinstance(a, Index):
assert_index_equal(a, b)
elif isinstance(a, Categorical):
# Temp,
# Categorical.categories is changed from str to bytes in PY3
# maybe the same as GH 13591
if PY3 and b.categories.inferred_type == 'string':
pass
else:
tm.assert_categorical_equal(a, b)
elif a is NaT:
assert b is NaT
elif isinstance(a, Timestamp):
assert a == b
assert a.freq == b.freq
else:
assert(a == b)
class TestPackers(tm.TestCase):
def setUp(self):
        self.path = '__%s__.msg' % tm.rands(10)
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import os
import tempfile
import time
from collections import OrderedDict
from datetime import datetime
from string import printable
import numpy as np
import pandas as pd
import pytest
try:
import pyarrow as pa
except ImportError: # pragma: no cover
pa = None
try:
import fastparquet
except ImportError: # pragma: no cover
fastparquet = None
try:
import sqlalchemy
except ImportError: # pragma: no cover
sqlalchemy = None
from .... import tensor as mt
from .... import dataframe as md
from ....config import option_context
from ....tests.core import require_cudf, require_ray
from ....utils import arrow_array_to_objects, lazy_import, pd_release_version
from ..dataframe import from_pandas as from_pandas_df
from ..series import from_pandas as from_pandas_series
from ..index import from_pandas as from_pandas_index, from_tileable
from ..from_tensor import dataframe_from_tensor, dataframe_from_1d_tileables
from ..from_records import from_records
ray = lazy_import("ray")
_date_range_use_inclusive = pd_release_version[:2] >= (1, 4)
def test_from_pandas_dataframe_execution(setup):
# test empty DataFrame
pdf = pd.DataFrame()
df = from_pandas_df(pdf)
result = df.execute().fetch()
pd.testing.assert_frame_equal(pdf, result)
pdf = pd.DataFrame(columns=list("ab"))
df = from_pandas_df(pdf)
result = df.execute().fetch()
pd.testing.assert_frame_equal(pdf, result)
pdf = pd.DataFrame(
np.random.rand(20, 30), index=[np.arange(20), np.arange(20, 0, -1)]
)
df = from_pandas_df(pdf, chunk_size=(13, 21))
result = df.execute().fetch()
pd.testing.assert_frame_equal(pdf, result)
def test_from_pandas_series_execution(setup):
# test empty Series
ps = pd.Series(name="a")
series = from_pandas_series(ps, chunk_size=13)
result = series.execute().fetch()
pd.testing.assert_series_equal(ps, result)
series = from_pandas_series(ps)
result = series.execute().fetch()
pd.testing.assert_series_equal(ps, result)
ps = pd.Series(
np.random.rand(20), index=[np.arange(20), np.arange(20, 0, -1)], name="a"
)
series = from_pandas_series(ps, chunk_size=13)
result = series.execute().fetch()
pd.testing.assert_series_equal(ps, result)
def test_from_pandas_index_execution(setup):
pd_index = pd.timedelta_range("1 days", periods=10)
index = from_pandas_index(pd_index, chunk_size=7)
result = index.execute().fetch()
pd.testing.assert_index_equal(pd_index, result)
def test_index_execution(setup):
rs = np.random.RandomState(0)
pdf = pd.DataFrame(
rs.rand(20, 10),
index=np.arange(20, 0, -1),
columns=["a" + str(i) for i in range(10)],
)
df = from_pandas_df(pdf, chunk_size=13)
# test df.index
result = df.index.execute().fetch()
pd.testing.assert_index_equal(result, pdf.index)
result = df.columns.execute().fetch()
pd.testing.assert_index_equal(result, pdf.columns)
# df has unknown chunk shape on axis 0
df = df[df.a1 < 0.5]
# test df.index
result = df.index.execute().fetch()
pd.testing.assert_index_equal(result, pdf[pdf.a1 < 0.5].index)
s = pd.Series(pdf["a1"], index=pd.RangeIndex(20))
series = from_pandas_series(s, chunk_size=13)
# test series.index which has value
result = series.index.execute().fetch()
pd.testing.assert_index_equal(result, s.index)
s = pdf["a2"]
series = from_pandas_series(s, chunk_size=13)
# test series.index
result = series.index.execute().fetch()
pd.testing.assert_index_equal(result, s.index)
# test tensor
raw = rs.random(20)
t = mt.tensor(raw, chunk_size=13)
result = from_tileable(t).execute().fetch()
pd.testing.assert_index_equal(result, pd.Index(raw))
def test_initializer_execution(setup):
arr = np.random.rand(20, 30)
pdf = pd.DataFrame(arr, index=[np.arange(20), np.arange(20, 0, -1)])
df = md.DataFrame(pdf, chunk_size=(15, 10))
result = df.execute().fetch()
pd.testing.assert_frame_equal(pdf, result)
df = md.DataFrame(arr, index=md.date_range("2020-1-1", periods=20))
result = df.execute().fetch()
pd.testing.assert_frame_equal(
result, pd.DataFrame(arr, index=pd.date_range("2020-1-1", periods=20))
)
df = md.DataFrame(
{"prices": [100, 101, np.nan, 100, 89, 88]},
index=md.date_range("1/1/2010", periods=6, freq="D"),
)
result = df.execute().fetch()
pd.testing.assert_frame_equal(
result,
pd.DataFrame(
{"prices": [100, 101, np.nan, 100, 89, 88]},
index=pd.date_range("1/1/2010", periods=6, freq="D"),
),
)
s = np.random.rand(20)
ps = pd.Series(s, index=[np.arange(20), np.arange(20, 0, -1)], name="a")
series = md.Series(ps, chunk_size=7)
result = series.execute().fetch()
pd.testing.assert_series_equal(ps, result)
series = md.Series(s, index=md.date_range("2020-1-1", periods=20))
result = series.execute().fetch()
pd.testing.assert_series_equal(
result, pd.Series(s, index=pd.date_range("2020-1-1", periods=20))
)
pi = pd.IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)])
index = md.Index(md.Index(pi))
result = index.execute().fetch()
    pd.testing.assert_index_equal(pi, result)
import numpy as np
import pandas as pd
import re
import MeCab
from tqdm import tqdm_notebook as tqdm
from os import path
import pickle
## 1. Load the data
path = "G:\我的云端硬盘\\fashion_research\data\labels.pkl"
with open(path, 'rb') as handle:
temp = pickle.load(handle)
labels = pd.DataFrame(temp)
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import nose
import numpy as np
import pandas as pd
from pandas import (Index, Series, DataFrame, Timestamp, isnull, notnull,
bdate_range, date_range, _np_version_under1p7)
import pandas.core.common as com
from pandas.compat import StringIO, lrange, range, zip, u, OrderedDict, long
from pandas import compat, to_timedelta, tslib
from pandas.tseries.timedeltas import _coerce_scalar_to_timedelta_type as ct
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
assert_almost_equal,
ensure_clean)
import pandas.util.testing as tm
def _skip_if_numpy_not_friendly():
# not friendly for < 1.7
if _np_version_under1p7:
raise nose.SkipTest("numpy < 1.7")
class TestTimedeltas(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
pass
def test_numeric_conversions(self):
_skip_if_numpy_not_friendly()
self.assertEqual(ct(0), np.timedelta64(0,'ns'))
self.assertEqual(ct(10), np.timedelta64(10,'ns'))
self.assertEqual(ct(10,unit='ns'), np.timedelta64(10,'ns').astype('m8[ns]'))
self.assertEqual(ct(10,unit='us'), np.timedelta64(10,'us').astype('m8[ns]'))
self.assertEqual(ct(10,unit='ms'), np.timedelta64(10,'ms').astype('m8[ns]'))
self.assertEqual(ct(10,unit='s'), np.timedelta64(10,'s').astype('m8[ns]'))
self.assertEqual(ct(10,unit='d'), np.timedelta64(10,'D').astype('m8[ns]'))
def test_timedelta_conversions(self):
_skip_if_numpy_not_friendly()
self.assertEqual(ct(timedelta(seconds=1)), np.timedelta64(1,'s').astype('m8[ns]'))
self.assertEqual(ct(timedelta(microseconds=1)), np.timedelta64(1,'us').astype('m8[ns]'))
self.assertEqual(ct(timedelta(days=1)), np.timedelta64(1,'D').astype('m8[ns]'))
def test_short_format_converters(self):
_skip_if_numpy_not_friendly()
def conv(v):
return v.astype('m8[ns]')
self.assertEqual(ct('10'), np.timedelta64(10,'ns'))
self.assertEqual(ct('10ns'), np.timedelta64(10,'ns'))
self.assertEqual(ct('100'), np.timedelta64(100,'ns'))
self.assertEqual(ct('100ns'), np.timedelta64(100,'ns'))
self.assertEqual(ct('1000'), np.timedelta64(1000,'ns'))
self.assertEqual(ct('1000ns'), np.timedelta64(1000,'ns'))
self.assertEqual(ct('1000NS'), np.timedelta64(1000,'ns'))
self.assertEqual(ct('10us'), np.timedelta64(10000,'ns'))
self.assertEqual(ct('100us'), np.timedelta64(100000,'ns'))
self.assertEqual(ct('1000us'), np.timedelta64(1000000,'ns'))
self.assertEqual(ct('1000Us'), np.timedelta64(1000000,'ns'))
self.assertEqual(ct('1000uS'), np.timedelta64(1000000,'ns'))
self.assertEqual(ct('1ms'), np.timedelta64(1000000,'ns'))
self.assertEqual(ct('10ms'), np.timedelta64(10000000,'ns'))
self.assertEqual(ct('100ms'), np.timedelta64(100000000,'ns'))
self.assertEqual(ct('1000ms'), np.timedelta64(1000000000,'ns'))
self.assertEqual(ct('-1s'), -np.timedelta64(1000000000,'ns'))
self.assertEqual(ct('1s'), np.timedelta64(1000000000,'ns'))
self.assertEqual(ct('10s'), np.timedelta64(10000000000,'ns'))
self.assertEqual(ct('100s'), np.timedelta64(100000000000,'ns'))
self.assertEqual(ct('1000s'), np.timedelta64(1000000000000,'ns'))
self.assertEqual(ct('1d'), conv(np.timedelta64(1,'D')))
self.assertEqual(ct('-1d'), -conv(np.timedelta64(1,'D')))
self.assertEqual(ct('1D'), conv(np.timedelta64(1,'D')))
self.assertEqual(ct('10D'), conv(np.timedelta64(10,'D')))
self.assertEqual(ct('100D'), conv(np.timedelta64(100,'D')))
self.assertEqual(ct('1000D'), conv(np.timedelta64(1000,'D')))
self.assertEqual(ct('10000D'), conv(np.timedelta64(10000,'D')))
# space
self.assertEqual(ct(' 10000D '), conv(np.timedelta64(10000,'D')))
self.assertEqual(ct(' - 10000D '), -conv(np.timedelta64(10000,'D')))
# invalid
self.assertRaises(ValueError, ct, '1foo')
self.assertRaises(ValueError, ct, 'foo')
def test_full_format_converters(self):
_skip_if_numpy_not_friendly()
def conv(v):
return v.astype('m8[ns]')
d1 = np.timedelta64(1,'D')
self.assertEqual(ct('1days'), conv(d1))
self.assertEqual(ct('1days,'), conv(d1))
self.assertEqual(ct('- 1days,'), -conv(d1))
self.assertEqual(ct('00:00:01'), conv(np.timedelta64(1,'s')))
self.assertEqual(ct('06:00:01'), conv(np.timedelta64(6*3600+1,'s')))
self.assertEqual(ct('06:00:01.0'), conv(np.timedelta64(6*3600+1,'s')))
self.assertEqual(ct('06:00:01.01'), conv(np.timedelta64(1000*(6*3600+1)+10,'ms')))
self.assertEqual(ct('- 1days, 00:00:01'), -conv(d1+np.timedelta64(1,'s')))
self.assertEqual(ct('1days, 06:00:01'), conv(d1+np.timedelta64(6*3600+1,'s')))
self.assertEqual(ct('1days, 06:00:01.01'), conv(d1+np.timedelta64(1000*(6*3600+1)+10,'ms')))
# invalid
self.assertRaises(ValueError, ct, '- 1days, 00')
def test_nat_converters(self):
_skip_if_numpy_not_friendly()
self.assertEqual(to_timedelta('nat',box=False), tslib.iNaT)
self.assertEqual(to_timedelta('nan',box=False), tslib.iNaT)
def test_to_timedelta(self):
_skip_if_numpy_not_friendly()
def conv(v):
return v.astype('m8[ns]')
d1 = np.timedelta64(1,'D')
self.assertEqual(to_timedelta('1 days 06:05:01.00003',box=False), conv(d1+np.timedelta64(6*3600+5*60+1,'s')+np.timedelta64(30,'us')))
self.assertEqual(to_timedelta('15.5us',box=False), conv(np.timedelta64(15500,'ns')))
# empty string
result = to_timedelta('',box=False)
self.assertEqual(result, tslib.iNaT)
result = to_timedelta(['', ''])
self.assert_(isnull(result).all())
# pass thru
result = to_timedelta(np.array([np.timedelta64(1,'s')]))
expected = np.array([np.timedelta64(1,'s')])
tm.assert_almost_equal(result,expected)
# ints
result = np.timedelta64(0,'ns')
expected = to_timedelta(0,box=False)
self.assertEqual(result, expected)
# Series
expected = Series([timedelta(days=1), timedelta(days=1, seconds=1)])
result = to_timedelta(Series(['1d','1days 00:00:01']))
tm.assert_series_equal(result, expected)
# with units
result = Series([ np.timedelta64(0,'ns'), np.timedelta64(10,'s').astype('m8[ns]') ],dtype='m8[ns]')
expected = to_timedelta([0,10],unit='s')
tm.assert_series_equal(result, expected)
# single element conversion
v = timedelta(seconds=1)
result = to_timedelta(v,box=False)
expected = np.timedelta64(timedelta(seconds=1))
self.assertEqual(result, expected)
v = np.timedelta64(timedelta(seconds=1))
result = to_timedelta(v,box=False)
expected = np.timedelta64(timedelta(seconds=1))
self.assertEqual(result, expected)
def test_to_timedelta_via_apply(self):
_skip_if_numpy_not_friendly()
# GH 5458
expected = Series([np.timedelta64(1,'s')])
result = Series(['00:00:01']).apply(to_timedelta)
tm.assert_series_equal(result, expected)
result = Series([to_timedelta('00:00:01')])
tm.assert_series_equal(result, expected)
def test_timedelta_ops(self):
_skip_if_numpy_not_friendly()
# GH4984
# make sure ops return timedeltas
s = Series([Timestamp('20130101') + timedelta(seconds=i*i) for i in range(10) ])
td = s.diff()
result = td.mean()[0]
# TODO This should have returned a scalar to begin with. Hack for now.
expected = to_timedelta(timedelta(seconds=9))
tm.assert_almost_equal(result, expected)
result = td.quantile(.1)
# This properly returned a scalar.
expected = to_timedelta('00:00:02.6')
tm.assert_almost_equal(result, expected)
result = td.median()[0]
# TODO This should have returned a scalar to begin with. Hack for now.
expected = to_timedelta('00:00:08')
tm.assert_almost_equal(result, expected)
# GH 6462
# consistency in returned values for sum
result = td.sum()[0]
expected = to_timedelta('00:01:21')
tm.assert_almost_equal(result, expected)
def test_to_timedelta_on_missing_values(self):
_skip_if_numpy_not_friendly()
# GH5438
timedelta_NaT = np.timedelta64('NaT')
actual = pd.to_timedelta(Series(['00:00:01', np.nan]))
expected = Series([np.timedelta64(1000000000, 'ns'), timedelta_NaT], dtype='<m8[ns]')
assert_series_equal(actual, expected)
actual = pd.to_timedelta(Series(['00:00:01', pd.NaT]))
assert_series_equal(actual, expected)
actual = pd.to_timedelta(np.nan)
self.assertEqual(actual, timedelta_NaT)
actual = pd.to_timedelta(pd.NaT)
self.assertEqual(actual, timedelta_NaT)
def test_timedelta_ops_with_missing_values(self):
_skip_if_numpy_not_friendly()
# setup
s1 = pd.to_timedelta(Series(['00:00:01']))
s2 = pd.to_timedelta(Series(['00:00:02']))
sn = pd.to_timedelta(Series([pd.NaT]))
df1 = DataFrame(['00:00:01']).apply(pd.to_timedelta)
df2 = DataFrame(['00:00:02']).apply(pd.to_timedelta)
dfn = DataFrame([pd.NaT]).apply(pd.to_timedelta)
scalar1 = pd.to_timedelta('00:00:01')
scalar2 = pd.to_timedelta('00:00:02')
timedelta_NaT = pd.to_timedelta('NaT')
NA = np.nan
actual = scalar1 + scalar1
self.assertEqual(actual, scalar2)
actual = scalar2 - scalar1
self.assertEqual(actual, scalar1)
actual = s1 + s1
assert_series_equal(actual, s2)
actual = s2 - s1
assert_series_equal(actual, s1)
actual = s1 + scalar1
assert_series_equal(actual, s2)
actual = s2 - scalar1
assert_series_equal(actual, s1)
actual = s1 + timedelta_NaT
assert_series_equal(actual, sn)
actual = s1 - timedelta_NaT
assert_series_equal(actual, sn)
actual = s1 + NA
assert_series_equal(actual, sn)
actual = s1 - NA
assert_series_equal(actual, sn)
actual = s1 + pd.NaT # NaT is datetime, not timedelta
assert_series_equal(actual, sn)
actual = s2 - pd.NaT
assert_series_equal(actual, sn)
actual = s1 + df1
assert_frame_equal(actual, df2)
actual = s2 - df1
assert_frame_equal(actual, df1)
actual = df1 + s1
assert_frame_equal(actual, df2)
actual = df2 - s1
assert_frame_equal(actual, df1)
actual = df1 + df1
assert_frame_equal(actual, df2)
actual = df2 - df1
assert_frame_equal(actual, df1)
actual = df1 + scalar1
assert_frame_equal(actual, df2)
actual = df2 - scalar1
assert_frame_equal(actual, df1)
actual = df1 + timedelta_NaT
assert_frame_equal(actual, dfn)
actual = df1 - timedelta_NaT
assert_frame_equal(actual, dfn)
actual = df1 + NA
assert_frame_equal(actual, dfn)
actual = df1 - NA
assert_frame_equal(actual, dfn)
actual = df1 + pd.NaT # NaT is datetime, not timedelta
assert_frame_equal(actual, dfn)
actual = df1 - pd.NaT
assert_frame_equal(actual, dfn)
def test_apply_to_timedelta(self):
_skip_if_numpy_not_friendly()
timedelta_NaT = pd.to_timedelta('NaT')
list_of_valid_strings = ['00:00:01', '00:00:02']
        a = pd.to_timedelta(list_of_valid_strings)
from fhirpy import SyncFHIRClient
import requests
from io import BytesIO
import numpy as np
import pandas as pd
from PIL import Image
from torch.utils.data import Dataset
import torchvision.transforms as transforms
class ISICDataset(Dataset):
classes = {'NV': 0, 'MEL': 1, 'BKL': 2, 'DF': 3, 'SCC': 4, 'BCC': 5, 'VASC': 6, 'AK': 7}
"""ISIC dataset."""
def __init__(self, fhir_server, fhir_port, split='train', input_size=256):
"""
Args:
fhir_server (string): Address of FHIR Server.
fhir_port (string): Port of FHIR Server.
"""
self.fhir_server = fhir_server
self.fhir_port = fhir_port
self.split = split
self.input_size = input_size
# Create an instance
client = SyncFHIRClient('http://{}:{}/fhir'.format(fhir_server, fhir_port))
# Search for patients
patients = client.resources('Patient') # Return lazy search set
patients_data = []
for patient in patients:
patient_birthDate = None
try:
patient_birthDate = patient.birthDate
except:
pass
            # patient_id, gender, birthDate
patients_data.append([patient.id, patient.gender, patient_birthDate])
patients_df = pd.DataFrame(patients_data, columns=["patient_id", "gender", "birthDate"])
# Search for media
media_list = client.resources('Media').include('Patient', 'subject')
media_data = []
for media in media_list:
media_bodySite = None
media_reasonCode = None
media_note = None
try:
media_bodySite = media.bodySite.text
except:
pass
try:
media_reasonCode = media.reasonCode[0].text
except:
pass
try:
media_note = media.note[0].text
except:
pass
media_data.append([media.subject.id, media.id, media_bodySite, media_reasonCode, media_note, media.content.url])
        media_df = pd.DataFrame(media_data, columns=["patient_id", "media_id", "bodySite", "reasonCode", "note", "image_url"])
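        # The fragment ends mid-__init__. Joining the media records to the
        # patient metadata is presumably the next step; the line below is a
        # hedged sketch (the attribute name is an assumption, not taken from
        # the original source).
        self.metadata = media_df.merge(patients_df, on="patient_id", how="left")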
#! -*- coding:utf-8 -*-
import os
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
import bottom
"""使用当日气温,昨日气温,昨日土温, 修正模型中的参数
"""
def get_data():
filename = 'summary_report_2016-01-01_2020-09-07.txt'
    origin_data = pd.read_csv(filename, header=[0, 1], sep=" ")
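# get_data() above is truncated in this extract. The function below is a
# hedged, self-contained sketch of the calibration step described in the module
# docstring: regress on today's air temperature, yesterday's air temperature
# and yesterday's soil temperature. The column names and the regression target
# are assumptions, not the real headers of the summary report.
def calibrate_sketch(df):
    features = df[["air_temp_today", "air_temp_yesterday", "soil_temp_yesterday"]]
    target = df["soil_temp_today"]  # assumed target
    X_train, X_test, y_train, y_test = train_test_split(features, target, test_size=0.2)
    model = LinearRegression().fit(X_train, y_train)
    # return the fitted model and its R^2 on the held-out split
    return model, model.score(X_test, y_test)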
"""
"Stacking: LGB, XGB, Cat with and without imputation (old & new LGBs),tsne,logistic"
"""
import os
from timeit import default_timer as timer
from datetime import datetime
from functools import reduce
import pandas as pd
import src.common as common
import src.config.constants as constants
import src.munging as process_data
import src.modeling as model
from sklearn.model_selection import KFold
from sklearn.calibration import CalibratedClassifierCV
from sklearn.linear_model import RidgeClassifier
from sklearn.linear_model import LogisticRegression
common.set_timezone()
start = timer()
# Create RUN_ID
RUN_ID = datetime.now().strftime("%m%d_%H%M")
MODEL_NAME = os.path.basename(__file__).split(".")[0]
SEED = 42
EXP_DETAILS = "Stacking: LGB, XGB, Cat with and without imputation (old & new LGBs),tsne,logistic"
IS_TEST = False
PLOT_FEATURE_IMPORTANCE = False
TARGET = "claim"
MODEL_TYPE = "Ranking"
LOGGER_NAME = "ranking"
logger = common.get_logger(LOGGER_NAME, MODEL_NAME, RUN_ID, constants.LOG_DIR)
common.set_seed(SEED)
logger.info(f"Running for Model Number [{MODEL_NAME}] & [{RUN_ID}]")
common.update_tracking(RUN_ID, "model_number", MODEL_NAME, drop_incomplete_rows=True)
common.update_tracking(RUN_ID, "model_type", MODEL_TYPE)
common.update_tracking(RUN_ID, "metric", "roc_auc")
train_df, test_df, sample_submission_df = process_data.read_processed_data(
logger, constants.PROCESSED_DATA_DIR, train=True, test=True, sample_submission=True
)
# Read different submission files and merge them to create dataset
# for level 2
sub_1_predition_name = (
"sub_lgb_K5_nonull_mean_sum_max_no_imp_no_scaler_params_K_0924_1159_0.81605.gz"
)
sub_1_oof_name = (
"oof_lgb_K5_nonull_mean_sum_max_no_imp_no_scaler_params_K_0924_1159_0.81605.csv"
)
sub_1_test_pred = pd.read_csv(f"{constants.SUBMISSION_DIR}/{sub_1_predition_name}")
sub_1_oof_pred = pd.read_csv(f"{constants.OOF_DIR}/{sub_1_oof_name}")
logger.info(
f"Shape of submission and oof file {sub_1_test_pred.shape}, {sub_1_oof_pred.shape}"
)
sub_2_predition_name = (
"sub_lgb_K10_nonull_mean_sum_max_mean_imp_no_scaler_params_K_0924_1406_0.81633.gz"
)
sub_2_oof_name = (
"oof_lgb_K10_nonull_mean_sum_max_mean_imp_no_scaler_params_K_0924_1406_0.81633.csv"
)
sub_2_test_pred = pd.read_csv(f"{constants.SUBMISSION_DIR}/{sub_2_predition_name}")
sub_2_oof_pred = pd.read_csv(f"{constants.OOF_DIR}/{sub_2_oof_name}")
logger.info(
f"Shape of submission and oof file {sub_2_test_pred.shape}, {sub_2_oof_pred.shape}"
)
sub_3_predition_name = (
"sub_xgb_K10_nonull_mean_sum_max_custom_imp_StScaler_K_params_0921_2239_0.81649.gz"
)
sub_3_oof_name = (
"oof_xgb_K10_nonull_mean_sum_max_custom_imp_StScaler_K_params_0921_2239_0.81649.csv"
)
sub_3_test_pred = pd.read_csv(f"{constants.SUBMISSION_DIR}/{sub_3_predition_name}")
sub_3_oof_pred = pd.read_csv(f"{constants.OOF_DIR}/{sub_3_oof_name}")
logger.info(
f"Shape of submission and oof file {sub_3_test_pred.shape}, {sub_3_oof_pred.shape}"
)
sub_4_predition_name = (
"sub_xgb_K10_nonull_mean_sum_max_no_imp_no_scaler_K_params_0922_1630_0.81634.gz"
)
sub_4_oof_name = (
"oof_xgb_K10_nonull_mean_sum_max_no_imp_no_scaler_K_params_0922_1630_0.81634.csv"
)
sub_4_test_pred = pd.read_csv(f"{constants.SUBMISSION_DIR}/{sub_4_predition_name}")
sub_4_oof_pred = pd.read_csv(f"{constants.OOF_DIR}/{sub_4_oof_name}")
logger.info(
f"Shape of submission and oof file {sub_4_test_pred.shape}, {sub_4_oof_pred.shape}"
)
sub_5_predition_name = (
"sub_cat_K10_nonull_full_data_mean_sum_max_Kaggle_bin_params_0921_2000_0.81612.gz"
)
sub_5_oof_name = (
"oof_cat_K10_nonull_full_data_mean_sum_max_Kaggle_bin_params_0921_2000_0.81612.csv"
)
sub_5_test_pred = pd.read_csv(f"{constants.SUBMISSION_DIR}/{sub_5_predition_name}")
sub_5_oof_pred = pd.read_csv(f"{constants.OOF_DIR}/{sub_5_oof_name}")
logger.info(
f"Shape of submission and oof file {sub_5_test_pred.shape}, {sub_5_oof_pred.shape}"
)
sub_6_predition_name = (
"sub_cat_K10_nonull_mean_sum_max_noImp_noScaler_K_params_0922_0747_0.81549.gz"
)
sub_6_oof_name = (
"oof_cat_K10_nonull_mean_sum_max_noImp_noScaler_K_params_0922_0747_0.81549.csv"
)
sub_6_test_pred = pd.read_csv(f"{constants.SUBMISSION_DIR}/{sub_6_predition_name}")
sub_6_oof_pred = pd.read_csv(f"{constants.OOF_DIR}/{sub_6_oof_name}")
logger.info(
f"Shape of submission and oof file {sub_6_test_pred.shape}, {sub_6_oof_pred.shape}"
)
# New submissions
sub_7_predition_name = "sub_lgb_K10_nonull_mean_sum_max_40_48_95_3_mean_imp_no_scaler_params_K_0928_1536_0.81645.gz"
sub_7_oof_name = "oof_lgb_K10_nonull_mean_sum_max_40_48_95_3_mean_imp_no_scaler_params_K_0928_1536_0.81645.csv"
sub_7_test_pred = pd.read_csv(f"{constants.SUBMISSION_DIR}/{sub_7_predition_name}")
sub_7_oof_pred = pd.read_csv(f"{constants.OOF_DIR}/{sub_7_oof_name}")
logger.info(
f"Shape of submission and oof file {sub_7_test_pred.shape}, {sub_7_oof_pred.shape}"
)
sub_8_predition_name = "sub_lgb_K10_nonull_mean_sum_max_40_48_95_3_no_imp_no_scaler_params_K_0928_1834_0.81627.gz"
sub_8_oof_name = "oof_lgb_K10_nonull_mean_sum_max_40_48_95_3_no_imp_no_scaler_params_K_0928_1834_0.81627.csv"
sub_8_test_pred = pd.read_csv(f"{constants.SUBMISSION_DIR}/{sub_8_predition_name}")
sub_8_oof_pred = pd.read_csv(f"{constants.OOF_DIR}/{sub_8_oof_name}")
logger.info(
f"Shape of submission and oof file {sub_8_test_pred.shape}, {sub_8_oof_pred.shape}"
)
# tsne
sub_9_predition_name = "sub_lgb_K5_nonull_mean_sum_max_tsne_0917_1621_0.81337.gz"
sub_9_oof_name = "oof_lgb_K5_nonull_mean_sum_max_tsne_0917_1621_0.81337.csv"
sub_9_test_pred = pd.read_csv(f"{constants.SUBMISSION_DIR}/{sub_9_predition_name}")
sub_9_oof_pred = pd.read_csv(f"{constants.OOF_DIR}/{sub_9_oof_name}")
logger.info(
f"Shape of submission and oof file {sub_9_test_pred.shape}, {sub_9_oof_pred.shape}"
)
# all features : Didn't improve the score
sub_10_predition_name = "sub_lgb_all_features_0916_1816_0.81303.gz"
sub_10_oof_name = "oof_lgb_all_features_0916_1816_0.81303.csv"
sub_10_test_pred = pd.read_csv(f"{constants.SUBMISSION_DIR}/{sub_10_predition_name}")
sub_10_oof_pred = pd.read_csv(f"{constants.OOF_DIR}/{sub_10_oof_name}")
logger.info(
f"Shape of submission and oof file {sub_10_test_pred.shape}, {sub_10_oof_pred.shape}"
)
# Logistic Regression
sub_11_predition_name = "sub_logistic_K10_nonull_mean_sum_max_f40_48_95_3_no_imp_no_scaler_K_params_0928_1259_0.79925.gz"
sub_11_oof_name = "oof_logistic_K10_nonull_mean_sum_max_f40_48_95_3_no_imp_no_scaler_K_params_0928_1259_0.79925.csv"
sub_11_test_pred = pd.read_csv(f"{constants.SUBMISSION_DIR}/{sub_11_predition_name}")
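# The script is truncated at this point. Below is a minimal, self-contained
# sketch of the level-2 step that the imports above (reduce, KFold,
# LogisticRegression) point to: merge the OOF frames on a shared id column and
# cross-validate a simple meta-model on them. The merge key, the column
# renaming scheme and the scoring metric are assumptions, not taken from the
# original project.
def stack_oof_sketch(oof_dfs, y, id_col="id"):
    from sklearn.model_selection import cross_val_score

    # rename prediction columns so repeated merges cannot collide
    renamed = [
        df.rename(columns={c: f"pred_{i}_{c}" for c in df.columns if c != id_col})
        for i, df in enumerate(oof_dfs)
    ]
    merged = reduce(lambda left, right: pd.merge(left, right, on=id_col), renamed)
    features = merged.drop(columns=[id_col])
    meta_model = LogisticRegression(max_iter=1000)
    kfold = KFold(n_splits=5, shuffle=True, random_state=SEED)
    return cross_val_score(meta_model, features, y, cv=kfold, scoring="roc_auc")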
from datetime import datetime
import operator
import numpy as np
import pytest
from pandas import DataFrame, Index, Series, bdate_range
import pandas._testing as tm
from pandas.core import ops
class TestSeriesLogicalOps:
@pytest.mark.parametrize("bool_op", [operator.and_, operator.or_, operator.xor])
def test_bool_operators_with_nas(self, bool_op):
# boolean &, |, ^ should work with object arrays and propagate NAs
ser = Series(bdate_range("1/1/2000", periods=10), dtype=object)
ser[::2] = np.nan
mask = ser.isna()
filled = ser.fillna(ser[0])
result = bool_op(ser < ser[9], ser > ser[3])
expected = bool_op(filled < filled[9], filled > filled[3])
expected[mask] = False
tm.assert_series_equal(result, expected)
def test_logical_operators_bool_dtype_with_empty(self):
# GH#9016: support bitwise op for integer types
index = list("bca")
s_tft = Series([True, False, True], index=index)
s_fff = Series([False, False, False], index=index)
s_empty = Series([], dtype=object)
res = s_tft & s_empty
expected = s_fff
tm.assert_series_equal(res, expected)
res = s_tft | s_empty
expected = s_tft
tm.assert_series_equal(res, expected)
def test_logical_operators_int_dtype_with_int_dtype(self):
# GH#9016: support bitwise op for integer types
# TODO: unused
# s_0101 = Series([0, 1, 0, 1])
s_0123 = Series(range(4), dtype="int64")
s_3333 = Series([3] * 4)
s_4444 = Series([4] * 4)
res = s_0123 & s_3333
expected = Series(range(4), dtype="int64")
tm.assert_series_equal(res, expected)
res = s_0123 | s_4444
expected = Series(range(4, 8), dtype="int64")
tm.assert_series_equal(res, expected)
s_1111 = Series([1] * 4, dtype="int8")
res = s_0123 & s_1111
expected = Series([0, 1, 0, 1], dtype="int64")
tm.assert_series_equal(res, expected)
res = s_0123.astype(np.int16) | s_1111.astype(np.int32)
expected = Series([1, 1, 3, 3], dtype="int32")
tm.assert_series_equal(res, expected)
def test_logical_operators_int_dtype_with_int_scalar(self):
# GH#9016: support bitwise op for integer types
s_0123 = Series(range(4), dtype="int64")
res = s_0123 & 0
expected = Series([0] * 4)
tm.assert_series_equal(res, expected)
res = s_0123 & 1
expected = Series([0, 1, 0, 1])
tm.assert_series_equal(res, expected)
def test_logical_operators_int_dtype_with_float(self):
# GH#9016: support bitwise op for integer types
s_0123 = Series(range(4), dtype="int64")
msg = "Cannot perform.+with a dtyped.+array and scalar of type"
with pytest.raises(TypeError, match=msg):
s_0123 & np.NaN
with pytest.raises(TypeError, match=msg):
s_0123 & 3.14
msg = "unsupported operand type.+for &:"
with pytest.raises(TypeError, match=msg):
s_0123 & [0.1, 4, 3.14, 2]
with pytest.raises(TypeError, match=msg):
s_0123 & np.array([0.1, 4, 3.14, 2])
with pytest.raises(TypeError, match=msg):
s_0123 & Series([0.1, 4, -3.14, 2])
def test_logical_operators_int_dtype_with_str(self):
s_1111 = Series([1] * 4, dtype="int8")
msg = "Cannot perform 'and_' with a dtyped.+array and scalar of type"
with pytest.raises(TypeError, match=msg):
s_1111 & "a"
with pytest.raises(TypeError, match="unsupported operand.+for &"):
s_1111 & ["a", "b", "c", "d"]
def test_logical_operators_int_dtype_with_bool(self):
# GH#9016: support bitwise op for integer types
s_0123 = Series(range(4), dtype="int64")
expected = Series([False] * 4)
result = s_0123 & False
tm.assert_series_equal(result, expected)
result = s_0123 & [False]
tm.assert_series_equal(result, expected)
result = s_0123 & (False,)
tm.assert_series_equal(result, expected)
result = s_0123 ^ False
expected = Series([False, True, True, True])
tm.assert_series_equal(result, expected)
def test_logical_operators_int_dtype_with_object(self):
# GH#9016: support bitwise op for integer types
s_0123 = Series(range(4), dtype="int64")
result = s_0123 & Series([False, np.NaN, False, False])
expected = Series([False] * 4)
tm.assert_series_equal(result, expected)
s_abNd = Series(["a", "b", np.NaN, "d"])
with pytest.raises(TypeError, match="unsupported.* 'int' and 'str'"):
s_0123 & s_abNd
def test_logical_operators_bool_dtype_with_int(self):
index = list("bca")
s_tft = Series([True, False, True], index=index)
s_fff = Series([False, False, False], index=index)
res = s_tft & 0
expected = s_fff
tm.assert_series_equal(res, expected)
res = s_tft & 1
expected = s_tft
tm.assert_series_equal(res, expected)
def test_logical_ops_bool_dtype_with_ndarray(self):
# make sure we operate on ndarray the same as Series
left = Series([True, True, True, False, True])
right = [True, False, None, True, np.nan]
expected = Series([True, False, False, False, False])
result = left & right
tm.assert_series_equal(result, expected)
result = left & np.array(right)
tm.assert_series_equal(result, expected)
result = left & Index(right)
tm.assert_series_equal(result, expected)
result = left & Series(right)
tm.assert_series_equal(result, expected)
expected = Series([True, True, True, True, True])
result = left | right
tm.assert_series_equal(result, expected)
result = left | np.array(right)
tm.assert_series_equal(result, expected)
result = left | Index(right)
tm.assert_series_equal(result, expected)
result = left | Series(right)
tm.assert_series_equal(result, expected)
expected = Series([False, True, True, True, True])
result = left ^ right
tm.assert_series_equal(result, expected)
result = left ^ np.array(right)
tm.assert_series_equal(result, expected)
result = left ^ Index(right)
tm.assert_series_equal(result, expected)
result = left ^ Series(right)
tm.assert_series_equal(result, expected)
def test_logical_operators_int_dtype_with_bool_dtype_and_reindex(self):
# GH#9016: support bitwise op for integer types
# with non-matching indexes, logical operators will cast to object
# before operating
index = list("bca")
s_tft = Series([True, False, True], index=index)
s_tff = Series([True, False, False], index=index)
s_0123 = Series(range(4), dtype="int64")
# s_0123 will be all false now because of reindexing like s_tft
expected = Series([False] * 7, index=[0, 1, 2, 3, "a", "b", "c"])
result = s_tft & s_0123
tm.assert_series_equal(result, expected)
expected = Series([False] * 7, index=[0, 1, 2, 3, "a", "b", "c"])
result = s_0123 & s_tft
tm.assert_series_equal(result, expected)
s_a0b1c0 = Series([1], list("b"))
res = s_tft & s_a0b1c0
expected = s_tff.reindex(list("abc"))
tm.assert_series_equal(res, expected)
res = s_tft | s_a0b1c0
expected = s_tft.reindex(list("abc"))
tm.assert_series_equal(res, expected)
def test_scalar_na_logical_ops_corners(self):
s = Series([2, 3, 4, 5, 6, 7, 8, 9, 10])
msg = "Cannot perform.+with a dtyped.+array and scalar of type"
with pytest.raises(TypeError, match=msg):
s & datetime(2005, 1, 1)
s = Series([2, 3, 4, 5, 6, 7, 8, 9, datetime(2005, 1, 1)])
s[::2] = np.nan
expected = Series(True, index=s.index)
expected[::2] = False
result = s & list(s)
tm.assert_series_equal(result, expected)
def test_scalar_na_logical_ops_corners_aligns(self):
s = Series([2, 3, 4, 5, 6, 7, 8, 9, datetime(2005, 1, 1)])
s[::2] = np.nan
d = DataFrame({"A": s})
expected = DataFrame(False, index=range(9), columns=["A"] + list(range(9)))
result = s & d
tm.assert_frame_equal(result, expected)
result = d & s
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("op", [operator.and_, operator.or_, operator.xor])
def test_logical_ops_with_index(self, op):
# GH#22092, GH#19792
ser = Series([True, True, False, False])
idx1 = Index([True, False, True, False])
idx2 = Index([1, 0, 1, 0])
expected = Series([op(ser[n], idx1[n]) for n in range(len(ser))])
result = op(ser, idx1)
tm.assert_series_equal(result, expected)
expected = Series([op(ser[n], idx2[n]) for n in range(len(ser))], dtype=bool)
result = op(ser, idx2)
tm.assert_series_equal(result, expected)
def test_reversed_xor_with_index_returns_index(self):
# GH#22092, GH#19792
ser = Series([True, True, False, False])
idx1 = Index([True, False, True, False])
idx2 = Index([1, 0, 1, 0])
expected = Index.symmetric_difference(idx1, ser)
with tm.assert_produces_warning(FutureWarning):
result = idx1 ^ ser
tm.assert_index_equal(result, expected)
expected = Index.symmetric_difference(idx2, ser)
with tm.assert_produces_warning(FutureWarning):
result = idx2 ^ ser
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"op",
[
pytest.param(
ops.rand_,
marks=pytest.mark.xfail(
reason="GH#22092 Index __and__ returns Index intersection",
raises=AssertionError,
strict=True,
),
),
pytest.param(
ops.ror_,
marks=pytest.mark.xfail(
reason="GH#22092 Index __or__ returns Index union",
raises=AssertionError,
strict=True,
),
),
],
)
def test_reversed_logical_op_with_index_returns_series(self, op):
# GH#22092, GH#19792
ser = Series([True, True, False, False])
idx1 = Index([True, False, True, False])
idx2 = Index([1, 0, 1, 0])
expected = Series(op(idx1.values, ser.values))
with tm.assert_produces_warning(FutureWarning):
result = op(ser, idx1)
tm.assert_series_equal(result, expected)
expected = Series(op(idx2.values, ser.values))
with tm.assert_produces_warning(FutureWarning):
result = op(ser, idx2)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"op, expected",
[
(ops.rand_, Index([False, True])),
(ops.ror_, Index([False, True])),
(ops.rxor, Index([])),
],
)
def test_reverse_ops_with_index(self, op, expected):
# https://github.com/pandas-dev/pandas/pull/23628
# multi-set Index ops are buggy, so let's avoid duplicates...
ser = Series([True, False])
idx = Index([False, True])
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# behaving as set ops is deprecated, will become logical ops
result = op(ser, idx)
tm.assert_index_equal(result, expected)
def test_logical_ops_label_based(self):
# GH#4947
# logical ops should be label based
a = Series([True, False, True], list("bca"))
b = Series([False, True, False], list("abc"))
expected = Series([False, True, False], list("abc"))
result = a & b
tm.assert_series_equal(result, expected)
expected = Series([True, True, False], list("abc"))
result = a | b
tm.assert_series_equal(result, expected)
expected = Series([True, False, False], list("abc"))
result = a ^ b
tm.assert_series_equal(result, expected)
# rhs is bigger
a = Series([True, False, True], list("bca"))
b = Series([False, True, False, True], list("abcd"))
expected = Series([False, True, False, False], list("abcd"))
result = a & b
tm.assert_series_equal(result, expected)
expected = Series([True, True, False, False], list("abcd"))
result = a | b
tm.assert_series_equal(result, expected)
# filling
# vs empty
empty = Series([], dtype=object)
result = a & empty.copy()
expected = Series([False, False, False], list("bca"))
tm.assert_series_equal(result, expected)
result = a | empty.copy()
expected = Series([True, False, True], list("bca"))
tm.assert_series_equal(result, expected)
# vs non-matching
result = a & Series([1], ["z"])
expected = Series([False, False, False, False], list("abcz"))
tm.assert_series_equal(result, expected)
result = a | Series([1], ["z"])
expected = Series([True, True, False, False], list("abcz"))
tm.assert_series_equal(result, expected)
# identity
# we would like s[s|e] == s to hold for any e, whether empty or not
for e in [
empty.copy(),
Series([1], ["z"]),
Series(np.nan, b.index),
Series(np.nan, a.index),
]:
result = a[a | e]
tm.assert_series_equal(result, a[a])
for e in [Series(["z"])]:
result = a[a | e]
tm.assert_series_equal(result, a[a])
# vs scalars
index = list("bca")
t = Series([True, False, True])
for v in [True, 1, 2]:
result = Series([True, False, True], index=index) | v
expected = Series([True, True, True], index=index)
tm.assert_series_equal(result, expected)
msg = "Cannot perform.+with a dtyped.+array and scalar of type"
for v in [np.nan, "foo"]:
with pytest.raises(TypeError, match=msg):
t | v
for v in [False, 0]:
result = Series([True, False, True], index=index) | v
expected = Series([True, False, True], index=index)
tm.assert_series_equal(result, expected)
for v in [True, 1]:
result = Series([True, False, True], index=index) & v
expected = Series([True, False, True], index=index)
| tm.assert_series_equal(result, expected) | pandas._testing.assert_series_equal |
import argparse
import sys
import os
from pathlib import Path
import logging
from typing import Dict
import numpy as np
import pandas as pd
import scipy.sparse as sp
from joblib import dump
from knodle.trainer.utils import log_section
from examples.data_preprocessing.tac_based_dataset.utils.utils import count_file_lines, encode_labels
logger = logging.getLogger(__name__)
PRINT_EVERY = 1000000
Z_MATRIX_OUTPUT_TRAIN = "train_rule_matches_z.lib"
Z_MATRIX_OUTPUT_DEV = "dev_rule_matches_z.lib"
Z_MATRIX_OUTPUT_TEST = "test_rule_matches_z.lib"
T_MATRIX_OUTPUT_TRAIN = "mapping_rules_labels.lib"
TRAIN_SAMPLES_OUTPUT = "df_train.lib"
DEV_SAMPLES_OUTPUT = "df_dev.lib"
TEST_SAMPLES_OUTPUT = "df_test.lib"
def preprocess_data(
path_train_data: str,
path_dev_data: str,
path_test_data: str,
path_labels: str,
path_lfs: str,
path_output: str
) -> None:
""" This function reads train and dev data and saved resulted files to output directory"""
Path(path_output).mkdir(parents=True, exist_ok=True)
labels2ids = get_labels(path_labels)
num_labels = len(labels2ids)
other_class_id = max(labels2ids.values()) + 1 # used for dev and test sets
lfs = | pd.read_csv(path_lfs) | pandas.read_csv |
import os, sys
import pandas as pd
import numpy as np
import pickle
import argparse
'''
It feels like the progress in this analysis pipeline is a bit
too slow, but it is always slow.
'''
def hammingDist(str1, str2):
'''
Calculating hamming distance of two strings
https://www.geeksforgeeks.org/hamming-distance-two-strings/
'''
i = 0
count = 0
while(i < len(str1)):
if(str1[i] != str2[i]):
count += 1
i += 1
return count
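# A minimal usage sketch of hammingDist (illustration only; not called by the
# pipeline). The example strings are hypothetical.
def _demo_hammingDist():
    # "karolin" and "kathrin" differ at three positions
    assert hammingDist("karolin", "kathrin") == 3
    # identical strings have distance 0
    assert hammingDist("AAAA", "AAAA") == 0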
def ec_cellBC(cellBC, cr_cellBC_list):
pop_list = []
for cr_cellBC in cr_cellBC_list:
hamming = hammingDist(cellBC, cr_cellBC)
if hamming <= 3:
pop_list.append([cr_cellBC, hamming])
if len(pop_list) == 1:
return pop_list[0][0], pop_list[0][1]
else:
#print(f'{cellBC} is a bad cell!')
return 0, 16
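# A minimal sketch of the correction rule implemented by ec_cellBC, using a
# hypothetical one-entry whitelist: a barcode with a unique match within
# Hamming distance 3 is corrected, otherwise (0, 16) is returned.
def _demo_ec_cellBC():
    whitelist = ["AAAACCCCGGGGTTTT"]
    assert ec_cellBC("AAAACCCCGGGGTTTA", whitelist) == ("AAAACCCCGGGGTTTT", 1)
    assert ec_cellBC("TTTTGGGGCCCCAAAA", whitelist) == (0, 16)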
def cell_bc_ec(quad, cr_cellBC_list):
'''
    Input: quad: table with columns cellBC, umi, pBC, rBC, and read counts.
           cr_cellBC_list: a list of CellRanger-corrected cell barcodes.
    Output: CellRanger error-corrected quad table with the same structure.
'''
barcode_mapping_dict = {}
# First extract the quad_cellBC set from quad file
quad_cellBC = list(set(quad['cellBC'].values))
    # Then error-correct each quad cellBC: an exact match to a cr_cellBC is kept as-is;
    # otherwise the cellBC is mapped to a unique cr_cellBC within Hamming distance 3.
for cellBC in quad_cellBC:
if cellBC in cr_cellBC_list:
barcode_mapping_dict[cellBC] = cellBC
else:
close_mapping, hamming = ec_cellBC(cellBC, cr_cellBC_list)
if hamming <= 3:
if close_mapping != 0:
barcode_mapping_dict[cellBC] = close_mapping
    # Then we go through all cellBCs in the quad file and only record
    # the ones whose cellBC is in the barcode_mapping_dict!
pop_list = []
for _, line in quad.iterrows():
cellBC = line['cellBC']
if cellBC in barcode_mapping_dict:
pop_list.append([barcode_mapping_dict[cellBC], line['umi'], line['pBC'], line['rBC'], line['counts']])
pop_quad = | pd.DataFrame(pop_list,columns= ['cellBC', 'umi', 'pBC', 'rBC', 'counts']) | pandas.DataFrame |
import csv
import glob
import re
from pathlib import Path
import numpy as np
import pandas as pd
from model import LSTM, ForecastDataset
from plumbum import cli
from preprocessor import Preprocessor
class ClusterForecaster:
"""
Predict cluster in workload using trained LSTMs.
Attributes
----------
prediction_interval : pd.Timedelta
Time interval to aggregate cluster counts by.
prediction_horizon : pd.Timedelta
The prediction horizon of the models to train.
prediction_seqlen : int
Number of intervals to feed the LSTM for a prediction.
models : Dict[int, LSTM]
Dictionary of trained models to perform inference by
"""
MODEL_PREFIX = "model_"
@staticmethod
def cluster_to_file(path, cluster):
"""Generate model file path from cluster name"""
return f"{path}/{ClusterForecaster.MODEL_PREFIX}{cluster}.pkl"
@staticmethod
def get_cluster_from_file(filename):
"""Infer cluster id from file name"""
m = re.search(f"(?<={ClusterForecaster.MODEL_PREFIX})[^/]*(?=\\.pkl)", filename)
if m is None:
raise RuntimeError("Could not get cluster name")
return m[0]
def __init__(
self,
train_df,
prediction_seqlen,
prediction_interval,
prediction_horizon,
save_path,
top_k=5,
override=False,
):
"""Construct the ClusterForecaster object.
Parameters
----------
train_df : pd.DataFrame
Training data grouped by cluster and timestamp
save_path : str
Directory for loading/saving trained models
top_k : int
Only train models for the top k most common clusters.
override : bool
Determines whether we should (re)train models anyway, even if they are
in the directory.
"""
assert train_df.index.names[0] == "cluster"
assert train_df.index.names[1] == "log_time_s"
self.prediction_seqlen = prediction_seqlen
self.prediction_interval = prediction_interval
self.prediction_horizon = prediction_horizon
self.models = {}
if not override:
model_files = glob.glob(str(Path(save_path) / f"{self.MODEL_PREFIX}*.pkl"))
for filename in model_files:
cluster_name = self.get_cluster_from_file(filename)
self.models[int(cluster_name)] = LSTM.load(filename)
print(f"loaded model for cluster {cluster_name}")
print(f"Loaded {len(model_files)} models")
if train_df is None:
return
# Only consider top k clusters.
cluster_totals = train_df.groupby(level=0).sum().sort_values(by="count", ascending=False)
labels = cluster_totals.index[:top_k]
print("Training on cluster time series..")
mintime = train_df.index.get_level_values(1).min()
maxtime = train_df.index.get_level_values(1).max()
dtindex = pd.DatetimeIndex([mintime, maxtime])
for cluster in labels:
if cluster in self.models and not override:
print(f"Already have model for cluster {cluster}, skipping")
continue
print(f"training model for cluster {cluster}")
cluster_counts = train_df[train_df.index.get_level_values(0) == cluster].droplevel(0)
# This zero-fills the start and ends of the cluster time series.
cluster_counts = cluster_counts.reindex(cluster_counts.index.append(dtindex), fill_value=0)
cluster_counts = cluster_counts.resample(prediction_interval).sum()
self._train_cluster(cluster_counts, cluster, save_path)
def _train_cluster(self, cluster_counts, cluster, save_path):
dataset = ForecastDataset(
cluster_counts,
sequence_length=self.prediction_seqlen,
horizon=self.prediction_horizon,
interval=self.prediction_interval,
)
self.models[cluster] = LSTM(
horizon=self.prediction_horizon,
interval=self.prediction_interval,
sequence_length=self.prediction_seqlen,
)
self.models[cluster].fit(dataset)
self.models[cluster].save(self.cluster_to_file(save_path, cluster))
def predict(self, cluster_df, cluster, start_time, end_time):
"""
Given a cluster dataset, attempt to return prediction of query count
from a cluster within the given time-range.
"""
assert cluster_df.index.names[0] == "cluster"
assert cluster_df.index.names[1] == "log_time_s"
if cluster not in cluster_df.index.get_level_values(0):
print(f"Could not find cluster {cluster} in cluster_df")
return None
cluster_counts = cluster_df[cluster_df.index.get_level_values(0) == cluster].droplevel(0)
# Truncate cluster_df to the time range necessary to generate prediction range.
# TODO(Mike): Right now, if the sequence required to predict a certain interval
# is not present in the data, we simply do not make any predictions (i.e. return 0)
# Should we produce a warning/error so the user is aware there is insufficient
# data?
trunc_start = start_time - self.prediction_horizon - (self.prediction_seqlen) * self.prediction_interval
trunc_end = end_time - self.prediction_horizon
truncated = cluster_counts[(cluster_counts.index >= trunc_start) & (cluster_counts.index < trunc_end)]
dataset = ForecastDataset(
truncated,
sequence_length=self.prediction_seqlen,
horizon=self.prediction_horizon,
interval=self.prediction_interval,
)
# generate predictions
predictions = [self.models[cluster].predict(seq) for seq, _ in dataset]
# tag with timestamps
pred_arr = [[dataset.get_y_timestamp(i), pred] for i, pred in enumerate(predictions)]
pred_df = pd.DataFrame(pred_arr, columns=["log_time_s", "count"])
pred_df.set_index("log_time_s", inplace=True)
return pred_df[start_time:]
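# A small sketch of the model-file naming convention described above
# (cluster_to_file / get_cluster_from_file round-trip); "/tmp/models" is a
# hypothetical directory used for illustration only.
def _demo_model_filenames():
    fname = ClusterForecaster.cluster_to_file("/tmp/models", 7)
    assert fname == "/tmp/models/model_7.pkl"
    assert ClusterForecaster.get_cluster_from_file(fname) == "7"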
class WorkloadGenerator:
"""
Use preprocessed query template/params and cluster to generate
representative workload.
"""
def __init__(self, preprocessor, assignment_df):
df = preprocessor.get_grouped_dataframe_interval()
# Join to cluster and group by.
joined = df.join(assignment_df)
# Calculate weight of template within each cluster.
joined["cluster"].fillna(-1, inplace=True)
summed = joined.groupby(["cluster", "query_template"]).sum()
self._preprocessor = preprocessor
self._percentages = summed / summed.groupby(level=0).sum()
def get_workload(self, cluster, cluster_count):
"""Given a cluster id and a sample size, produce a "sample" workload,
sampling from the preprocessed queries.
Parameters
----------
cluster : int
The cluster to generate for
cluster_count : scalar
The number of queries to sample
Returns
-------
predicted_queries : pd.Dataframe
A sampled workload in the form of (query, count) pairs
"""
templates = self._percentages[self._percentages.index.get_level_values(0) == cluster].droplevel(0)
templates = templates * cluster_count
# TODO(Mike): The true sample of parameters might be too inefficient,
        # but using the same parameters for all queries is not representative enough.
# True sample of parameters.
# templates_with_param_vecs = [
# (template, self._preprocessor.sample_params(template, int(count)))
# for template, count in zip(templates.index.values, templates.values)
# ]
# Sample parameters once. Then use the same parameters
# for all queries in the query template.
templates_with_param_vecs = [
(
template,
np.tile(self._preprocessor.sample_params(template, 1)[0], (int(count), 1)),
)
for template, count in zip(templates.index.values, templates.values)
]
workload = [
self._preprocessor.substitute_params(template, param_vec)
for template, param_vecs in templates_with_param_vecs
for param_vec in param_vecs
]
workload = pd.DataFrame(workload, columns=["query"])
predicted_queries = workload.groupby("query").size().sort_values(ascending=False)
return predicted_queries
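# A minimal sketch of the np.tile replication used above: one sampled
# parameter vector (a hypothetical ("foo", "42")) is repeated `count` times so
# every generated query of a template shares the same parameters.
def _demo_param_tiling():
    count = 3
    params = np.tile(np.array(["foo", "42"], dtype=object), (count, 1))
    assert params.shape == (3, 2)
    assert list(params[0]) == ["foo", "42"]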
class ForecasterCLI(cli.Application):
preprocessor_parquet = cli.SwitchAttr(["-p", "--preprocessor-parquet"], str, mandatory=True)
clusterer_parquet = cli.SwitchAttr(["-c", "--clusterer-parquet"], str, mandatory=True)
model_path = cli.SwitchAttr(["-m", "--model-path"], str, mandatory=True)
override = cli.Flag("--override-models")
start_ts = cli.SwitchAttr(["-s", "--start-time"], str, mandatory=True)
end_ts = cli.SwitchAttr(["-e", "--end-time"], str, mandatory=True)
output_csv = cli.SwitchAttr("--output-csv", str, mandatory=True)
def main(self):
pred_interval = | pd.Timedelta(seconds=1) | pandas.Timedelta |
import requests
from pandas import DataFrame as df, read_csv, Series, to_datetime
from io import StringIO
import numpy as np
class cci30(object):
def __init__(self):
self._CCI30_INDEX_PRICE_URL = "https://cci30.com/ajax/getIndexHistory.php"
self._CCI30_CONSTITUENTS_URL = "https://cci30.com/ajax/getMonthlyPropWeightHistoryProd.php"
def getIndexPrices(self) -> Series:
"""
Retrieves and returns CCi30 index value for every day.
:return: Series, the index value.
:raise Exception: if the get request fails
"""
response = requests.get(self._CCI30_INDEX_PRICE_URL)
if response.status_code == 200:
cci30Series = read_csv(StringIO(response.text), sep=',', index_col='Date')['Close']
cci30Series.index = | to_datetime(cci30Series.index) | pandas.to_datetime |
#!/usr/bin/env python
# coding: utf-8
# In[2]:
import pandas as pd
import urllib
import numpy as np
import json
from tqdm.autonotebook import tqdm
#%matplotlib inline
tqdm.pandas()
# import jellyfish
import dask.dataframe as dd
# from dask.multiprocessing import get
from importlib import reload
import AddressCleanserUtils
reload(AddressCleanserUtils)
from AddressCleanserUtils import *
# import multiprocessing
import logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
# In[ ]:
# In[3]:
starting_time = datetime.now()
# In[4]:
config_file = "config_batch"
address_file = "./address.csv.gz"
sample_size = None
import sys, getopt
opts, args = getopt.getopt(sys.argv[1:],"f:c:a:s:vq",[])
for opt, arg in opts:
if opt == "-c":
config_file = arg
if opt == "-a":
address_file = arg
if opt == "-f":
print("Run in jupyter ...", arg)
AddressCleanserUtils.within_jupyter=True
if opt == "-s":
sample_size = int(arg)
if opt == "-v": # Verbose
logger.setLevel(logging.DEBUG)
if opt == "-q": # quiet
logger.setLevel(logging.WARNING)
# In[24]:
if AddressCleanserUtils.within_jupyter :
log("Running in Jupyter, using hardcoded parameters")
# config_file = "config_best"
# address_file = "./best.csv.gz"
config_file = "config_batch"
address_file = "./address.csv.gz"
sample_size = 10000
AddressCleanserUtils.photon_host = "127.0.0.1:2322"
AddressCleanserUtils.libpostal_host = "172.18.0.3:6060"
# with_dask=False
# %matplotlib inline
# In[6]:
import importlib
log(f"Loading config file {config_file}")
config_module = importlib.import_module(config_file)
# In[7]:
# Check that some required variables are present in the configuration file
field_names = ["street_field","housenbr_field","city_field","postcode_field", "country_field", "addr_key_field"]
#other_var_names = ["photon_host","osm_host","libpostal_host", "regex_replacements"]
other_var_names = ["regex_replacements"]
for var_name in field_names + other_var_names:
assert var_name in dir(config_module), var_name + " not defined in config module " + config_file
# In[ ]:
# In[8]:
AddressCleanserUtils.street_field = config_module.street_field
AddressCleanserUtils.housenbr_field = config_module.housenbr_field
AddressCleanserUtils.city_field = config_module.city_field
AddressCleanserUtils.postcode_field = config_module.postcode_field
AddressCleanserUtils.country_field = config_module.country_field
AddressCleanserUtils.addr_key_field = config_module.addr_key_field
AddressCleanserUtils.regex_replacements = config_module.regex_replacements
AddressCleanserUtils.use_osm_parent = use_osm_parent
AddressCleanserUtils.with_rest_libpostal = with_rest_libpostal
# In[9]:
AddressCleanserUtils.pbar.register()
# In[10]:
# Check that Nominatim server is running properly
try:
osm = get_osm("Bruxelles")
assert osm[0]["namedetails"]["name:fr"] == "Bruxelles"
vlog("OSM working properly")
except Exception as e:
print("OSM not up & running")
print("OSM host: ", AddressCleanserUtils.osm_host)
raise e
# In[15]:
# In old versions of Nominatim, the "details.php" page could NOT return a JSON result, which is what allows getting place details from a place id
# In newer versions this has been added, allowing details about the parent of a place to be fetched
# In case "use_osm_parent" is true, check that "details.php" works correctly
if AddressCleanserUtils.use_osm_parent:
try :
osm_det = get_osm_details(osm[0]["place_id"])
assert osm_det["place_id"] == osm[0]["place_id"]
vlog("OSM details working properly")
except Exception as e:
print("OSM details not working")
print("OSM host: ", AddressCleanserUtils.osm_host)
raise e
# In[18]:
# Check that Photon server is running properly
try:
ph = get_photon("Bruxelles")
assert ph["features"][0]["properties"]["name"] == "Brussels"
vlog("Photon working properly")
except Exception as e:
print("Photon not up & running ; Start it with 'nohup java -jar photon-*.jar &'")
print("Photon host: ", AddressCleanserUtils.photon_host)
raise e
# In[25]:
# Check that Libpostal is running properly
try:
lpost = parse_address("Bruxelles")
assert lpost[0][0] == "bruxelles"
vlog("Libpostal working properly")
except Exception as e:
print("Libpostal not up & running ")
print("Libpostal: ", AddressCleanserUtils.libpostal_host)
raise e
# # Data preparation
# In[ ]:
# Get the addresses dataframe. Config module has to contain a "get_addresses(filename)" function, returning a dataframe, with
# column names defined by variables (defined in config module) : street_field, housenbr_field, city_field, postcode_field , addr_key_field
log("Getting addresses")
addresses = config_module.get_addresses(address_file)
log(f"Got {addresses.shape[0]} addresses")
log(addresses)
# In[14]:
if sample_size and sample_size < addresses.shape[0]:
log(f"Keep a sample of size {sample_size}")
addresses = addresses.sample(sample_size)
# In[15]:
# Check that all required fields are present in addresses dataframe
for field in field_names:
assert config_module.__getattribute__(field) in addresses, f"Field {field} missing in data !"
# In[16]:
# Check that the address identifier defined in config_module.addr_key_field is unique
assert addresses[addresses[config_module.addr_key_field].duplicated()].shape[0] == 0, "Key should be unique"
# In[17]:
vlog("Stripping and upper casing...")
addresses = addresses.apply(lambda col: col.fillna("").astype(str).str.strip().str.upper() if col.dtype.kind=='O' else col.astype(str) )
# # Main loop
# In[18]:
transformers_sequence = [ ["orig"],
["regex[init]"],
["nonum"],
["libpostal", "regex[lpost]"],
["libpostal", "regex[lpost]", "nonum"],
["libpostal", "regex[lpost]", "photon"],
["libpostal", "regex[lpost]", "photon", "nonum"],
["photon"],
["photon", "nonum"],
["nostreet"]
]
# In[19]:
def main_loop(chunk):
"""
Method "main_loop" processes the full cleansing sequence on a chunk of addresses :
- Apply a sequence of transformers (possibly empty)
- Sent the (transformed) addresses to Nominatim
- Parse and Check the results
- For the addresses with no (accepted) result, try the next sequence of transformers
"""
log(f"Chunk size: {chunk.shape[0]}")
vlog(chunk)
osm_addresses = pd.DataFrame()
rejected_addresses = pd.DataFrame()
stats = []
init_chunk_size = chunk.shape[0]
for transformers in transformers_sequence:
vlog("--------------------------")
vlog(f"| Transformers : { ';'.join(transformers) }")
vlog("--------------------------")
# display(chunk)
osm_results, rejected, step_stats = transform_and_process(chunk, transformers, config_module.addr_key_field,
config_module.street_field, config_module.housenbr_field,
config_module.city_field, config_module.postcode_field,
config_module.country_field,
check_osm_results=check_osm_results)
osm_addresses = osm_addresses.append(osm_results, sort=False).drop_duplicates()
rejected_addresses = rejected_addresses.append(rejected, sort=False).drop_duplicates()
vlog("Results: ")
vlog(osm_results.head())
vlog(osm_results.shape)
vlog(f"Match rate so far: {osm_addresses.shape[0] / init_chunk_size if init_chunk_size > 0 else '(empty chunk size)'}")
stats.append(step_stats)
vlog(step_stats)
chunk = chunk[~chunk[config_module.addr_key_field].isin(osm_results[config_module.addr_key_field])].copy()
ts = AddressCleanserUtils.timestats
tot = np.sum([ts[key] for key in ts])
if tot.total_seconds()>0:
for key in ts:
vlog(f"{key:12}: {ts[key]} ({100*ts[key]/tot:.3} %)")
vlog("")
vlog("")
vlog("####################")
vlog("")
vlog("")
log("Chunk results: ")
log(osm_addresses)
log(f"Chunk match rate: {(osm_addresses.shape[0] / init_chunk_size) if init_chunk_size > 0 else '(empty chunk size)'}")
log(pd.DataFrame(stats))
return osm_addresses, rejected_addresses, stats
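# A hypothetical miniature of the fallback strategy in main_loop (illustration
# only, not used by the script): try each transformer sequence in turn, keep
# accepted results, and retry the still-unmatched items with the next sequence.
# `try_match` is an assumed callable returning the subset of `remaining` it matched.
def _demo_transformer_fallback(items, transformer_sequences, try_match):
    matched, remaining = [], list(items)
    for transformers in transformer_sequences:
        newly_matched = try_match(remaining, transformers)
        matched.extend(newly_matched)
        remaining = [it for it in remaining if it not in newly_matched]
        if not remaining:
            break
    return matched, remaining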
# In[ ]:
# In[20]:
# Compute the number of chunks
min_nb_chunks= 4
if addresses.shape[0] > max_chunk_size * min_nb_chunks:
chunk_size = max_chunk_size
elif addresses.shape[0] < min_chunk_size * min_nb_chunks:
chunk_size = min_chunk_size
else:
chunk_size = int(np.sqrt(max_chunk_size *min_chunk_size))
log(f"Chunk_size: {chunk_size}")
# Do the main processing, with dask or simply in sequential chunks.
#
# Processing a chunk may at some point require a huge amount of memory. A single chunk with a few million addresses may result in a memory error ; this is why we split the main addresses dataframe into smaller chunks.
#
# In[21]:
stats = []
if with_dask :
from dask.diagnostics import Profiler, ResourceProfiler
#AddressCleanserUtils.with_dask = False
# Sorting : allow to increase the probability to have duplicates within a chunk
dd_to_process = dd.from_pandas(addresses.sort_values([config_module.postcode_field, config_module.street_field]).reset_index(drop=True),
chunksize=chunk_size)
dask_task = dd_to_process.map_partitions(main_loop)
with Profiler() as prof, ResourceProfiler() as rprof :
res = dask_task.compute(scheduler='processes')
log("All chunks done, gather all results...")
osm_addresses = pd.concat([chunk_osm_addresses for (chunk_osm_addresses, _, _) in res], sort=False)
rejected_addresses = pd.concat([chunk_rejected_addresses for (_, chunk_rejected_addresses, _) in res], sort=False)
for (_, _, chunk_stats) in res:
stats.extend(chunk_stats)
log(f"Global match rate: { osm_addresses.shape[0]/addresses.shape[0] } ")
else:
#AddressCleanserUtils.with_dask = True
osm_addresses = pd.DataFrame()
rejected_addresses = pd.DataFrame()
chunks_boundaries = range(chunk_size, addresses.shape[0] , chunk_size)
for chunk in tqdm(np.array_split(addresses.sort_values([config_module.postcode_field, config_module.street_field]), chunks_boundaries)):
chunk_osm_addresses, chunk_rejected_addresses, chunk_stats = main_loop(chunk)
osm_addresses = osm_addresses.append(chunk_osm_addresses, sort=False).drop_duplicates()
rejected_addresses = rejected_addresses.append(chunk_rejected_addresses, sort=False).drop_duplicates()
log(f"Global match rate so far: {osm_addresses.shape[0]/addresses.shape[0]}")
stats.extend(chunk_stats)
# In[22]:
# inclusion_test("NEU", "NEUCHATEAU")
# In[23]:
addresses
# In[24]:
# get_osm("6840 NEUFCHÂTEAU")
# In[25]:
if with_dask:
from dask.diagnostics import visualize
from bokeh.io import output_notebook, output_file
output_file("dask_stats.html")
# output_notebook()
visualize([prof, rprof])
# In[26]:
# osm_addresses.SIM_street_which.value_counts() /osm_addresses.shape[0] #.plot.pie()
# # Rejected addresses
# Give some statistics about rejected addresses.
# "rejected_addresses" contains two types of rejected addresses :
# - rejected_addresses["reject_reason"] == "mismatch" : these addresses were rejected after comparing the input and output addresses field by field
# - rejected_addresses["reject_reason"] == "tail" : when OSM returns several results, only one is kept in "osm_addresses", all the others are put in rejected_addresses
#
# Note that an address may have been rejected at a specific step (for a given sequence of transformers), but not at another one.
# "rejected_addresses" may then contain a lot of addresses for which a result has been accepted further on.
#
# "rejected_addresses_final" contains only the addresses for which all results have been rejected.
#
# In[27]:
rejected_addresses_final = rejected_addresses[rejected_addresses["reject_reason"] == "mismatch"]
rejected_addresses_final = rejected_addresses_final[~rejected_addresses_final[config_module.addr_key_field].isin(osm_addresses[config_module.addr_key_field])]
# Needed with check_with_transformed = True (but doesn't hurt if not)
rejected_addresses_final = rejected_addresses_final.drop([config_module.street_field,
config_module.housenbr_field,
config_module.postcode_field,
config_module.city_field,
config_module.country_field],
axis=1
)
# print(rejected_addresses.keys())
# print(osm_addresses.keys())
# print(rejected_addresses.keys() & osm_addresses.keys())
rejected_addresses_final = rejected_addresses_final.merge(addresses).sort_values(["SIM_street", config_module.addr_key_field])[["method",
config_module.addr_key_field, "osm_addr_in",
config_module.street_field, config_module.housenbr_field, config_module.postcode_field, config_module.city_field, config_module.country_field,
"addr_out_street", "addr_out_city", "addr_out_number", "addr_out_postcode", "addr_out_other", "SIM_street", "SIM_zip"]].drop_duplicates()
log("Rejected addresses: ")
log(rejected_addresses_final)
# In[28]:
log(f"Number of unique rejected addresses: {rejected_addresses_final[config_module.addr_key_field].nunique()}")
# In[29]:
log(f"Number of unique city-streets in rejected addresses: {rejected_addresses_final[[config_module.postcode_field, config_module.street_field]].drop_duplicates().shape[0]}")
# In[30]:
rejected_addresses_final[rejected_addresses_final.addr_out_street.isnull()]
# In[31]:
rejected_addresses_final[rejected_addresses_final.addr_out_street.notnull()]#.drop(["method"], axis=1).drop_duplicates()
# In[32]:
# Swap street - city
log("Rejected addresses, but where it might have a swap between street and city")
str_cmp= street_compare(rejected_addresses_final[config_module.street_field], rejected_addresses_final.addr_out_city)
x= rejected_addresses_final[(str_cmp>0.5) & (rejected_addresses_final.addr_out_street.isnull()) & (rejected_addresses_final.SIM_zip >= 0.1)].drop_duplicates(subset=config_module.addr_key_field)
log(x)
log(f"Number of unique addresses: {x[config_module.addr_key_field].nunique()}")
# In[33]:
# Other mismatches
rejected_addresses_final[(str_cmp<=0.5) | (rejected_addresses_final.addr_out_street.notnull()) | (rejected_addresses_final.SIM_zip < 0.1)].drop_duplicates(subset=config_module.addr_key_field)
# # No match
# In[34]:
log("Addresses with no match (but some matches where rejected)")
log(addresses[~addresses[config_module.addr_key_field].isin(osm_addresses[config_module.addr_key_field]) & addresses[config_module.addr_key_field].isin(rejected_addresses[config_module.addr_key_field])])
# In[35]:
rejected_addresses
# In[36]:
log("Addresses with no match at all")
no_match = addresses[~addresses[config_module.addr_key_field].isin(osm_addresses[config_module.addr_key_field]) & ~addresses[config_module.addr_key_field].isin(rejected_addresses[config_module.addr_key_field])]
log(no_match)
# In[37]:
log(f"Number of unique city-streets in no match addresses: {no_match[[config_module.postcode_field, config_module.street_field]].drop_duplicates().shape[0]}")
# In[38]:
log("Main cities in no match addresses: ")
log(no_match[config_module.city_field].value_counts().head(10))
# In[39]:
log("Main streets in no match addresses: ")
log(no_match[config_module.street_field].value_counts().head(10))
# # Extra house number
# In many situations, OSM does not return a correct house number :
# - Either because the building is not known by OSM. In this case, the house number is empty in the result
# - Or because the house number in the input also contains information such as box, level...
#
# We then consider that the house number is not reliable enough and compute our own house number field, named "extra_house_nbr"
# In[40]:
log("Add extra house number")
osm_addresses = add_extra_house_number(osm_addresses, addresses, street_field=config_module.street_field, housenbr_field=config_module.housenbr_field)
# In[41]:
# osm_addresses.drop("extra_house_nbr", axis=1, inplace=True)
# In[42]:
ex_hs_nb = osm_addresses[[config_module.addr_key_field, "osm_addr_in", "extra_house_nbr", "addr_out_number"]].replace("", np.NaN)
# In[43]:
log("Add new information: ")
log(ex_hs_nb[ex_hs_nb.addr_out_number.isnull() & ex_hs_nb.extra_house_nbr.notnull()])
# In[44]:
log("No number at all: ")
log(ex_hs_nb[ex_hs_nb.addr_out_number.isnull() & ex_hs_nb.extra_house_nbr.isnull()])
# In[45]:
log("Agreed: ")
log(ex_hs_nb[ex_hs_nb.addr_out_number.notnull() & ex_hs_nb.extra_house_nbr.notnull() & (ex_hs_nb.addr_out_number == ex_hs_nb.extra_house_nbr)])
# In[46]:
log("Disagreed: ")
log(ex_hs_nb[ex_hs_nb.addr_out_number.notnull() & ex_hs_nb.extra_house_nbr.notnull() & (ex_hs_nb.addr_out_number != ex_hs_nb.extra_house_nbr)])
# In[47]:
log("Error: ") # There were no number in input, but OSM found one
log(ex_hs_nb[ex_hs_nb.addr_out_number.notnull() & ex_hs_nb.extra_house_nbr.isnull()])
# In[48]:
extra_address_stats = {
"New information" : (ex_hs_nb.addr_out_number.isnull() & ex_hs_nb.extra_house_nbr.notnull()).sum(),
"No number at all": (ex_hs_nb.addr_out_number.isnull() & ex_hs_nb.extra_house_nbr.isnull() ).sum(),
"Agree" : (ex_hs_nb.addr_out_number.notnull() & ex_hs_nb.extra_house_nbr.notnull() & (ex_hs_nb.addr_out_number == ex_hs_nb.extra_house_nbr)).sum(),
"Disagree": (ex_hs_nb.addr_out_number.notnull() & ex_hs_nb.extra_house_nbr.notnull() & (ex_hs_nb.addr_out_number != ex_hs_nb.extra_house_nbr)).sum(),
"Error" : (ex_hs_nb.addr_out_number.notnull() & ex_hs_nb.extra_house_nbr.isnull()).sum()
}
extra_address_stats = | pd.DataFrame(extra_address_stats, index=["Count"]) | pandas.DataFrame |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import re
import os
def get_plot_data(path, span=100):
df = pd.DataFrame()
with open(path + 'test.txt') as file:
data = pd.read_csv(file, index_col=None)
df = df.append(data, ignore_index=True)
df['r'] = df['r'].ewm(span=span).mean()
return df
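# A minimal sketch of the smoothing applied in get_plot_data: an exponentially
# weighted moving average over the reward column (span=3 here for brevity).
def _demo_reward_smoothing():
    rewards = pd.Series([0.0, 1.0, 0.0, 1.0, 1.0])
    smoothed = rewards.ewm(span=3).mean()
    assert len(smoothed) == len(rewards)
    assert 0.0 <= smoothed.iloc[-1] <= 1.0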
i = 4
TIMESTEP = 1e6
NSAMPLE = 1e4
GAMES = ['Breakout', 'Seaquest', 'Pong', 'MontezumaRevenge', 'BitFlip']
YMAXS = [600, 2000, 5000, 1, 1, 6000, 17000, 1, 1]
METHODS = ['dqn', 'her-dqn']
res_dir = './res/'
files = os.listdir(res_dir)
sample_list = np.arange(0, TIMESTEP, TIMESTEP/NSAMPLE, dtype=np.int)
df = pd.DataFrame()
for file in os.listdir(res_dir):
m = re.match('(.*)_(.*)_(.*)', file)
env = m.group(1)
method = m.group(2)
seed = m.group(3)
if (GAMES[i] in env) and (method in METHODS):
path = res_dir + file + '/'
data = get_plot_data(path)
sample = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
from functools import reduce
import pickle
import os
import pymssql
from virgo import market
startDate_default = '20060101'
endDate_default = (datetime.now() + timedelta(days=-1)).strftime('%Y%m%d')
# endDate_default = datetime.now().strftime('%Y%m%d')
indexTickerUnivSR_default = np.array(['000300.SH', '000016.SH', '000905.SH'])
indexTickerNameUnivSR_default = np.array(['沪深300', '上证50', '中证500'])
# Global val
conn243 = pymssql.connect(server='192.168.1.243', user="yuman.hu", password="<PASSWORD>")
conn247 = pymssql.connect(server='192.168.1.247', user="yuman.hu", password="<PASSWORD>")
# daily data download
class dailyQuant(object):
def __init__(self, startDate=startDate_default, endDate=endDate_default,
indexTickerUnivSR=indexTickerUnivSR_default, indexTickerNameUnivSR=indexTickerNameUnivSR_default):
self.startDate = startDate
self.endDate = endDate
self.rawData_path = '../data/rawData/'
self.fundamentalData_path = '../data/fundamentalData/'
self.indexTickerUnivSR = indexTickerUnivSR
self.indexTickerNameUnivSR = indexTickerNameUnivSR
self.tradingDateV, self.timeSeries = self.get_dateData()
self.tickerUnivSR, self.stockTickerUnivSR, self.tickerNameUnivSR, self.stockTickerNameUnivSR, self.tickerUnivTypeR = self.get_tickerData()
def get_dateData(self):
sql = '''
SELECT [tradingday]
FROM [Group_General].[dbo].[TradingDayList]
where tradingday>='20060101'
order by tradingday asc
'''
dateSV = pd.read_sql(sql, conn247)
tradingdays = dateSV.tradingday.unique()
tradingDateV = np.array([x.replace('-', '') for x in tradingdays])
timeSeries = pd.Series(pd.to_datetime(tradingDateV))
pd.Series(tradingDateV).to_csv(self.rawData_path+ 'tradingDateV.csv', index=False)
return tradingDateV, timeSeries
def get_tickerData(self):
# and B.[SecuAbbr] not like '%%ST%%'
# where ChangeDate>='%s'
sql = '''
SELECT A.[ChangeDate],A.[ChangeType],B.[SecuCode],B.[SecuMarket],B.[SecuAbbr]
FROM [JYDB].[dbo].[LC_ListStatus] A
inner join [JYDB].[dbo].SecuMain B
on A.[InnerCode]=B.[InnerCode]
and B.SecuMarket in (83,90)
and B.SecuCategory=1
order by SecuCode asc
'''
dataV = pd.read_sql(sql, conn243)
flagMarket = dataV.SecuMarket == 83
dataV['SecuCode'][flagMarket] = dataV['SecuCode'].map(lambda x: x + '.SH')
dataV['SecuCode'][~flagMarket] = dataV['SecuCode'].map(lambda x: x + '.SZ')
# dataV.ChangeDate = pd.Series([x.strftime('%Y%m%d') for x in dataV.ChangeDate.values])
dataV.ChangeDate = dataV.ChangeDate.map(lambda x: x.strftime('%Y%m%d'))
flagV = np.full(len(dataV), True)
flagList = []
for i in range(len(dataV)):
if dataV.iat[i, 1] == 4:
if dataV.iat[i, 0] < self.tradingDateV[0]:
flagList.append(dataV.iat[i, 2])
for i in range(len(dataV)):
if dataV.iat[i, 2] in flagList:
flagV[i] = False
dataV = dataV[flagV]
stockTickerUnivSR = dataV.SecuCode.unique()
tickerUnivSR = np.append(self.indexTickerUnivSR, stockTickerUnivSR)
stockTickerNameUnivSR = dataV.SecuAbbr.unique()
tickerNameUnivSR = np.append(self.indexTickerNameUnivSR, stockTickerNameUnivSR)
tickerUnivTypeR = np.append(np.full(len(self.indexTickerUnivSR), 3), np.ones(len(dataV)))
pd.DataFrame(self.indexTickerUnivSR).T.to_csv(self.rawData_path+'indexTickerUnivSR.csv', header=False, index=False)
pd.DataFrame(stockTickerUnivSR).T.to_csv(self.rawData_path+'stockTickerUnivSR.csv', header=False, index=False)
pd.DataFrame(tickerUnivSR).T.to_csv(self.rawData_path+'tickerUnivSR.csv', header=False, index=False)
pd.DataFrame(self.indexTickerNameUnivSR).T.to_csv(self.rawData_path+'indexTickerNameUnivSR.csv', header=False, index=False)
pd.DataFrame(stockTickerNameUnivSR).T.to_csv(self.rawData_path+'stockTickerNameUnivSR.csv', header=False, index=False)
pd.DataFrame(tickerNameUnivSR).T.to_csv(self.rawData_path+'tickerNameUnivSR.csv', header=False, index=False)
pd.DataFrame(tickerUnivTypeR).T.to_csv(self.rawData_path+'tickerUnivTypeR.csv', header=False, index=False)
return tickerUnivSR, stockTickerUnivSR, tickerNameUnivSR, stockTickerNameUnivSR, tickerUnivTypeR
def __tradingData(self,tradingDay):
sql = '''
SELECT A.[TradingDay], B.[SecuMarket], B.[SecuCode], A.[PrevClosePrice],
A.[OpenPrice],A.[HighPrice],A.[LowPrice],A.[ClosePrice], A.[TurnoverVolume],A.[TurnoverValue]
FROM [JYDB].[dbo].[QT_DailyQuote] A
inner join [JYDB].[dbo].[SecuMain] B
on A.[InnerCode]=B.[InnerCode]
and B.SecuMarket in (83,90)
and B.SecuCategory=1
where A.tradingday='%s'
''' % tradingDay
dataStock = pd.read_sql_query(sql, conn243)
sql = '''
SELECT A.[TradingDay], B.[SecuMarket], B.[SecuCode], A.[PrevClosePrice],
A.[OpenPrice],A.[HighPrice],A.[LowPrice],A.[ClosePrice], A.[TurnoverVolume],A.[TurnoverValue]
FROM [JYDB].[dbo].[QT_IndexQuote] A
inner join [JYDB].[dbo].[SecuMain] B
on A.[InnerCode]=B.[InnerCode]
and B.SecuMarket in (83,90)
and (B.SecuCode = '000300' or B.SecuCode = '000016' or B.SecuCode = '000905')
and B.SecuCategory=4
where A.tradingday='%s'
''' % tradingDay
dataIndex = pd.read_sql_query(sql, conn243)
dataV = pd.concat([dataIndex,dataStock])
sql = '''
SELECT [TradingDay], [SecuCode], [StockReturns]
FROM [Group_General].[dbo].[DailyQuote]
where tradingday='%s'
''' % tradingDay
dataStock = pd.read_sql_query(sql, conn247)
sql = '''
SELECT A.[TradingDay], B.[SecuCode], A.[ChangePCT]
FROM [JYDB].[dbo].[QT_IndexQuote] A
inner join [JYDB].[dbo].[SecuMain] B
on A.[InnerCode]=B.[InnerCode]
and B.SecuMarket in (83,90)
and (B.SecuCode = '000300' or B.SecuCode = '000016' or B.SecuCode = '000905')
and B.SecuCategory=4
where A.tradingday='%s'
''' % tradingDay
dataIndex = pd.read_sql_query(sql, conn243)
dataIndex.ChangePCT = dataIndex.ChangePCT / 100
dataIndex = dataIndex.rename({'ChangePCT': 'StockReturns'}, axis='columns')
dataR = pd.concat([dataIndex, dataStock])
data = pd.merge(dataV,dataR)
flagMarket = data.SecuMarket==83
data['SecuCode'][flagMarket] = data['SecuCode'].map(lambda x: x + '.SH')
data['SecuCode'][~flagMarket] = data['SecuCode'].map(lambda x: x + '.SZ')
data.TradingDay = data.TradingDay.map(lambda x: x.strftime('%Y%m%d'))
preCloseM = pd.DataFrame(pd.pivot_table(data,values='PrevClosePrice',index='TradingDay',columns='SecuCode'),index=[str(tradingDay)],columns=self.tickerUnivSR)
openM = pd.DataFrame(pd.pivot_table(data,values='OpenPrice',index='TradingDay',columns='SecuCode'),index=[str(tradingDay)],columns=self.tickerUnivSR)
highM = pd.DataFrame(pd.pivot_table(data,values='HighPrice',index='TradingDay',columns='SecuCode'),index=[str(tradingDay)],columns=self.tickerUnivSR)
lowM =pd.DataFrame(pd.pivot_table(data,values='LowPrice',index='TradingDay',columns='SecuCode'),index=[str(tradingDay)],columns=self.tickerUnivSR)
closeM = pd.DataFrame(pd.pivot_table(data,values='ClosePrice',index='TradingDay',columns='SecuCode'),index=[str(tradingDay)],columns=self.tickerUnivSR)
volumeM = pd.DataFrame(pd.pivot_table(data,values='TurnoverVolume',index='TradingDay',columns='SecuCode'),index=[str(tradingDay)],columns=self.tickerUnivSR)
amountM = pd.DataFrame(pd.pivot_table(data,values='TurnoverValue',index='TradingDay',columns='SecuCode'),index=[str(tradingDay)],columns=self.tickerUnivSR)
retM = pd.DataFrame(pd.pivot_table(data,values='StockReturns',index='TradingDay',columns='SecuCode'),index=[str(tradingDay)], columns=self.tickerUnivSR)
sql = '''
SELECT A.[ExDiviDate], B.[SecuMarket], B.[SecuCode], A.[AdjustingFactor]
FROM [JYDB].[dbo].[QT_AdjustingFactor] A
inner join [JYDB].[dbo].[SecuMain] B
on A.[InnerCode]=B.[InnerCode]
and B.SecuMarket in (83,90)
and B.SecuCategory=1
'''
dataAF = pd.read_sql_query(sql, conn243)
dataAF = dataAF.rename({'ExDiviDate':'TradingDay'},axis=1)
flagMarket = dataAF.SecuMarket == 83
dataAF['SecuCode'][flagMarket] = dataAF['SecuCode'].map(lambda x: x + '.SH')
dataAF['SecuCode'][~flagMarket] = dataAF['SecuCode'].map(lambda x: x + '.SZ')
dataAF.TradingDay = dataAF.TradingDay.map(lambda x: x.strftime('%Y%m%d'))
adjFactorM = pd.pivot_table(dataAF, values='AdjustingFactor', index='TradingDay', columns='SecuCode')
adjFactorM.fillna(method='pad', inplace=True)
adjFactorM = pd.DataFrame(adjFactorM ,index=self.tradingDateV, columns=self.tickerUnivSR)
adjFactorM.fillna(method='pad', inplace=True)
adjFactorM =pd.DataFrame(adjFactorM ,index=[str(tradingDay)])
sql = '''
SELECT A.[ChangeDate],A.[ChangeType],B.[SecuCode],B.[SecuMarket]
FROM [JYDB].[dbo].[LC_ListStatus] A
inner join [JYDB].[dbo].SecuMain B
on A.[InnerCode]=B.[InnerCode]
and B.SecuMarket in (83,90)
and B.SecuCategory=1
where (A.ChangeType = 1 or A.ChangeType = 4)
'''
dataStock = pd.read_sql_query(sql, conn243)
sql = '''
SELECT A.[PubDate],B.[SecuCode],B.[SecuMarket]
FROM [JYDB].[dbo].[LC_IndexBasicInfo] A
inner join [JYDB].[dbo].[SecuMain] B
on A.[IndexCode]=B.[InnerCode]
and B.SecuMarket in (83,90)
and (B.SecuCode = '000300' or B.SecuCode = '000016' or B.SecuCode = '000905')
and B.SecuCategory=4
'''
dataIndex = pd.read_sql_query(sql, conn243)
dataIndex['ChangeType'] = 1
dataIndex = dataIndex.rename({'PubDate': 'ChangeDate'}, axis='columns')
dataV = pd.concat([dataIndex, dataStock])
flagMarket = dataV.SecuMarket == 83
dataV['SecuCode'][flagMarket] = dataV['SecuCode'].map(lambda x: x + '.SH')
dataV['SecuCode'][~flagMarket] = dataV['SecuCode'].map(lambda x: x + '.SZ')
# dataV.ChangeDate = pd.Series([x.strftime('%Y%m%d') for x in dataV.ChangeDate.values])
dataV.ChangeDate = dataV.ChangeDate.map(lambda x: x.strftime('%Y%m%d'))
listedM = pd.pivot_table(dataV, values='ChangeType', index='ChangeDate', columns='SecuCode')
dateTotal = np.union1d(listedM.index.values, [str(tradingDay)])
listedM = pd.DataFrame(listedM, index=dateTotal, columns=self.tickerUnivSR)
listedM[listedM == 4] = 0
listedM.fillna(method='pad', inplace=True)
listedM = pd.DataFrame(listedM,index= [str(tradingDay)])
listedM = listedM.fillna(0)
sql = '''
SELECT A.[SuspendDate],A.[ResumptionDate],A.[SuspendTime], A.[ResumptionTime], B.[SecuCode],B.[SecuMarket]
FROM [JYDB].[dbo].[LC_SuspendResumption] A
inner join [JYDB].[dbo].[SecuMain] B
on A.[InnerCode]=B.[InnerCode]
and B.SecuMarket in (83,90)
and B.SecuCategory=1
where A.[SuspendDate] = '%s'
'''%tradingDay
if tradingDay == self.tradingDateV[0]:
sql = sql.replace('A.[SuspendDate] = ','A.[SuspendDate] <= ')
dataSusp = pd.read_sql_query(sql, conn243)
flagMarket = dataSusp.SecuMarket == 83
dataSusp['SecuCode'][flagMarket] = dataSusp['SecuCode'].map(lambda x: x + '.SH')
dataSusp['SecuCode'][~flagMarket] = dataSusp['SecuCode'].map(lambda x: x + '.SZ')
dataSusp.SuspendDate = dataSusp.SuspendDate.map(lambda x: x.strftime('%Y%m%d'))
dataSusp['flag'] = 1
startFlag = pd.pivot_table(dataSusp, values='flag',index='SuspendDate', columns='SecuCode')
try:
startFlag = pd.DataFrame(startFlag, index=[str(tradingDay)], columns=self.tickerUnivSR)
except:
startFlag = pd.DataFrame(index=[str(tradingDay)], columns=self.tickerUnivSR)
endFlag = pd.DataFrame(index=[str(tradingDay)], columns=self.tickerUnivSR)
amount = amountM.fillna(0)
flag = (amount == 0)
endFlag[startFlag == 1] = 1
endFlag[flag] = 1
suspM = endFlag.fillna(0)
suspM[(listedM==0)] = 1
else:
dataSusp = pd.read_sql_query(sql, conn243)
flagMarket = dataSusp.SecuMarket == 83
dataSusp['SecuCode'][flagMarket] = dataSusp['SecuCode'].map(lambda x: x + '.SH')
dataSusp['SecuCode'][~flagMarket] = dataSusp['SecuCode'].map(lambda x: x + '.SZ')
dataSusp.SuspendDate = dataSusp.SuspendDate.map(lambda x: x.strftime('%Y%m%d'))
file2 = open('../data/rawData/{}.pkl'.format(self.tradingDateV[self.tradingDateV.tolist().index(tradingDay)-1]), 'rb')
suspPre = pickle.load(file2)['suspM']
file2.close()
dataSusp['flag'] = 1
startFlag = pd.pivot_table(dataSusp, values='flag',index='SuspendDate', columns='SecuCode')
try:
startFlag = pd.DataFrame(startFlag, index=[str(tradingDay)], columns=self.tickerUnivSR)
except:
startFlag = pd.DataFrame(index=[str(tradingDay)], columns=self.tickerUnivSR)
endFlag = pd.DataFrame(index=[str(tradingDay)], columns=self.tickerUnivSR)
amount = amountM.fillna(0)
flag = (amount == 0)
endFlag[startFlag == 1] = 1
endFlag[~flag] = 0
suspM = pd.concat([suspPre,endFlag]).fillna(method='pad')
suspM = pd.DataFrame(suspM,index=[str(tradingDay)])
suspM[(listedM==0)] = 1
sql='''
SELECT A.[SpecialTradeTime],A.[SpecialTradeType],B.[SecuCode],B.[SecuMarket]
FROM [JYDB].[dbo].[LC_SpecialTrade] A
inner join [JYDB].[dbo].[SecuMain] B
on A.[InnerCode]=B.[InnerCode]
and B.SecuMarket in (83,90)
and B.SecuCategory=1
where (A.[SpecialTradeType]=1 or A.[SpecialTradeType] = 2 or A.[SpecialTradeType] = 5 or A.[SpecialTradeType] = 6)
and A.[SpecialTradeTime] = '%s'
'''% tradingDay
if tradingDay == self.tradingDateV[0]:
sql = sql.replace('A.[SpecialTradeTime] = ','A.[SpecialTradeTime] <= ')
dataV = pd.read_sql_query(sql, conn243)
flagMarket = dataV.SecuMarket == 83
dataV['SecuCode'][flagMarket] = dataV['SecuCode'].map(lambda x: x + '.SH')
dataV['SecuCode'][~flagMarket] = dataV['SecuCode'].map(lambda x: x + '.SZ')
dataV.SpecialTradeTime = dataV.SpecialTradeTime.map(lambda x: x.strftime('%Y%m%d'))
dataV['SpecialTradeType'][dataV['SpecialTradeType'] == 5] = 1
dataV['SpecialTradeType'][dataV['SpecialTradeType'] == 2] = 0
dataV['SpecialTradeType'][dataV['SpecialTradeType'] == 6] = 0
stStateM = pd.pivot_table(dataV, values='SpecialTradeType', index='SpecialTradeTime', columns='SecuCode')
dateTotal = np.union1d(stStateM.index.values, [str(tradingDay)])
stStateM = pd.DataFrame(stStateM, index=dateTotal, columns=self.tickerUnivSR)
stStateM = stStateM.fillna(method='pad')
stStateM = pd.DataFrame(stStateM, index=[str(tradingDay)])
stStateM = stStateM.fillna(0)
else:
try:
file2 = open('../data/rawData/{}.pkl'.format(self.tradingDateV[self.tradingDateV.tolist().index(tradingDay)-1]), 'rb')
stStatePre = pickle.load(file2)['stStateM']
file2.close()
dataV = pd.read_sql_query(sql, conn243)
flagMarket = dataV.SecuMarket == 83
dataV['SecuCode'][flagMarket] = dataV['SecuCode'].map(lambda x: x + '.SH')
dataV['SecuCode'][~flagMarket] = dataV['SecuCode'].map(lambda x: x + '.SZ')
dataV.SpecialTradeTime = dataV.SpecialTradeTime.map(lambda x: x.strftime('%Y%m%d'))
dataV['SpecialTradeType'][dataV['SpecialTradeType'] == 5] = 1
dataV['SpecialTradeType'][dataV['SpecialTradeType'] == 2] = 0
dataV['SpecialTradeType'][dataV['SpecialTradeType'] == 6] = 0
stStateM = pd.pivot_table(dataV, values='SpecialTradeType', index='SpecialTradeTime', columns='SecuCode')
stStateM = pd.concat([stStatePre,stStateM]).fillna(method='pad')
except:
file2 = open('../data/rawData/{}.pkl'.format(self.tradingDateV[self.tradingDateV.tolist().index(tradingDay)-1]), 'rb')
stStatePre = pickle.load(file2)['stStateM']
file2.close()
stStateM = pd.DataFrame(index=[str(tradingDay)], columns=self.tickerUnivSR)
stStateM = | pd.concat([stStatePre,stStateM]) | pandas.concat |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from IPython.display import display, HTML
from matplotlib.patches import Patch
from pymongo import MongoClient
from pprint import pprint
import pymongo
import json
import glob
with open('credential.json','r') as f:
cred = json.load(f)
#from crea_df import storico_individuale
from matplotlib.offsetbox import OffsetImage,AnnotationBbox
from matplotlib import rcParams
# figure size in inches
rcParams['figure.figsize'] = 11.7,8.27
# Set the style globally
plt.style.use('default')
rcParams['font.family'] = 'sans-serif'
rcParams['font.sans-serif'] = 'Exo 2'
rcParams['font.weight'] = '500'
#rcParams['font.monospace'] = 'Ubuntu Mono'
rcParams['font.size'] = 16
rcParams['axes.labelsize'] = 16
rcParams['axes.labelweight'] = '500'
rcParams['axes.titleweight'] = '700'
rcParams['axes.titlesize'] = 16
rcParams['xtick.labelsize'] = 14
rcParams['ytick.labelsize'] = 14
rcParams['legend.fontsize'] = 12
rcParams['figure.titlesize'] = 18
current_season = '2021-22'
import os
# Determine the last valid matchday
def current_matchday(season=current_season):
cluster = MongoClient(cred['cred'])
db = cluster['Game']
coll_res = db['Results']
#giornate = len(next(os.walk('../IGNOBEL/Dati_storici/'))[2])
#giornate = len(glob.glob('../IGNOBEL/Dati_storici/*.pkl'))
giornate = len(list(coll_res.find({'season':current_season})))
return giornate
"""def storico_IG(giornata, dict_names, path = "../IGNOBEL/Dati_storici/"):
test_dict={
'enzo':{0:['gg','pf','ps','gs','c','pan','mod','inf','nome']},
'pietro':{0:['gg','pf','ps','gs','c','pan','mod','inf','nome']},
'mario':{0:['gg','pf','ps','gs','c','pan','mod','inf','nome']},
'musci8':{0:['gg','pf','ps','gs','c','pan','mod','inf','nome']},
'franky':{0:['gg','pf','ps','gs','c','pan','mod','inf','nome']},
'nanni':{0:['gg','pf','ps','gs','c','pan','mod','inf','nome']},
'emiliano':{0:['gg','pf','ps','gs','c','pan','mod','inf','nome']},
'luca':{0:['gg','pf','ps','gs','c','pan','mod','inf','nome']}}
for i in range(1,giornata+1):
G=pd.read_pickle(path+"Giornata_"+str(i)+".pkl")
for team, content in G.items():
test_dict[dict_names[team]][i] = [i, content[0], float(content[1]), content[2], content[3]+2*content[4], content[5], content[6], content[7], dict_names[team]]
return(test_dict)"""
def storico_IG_mongo(giornata, dict_names, season = current_season):
cluster = MongoClient(cred['cred'])
db = cluster['Game']
coll_res = db['Results']
test_dict={
'enzo':{0:['gg','pf','ps','gs','c','pan','mod','inf','nome']},
'pietro':{0:['gg','pf','ps','gs','c','pan','mod','inf','nome']},
'mario':{0:['gg','pf','ps','gs','c','pan','mod','inf','nome']},
'musci8':{0:['gg','pf','ps','gs','c','pan','mod','inf','nome']},
'franky':{0:['gg','pf','ps','gs','c','pan','mod','inf','nome']},
'nanni':{0:['gg','pf','ps','gs','c','pan','mod','inf','nome']},
'emiliano':{0:['gg','pf','ps','gs','c','pan','mod','inf','nome']},
'luca':{0:['gg','pf','ps','gs','c','pan','mod','inf','nome']}}
for i in range(1,giornata+1):
#G=pd.read_pickle(path+"Giornata_"+str(i)+".pkl")
post = coll_res.find_one({'season':season,'matchday':i})
G = pd.DataFrame(post['results'])
for team, content in G.items():
test_dict[dict_names[team]][i] = [i, content[0], float(content[1]), content[2], content[3]+2*content[4], content[5], content[6], content[7], dict_names[team]]
return(test_dict)
def storico_individuale(nome, giornata):
dict_names={
'AS 800A': 'enzo',
'PDG 1908': 'pietro',
'IGNORANZA EVERYWHERE': 'mario',
'SOROS FC': 'musci8',
'MAINZ NA GIOIA': 'franky',
'<NAME>': 'nanni',
'I DISEREDATI': 'emiliano',
'XYZ': 'luca'
}
test_dict = storico_IG_mongo(giornata, dict_names)
#test_dict = storico_IG(giornata, dict_names)
df = pd.DataFrame(data=test_dict[nome]).T
new_header = df.iloc[0] #grab the first row for the header
df = df[1:] #take the data less the header row
df.columns = new_header
#df = df.reset_index()
#df = df.drop('index',axis=1)
return df
### Init dataframes #########################################################
def set_par(fasce = 2):
Teams = {'luca' : ['XYZ', 'darkblue'],
'franky' : ['<NAME>', 'r'],
'emiliano' : ['<NAME>', 'skyblue'],
'nanni' : ['Palla Pazza', 'firebrick'],
'enzo' : ['AS 800A', 'gold'],
'pietro' : ['PDG 1908', 'dodgerblue'],
'musci8' : ['Soros fc', 'lightgreen'],
'mario' : ['Ignoranza', 'pink'],
}
Logos = {'luca' : '../Logos/fit/luca.png',
'franky' : '../Logos/fit/franky.png',
'nanni' : '../Logos/fit/nanni.png',
'pietro' : '../Logos/fit/pietro.png',
'mario' : '../Logos/fit/mario.png',
'enzo' : '../Logos/fit/enzo.png',
'musci8' : '../Logos/fit/musci8.png',
'emiliano' : '../Logos/fit/emiliano.png'}
parameters = [
        'punti_fatti', # fantasy points scored
        'punti_subiti', # fantasy points conceded
        'goal_subiti_por', # goals conceded by the goalkeeper counting toward the team score
        'cartellini', # cards received by players counting toward the team score (yellow=1, red=2)
        'bonus_panchina', # sum of the bonuses of bench players not counting toward the team score
        'mod_difesa', # defense modifier
        'infortunati' # injured players for the matchday
        #'mod_fairplay' # fair-play modifier
]
df = pd.DataFrame(columns=parameters)
df_luca = df
df_franky = df
df_nanni = df
df_pietro = df
df_mario = df
df_enzo = df
df_musci8 = df
df_emiliano = df
Results = {
'luca' : df_luca,
'franky' : df_franky,
'emiliano' : df_emiliano,
'nanni' : df_nanni,
'enzo' : df_enzo,
'pietro' : df_pietro,
'musci8' : df_musci8,
'mario' : df_mario,
}
# Steps of scored goals
#goal_marks=np.array([66,68,70,72,74,76,78,80,82,84,86,88,90])
goal_marks = np.arange(66,100,fasce)
return Teams, Logos, parameters, Results, goal_marks
### Utility functions ################################################
def get_goal(fp, goal_marks):
'''Return number of goals given fantapoints'''
goal_counter = 0
for mark in goal_marks:
if fp<mark: return goal_counter
goal_counter = goal_counter+1
return goal_counter
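# Example with the default goal marks (fasce=2 -> thresholds 66, 68, ..., 98):
# get_goal(65.5, goal_marks) -> 0, get_goal(66.0, goal_marks) -> 1 and
# get_goal(71.5, goal_marks) -> 3, since the 66, 68 and 70 thresholds are all reached.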
def esito(gf,gs):
'''Return match result (V,P,S) given goal scored and conceded'''
if gf>gs: return 'V'
elif gf<gs: return 'S'
else: return 'P'
def punti(esito):
    '''Return standing points given match result (V=3, P=1, S=0)'''
    if esito=='V': return 3
    elif esito=='S': return 0
    elif esito=='P': return 1
    else:
        print('Esito non valido')
        return None
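# Example: a 3-2 win gives esito(3, 2) == 'V' and punti('V') == 3;
# a 1-1 draw gives esito(1, 1) == 'P' and punti('P') == 1.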
def mod_fairplay(cartellini):
'''Return modificatore fairplay value given number of yellow/red cards'''
if cartellini==0: return 1
return 0
def get_team_colors(Teams):
'''Return list of team colors by default order'''
colors = []
for key in Teams.keys():
colors.append(Teams[key][1])
return colors
def fattore_distacco(Total):
    '''Standardized luck factor combining fantapoints scored and gap from the league leader'''
pf_med = np.median(Total['punti_fatti'])
pf_std = np.std(Total['punti_fatti'])
dist_med = np.median(Total['distacco'])
dist_std = np.std(Total['distacco'])
pf_rel = (Total['punti_fatti'] - pf_med) / pf_std
dist_rel = (Total['distacco'] - dist_med) / dist_std
return -dist_rel-pf_rel
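# In fattore_distacco both punti_fatti and distacco are standardized with the
# league median and standard deviation, then summed with a minus sign: a team
# that scores many fantapoints (pf_rel > 0) yet sits far behind the leader
# (dist_rel > 0) gets a strongly negative, i.e. unlucky, factor.
# Illustrative numbers (hypothetical): pf_rel = 1.2, dist_rel = 0.8
# -> fattore_distacco = -0.8 - 1.2 = -2.0.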
### Fill dataframe per match ################################
#############################################################
# Build the calendar with a dictionary of each team's rival, matchday by matchday
#############################################################
def make_calendar_array(data_path = '../IGNOBEL/Dati_storici/'):
giornate = current_matchday()
cluster = MongoClient(cred['cred'])
db = cluster['Game']
coll_res = db['Results']
# dictionary converting team names into owner names
team2owners = {
'XYZ': 'luca',
'MAINZ NA GIOIA': 'franky',
'I DISEREDATI': 'emiliano',
'<NAME>' : 'nanni',
'AS 800A': 'enzo',
'PDG 1908': 'pietro',
'SOROS FC': 'musci8',
'IGNORANZA EVERYWHERE':'mario',
}
    calendar = []  # array of team pairings, matchday by matchday
    rivals = {}    # dictionary of pairings for a single matchday
    # Fill the calendar array with a pairing dictionary for every owner
for giornata in range(1,giornate+1):
# read Dati storici
#df = pd.read_pickle(data_path + 'Giornata_%d.pkl' % giornata)
df = pd.DataFrame(coll_res.find_one({'season':'2021-22','matchday':giornata})['results'])
teams = df.columns
        # pair up the teams
for i, team in enumerate(teams):
owner = team2owners[team]
if i%2 == 0:
rival_owner = team2owners[teams[i+1]]
else:
rival_owner = team2owners[teams[i-1]]
            rivals[owner] = rival_owner # matchday pairing dictionary
        # append this matchday's pairings to the calendar
calendar.append(rivals)
rivals = {}
return calendar
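# In make_calendar_array the teams appear in the results dataframe in match
# order, so columns are paired (0, 1), (2, 3), (4, 5), (6, 7). Illustrative
# matchday (hypothetical column order): if the first two columns are 'XYZ' and
# 'SOROS FC', then rivals['luca'] == 'musci8' and rivals['musci8'] == 'luca'.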
#############################################################
# Add the opponent ('avversario') column to the dataframes in Results
#############################################################
def add_avversario_column_to_Results(calendar, Results):
giornate = current_matchday()
for owner in Results.keys():
avversari_array = []
for giornata in range(0,giornate):
avversario = calendar[giornata][owner]
avversari_array.append(avversario)
Results[owner]['avversario'] = avversari_array
return Results
def fill_dataframe_partita(Results_0, giornate, parameters, goal_marks, Teams, Print = False):
Results = Results_0
for team, df in Results.items():
Results[team] = Results[team][0:0]
#Results[team] = pd.read_csv(team+'.txt', sep=' ', names=parameters, skiprows=1)
        #################### ADDED BY LUCA: BUILD THE DATAFRAMES DIRECTLY WITH THE IMPORTED FUNCTION, NO LONGER READ FROM PICKLES
#Results[team] = pd.read_pickle('Dati_individuali/'+team+'.pkl')
Results[team] = storico_individuale(team, giornate)
Results[team] = Results[team].drop('gg',axis=1)
Results[team] = Results[team].drop('nome',axis=1)
Results[team].columns = parameters
#####################
Results[team]['mod_fairplay'] = Results[team].apply(lambda x: mod_fairplay(x['cartellini']), axis=1)
Results[team]['GF'] = Results[team].apply(lambda x: get_goal(x['punti_fatti'], goal_marks), axis=1)
Results[team]['GS'] = Results[team].apply(lambda x: get_goal(x['punti_subiti'], goal_marks), axis=1)
Results[team]['esito'] = Results[team].apply(lambda x: esito(x['GF'],x['GS']), axis=1)
Results[team]['pti'] = Results[team].apply(lambda x: punti(x['esito']), axis=1)
Results[team] = Results[team].assign(Team=team)
# Add avversario column to each team's dataframe
calendar = make_calendar_array()
Results = add_avversario_column_to_Results(calendar = calendar, Results = Results)
if Print:
print('###', team, '|', Teams[team], '###')
display(Results[team])
print('\n\n\n')
return Results
def cumulative_data(Results, giornate, Print = True):
### concatenate team dataframes ##################
cdf = pd.concat(Results)
pf_med = np.median(cdf.punti_fatti)
pf_std = np.std(cdf.punti_fatti)
ps_med = np.median(cdf.punti_subiti)
ps_std = np.std(cdf.punti_subiti)
gf_med = np.median(cdf.GF)
gf_std = np.std(cdf.GF)
if Print:
print('### CUMULATIVE DATA after', giornate, 'rounds ###')
print('Punti Fatti:\n mediana =', pf_med, '\n varianza =', pf_std,
#'\nPunti Subiti:\n mediana =', ps_med, '\n varianza =', ps_std,
'\nGoal Fatti:\n mediana =', gf_med, '\n varianza =', gf_std
)
return pf_med, pf_std, ps_med, ps_std, gf_med, gf_std
### values dataframe #######################
def close_games(Results, giornate, verbose=False):
    '''Compute the close-games factor: points gained, relative to all draws, in games decided by <= 2 fantapoints'''
    data = Results
factor_close_games = []
for team, df in data.items():
df = df[df.index <= giornate]
if verbose: print('\n###', team, '###')
pti=0
g = 0
for gg in df.index:
if (np.abs(df['punti_fatti'][gg]-df['punti_subiti'][gg]))<=2:
res = esito(df.GF[gg], df.GS[gg])
if verbose: print('G', gg, '| punti fatti:', df.punti_fatti[gg], ' ( subiti:', df.punti_subiti[gg],') |', res, '(', df.GF[gg], '-', df.GS[gg], ')')
pti = pti+punti(res)
g = g+1
try:
if verbose: print('---> %d punti in %d giornate \n (media: %.2f)' % (pti, g, pti/g))
factor_close_games.append(pti-g)
except:
if verbose: print('---> No close games found')
factor_close_games.append(0)
return factor_close_games
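# In close_games the factor is (points actually taken) - (number of close
# games), i.e. the gain relative to drawing every close game (a draw is worth
# 1 point). Example: two close wins and one close loss -> pti = 6, g = 3
# -> factor = +3; losing all three close games would give factor = -3.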
def low_scoring_games(Results, giornate, verbose=False):
    '''Compute the low-scoring-games factor: points gained, relative to all draws, in games with fewer than 62 fantapoints scored'''
    data = Results
factor_low_scoring_games = []
for team, df in data.items():
df = df[df.index <= giornate]
if verbose: print('\n###', team, '###')
pti=0
g = 0
for gg in df.index:
if df['punti_fatti'][gg] < 62:
res = esito(df.GF[gg], df.GS[gg])
if verbose: print('G', gg, '| punti fatti:', df.punti_fatti[gg], ' ( subiti:', df.punti_subiti[gg],') |', res, '(', df.GF[gg], '-', df.GS[gg], ')')
pti = pti+punti(res)
g = g+1
        try:
            if verbose: print('---> %d punti in %d giornate \n (media: %.2f)' % (pti, g, pti/g))
            factor_low_scoring_games.append(pti-g)
        except:
            if verbose: print('---> No games found')
            factor_low_scoring_games.append(0)
return factor_low_scoring_games
def exact_fp(Results, giornate, goal_marks, verbose=False):
    '''Compute points "stolen" by landing exactly on a goal threshold in games decided by at most one goal'''
    data = Results
factor = []
for team, df in data.items():
df = df[df.index <= giornate]
if verbose: print('\n###', team, '###')
pti=0
g = 0
for gg in df.index:
if df['punti_fatti'][gg] in goal_marks:
res = esito(df.GF[gg], df.GS[gg])
if verbose: print('G', gg, '| punti fatti:', df.punti_fatti[gg], ' ( subiti:', df.punti_subiti[gg],') |', res, '(', df.GF[gg], '-', df.GS[gg], ')')
if (df.GF[gg] - df.GS[gg]) <= 1:
res_ = esito(df.GF[gg]-1, df.GS[gg])
pti_ = punti(res_)
pti = pti+punti(res)-pti_
g = g+1
try:
if verbose: print('---> %d punti rubati in %d giornate' % (pti, g))
factor.append(pti)
except:
if verbose: print('---> No stolen games found')
factor.append(0)
return factor
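# In exact_fp, winning 1-0 with exactly 66.0 fantapoints counts as "stolen"
# points: the real result is worth 3 points, while one goal less would have
# been a 0-0 draw worth 1 point, so the factor increases by 3 - 1 = +2.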
def opponent_almost_scored(Results, giornate, goal_marks, verbose=False):
    '''Compute points saved because the opponent fell half a fantapoint short of a goal threshold'''
    data = Results
factor = []
for team, df in data.items():
df = df[df.index <= giornate]
if verbose: print('\n###', team, '###')
pti=0
g = 0
for gg in df.index:
if df['punti_subiti'][gg] in goal_marks-0.5:
res = esito(df.GF[gg], df.GS[gg])
if verbose: print('G', gg, '| punti fatti:', df.punti_fatti[gg], ' ( subiti:', df.punti_subiti[gg],') |', res, '(', df.GF[gg], '-', df.GS[gg], ')')
if (df.GF[gg] - df.GS[gg]) <= 1:
res_ = esito(df.GF[gg], df.GS[gg]+1)
#print('Actual Result: %s, Potential: %s' % (res, res_))
pti_ = punti(res_)
pti = pti+punti(res)-pti_
g = g+1
try:
if verbose: print('---> %d punti rubati in %d giornate' % (pti, g))
factor.append(pti)
except:
if verbose: print('---> No stolen games found')
factor.append(0)
return factor
def make_Total_df(Results, giornate, goal_marks, verbose=False):
data = Results
columns = list(data['luca'])
table = [[]]
for team,df in data.items():
df = df[df.index <= giornate]
values = []
for col in columns:
values.append(df[col].sum())
values[len(values)-1]=team
table.append(values)
table=table[1:]
Total=pd.DataFrame(table,columns=columns)
# additional columns
Total = Total.sort_values(by=['punti_fatti'], ascending=False)
Total['pos'] = Total['pti'].rank(ascending=False, method='first')
Total['rank'] = Total['punti_fatti'].rank(ascending=False, method='first')
Total['distacco'] = np.max(Total['pti']) - Total['pti']
Total['f_pos'] = Total['rank']-Total['pos']
Total['f_distacco'] = fattore_distacco(Total)
Total['x_punti_subiti'] = (Total['punti_fatti'].sum() - Total['punti_fatti'])/7
Total['x_GS'] = (Total['GF'].sum() - Total['GF'])/7
Total['f_GS'] = -1*(Total['GS']-Total['x_GS'])/np.std(Total['GS'])
Total.sort_index(inplace=True)
Total['f_close_games'] = close_games(Results, giornate)
Total['f_stolen_games'] = exact_fp(Results, giornate, goal_marks)
Total['f_unlucky_opponent'] = opponent_almost_scored(Results, giornate, goal_marks)
    # dirty hack: the 'avversario' slot was overwritten with the owner name
    # above (values[-1] = team), so copy it into 'Team' to replace the
    # concatenated string produced by summing the 'Team' column
    Total['Team'] = Total['avversario']
return Total
########## PLOTTING FUNCTIONS
def X_goal_subiti(Total, giornate, Teams):
fig = plt.figure(figsize=(8,5))
colors = get_team_colors(Teams)
plt.bar(np.arange(0,8), Total['x_GS'], color=colors, alpha=0.35, label='Expected Goal Subiti')
plt.bar(np.arange(0,8), Total['GS'], color=colors, alpha=0.99, width=0.5, label='GS')
xlabels = Total['Team']
plt.xticks(Total.index, xlabels, rotation=45, ha='right')
plt.grid(which='both', axis='y', alpha=0.25)
plt.ylim(ymin = np.min(Total['GS']-1))
plt.ylabel('Goal Subiti')
title = 'x Goal Subiti (' + str(giornate) + ' Giornate)'
plt.title(title)
plt.legend()
plt.show()
def fattore_goal_subiti(Total, giornate, Teams):
fig = plt.figure(figsize=(6,5))
plt.grid(which='both', axis='y', ls='-', alpha=0.25)
colors = get_team_colors(Teams)
std = np.std(Total['GS'])
plt.bar(np.arange(0,8), -1*(Total['GS']-Total['x_GS'])/std, color=colors, alpha=0.99, width=0.8, label='Fattore GS')
xlabels = Total['Team']
plt.xticks(Total.index, xlabels, rotation=45, ha='right')
plt.axhline(y=0, xmin=-100, xmax=100, color='grey', ls='-')
plt.ylim(-2,2)
    plt.ylabel(r'(xGS-GS)/std(GS) [$N\,\sigma$]')  # raw string keeps the '\,' LaTeX spacing literal
title = 'Fattore Goal Subiti (' + str(giornate) + ' Giornate)'
plt.title(title)
plt.text(x=8.25, y=+0.5, s='Fortuna', verticalalignment='bottom', horizontalalignment='right', color='grey', rotation='90')
plt.text(x=8.25, y=-0.5, s='Sfortuna', verticalalignment='top', horizontalalignment='right', color='grey', rotation='90')
#plt.legend()
plt.show()
def punti_VS_fantapunti(Total, giornate, Teams):
### Punti fatti vs classifica ###############
fig = plt.figure(figsize=(8,5))
colors = get_team_colors(Teams)
x = Total['punti_fatti']
y = Total['pti']
    plt.bar(np.arange(0,8), x, color=colors, alpha=0.99, label='Fantapunti')
plt.ylabel('Fantapunti')
plt.ylim(np.min(x)-10, np.max(x)+10)
xlabels = Total['Team']
plt.xticks(Total.index, xlabels, rotation=45, ha='right')
# secondary y axis
ax2 = plt.twinx()
    ax2.set_ylabel('Punti in classifica')
ax2.tick_params(axis='y', colors='k')
ax2.yaxis.label.set_color('k')
ax2.plot([],[])
    plt.bar(np.arange(0,8), y, color='w', edgecolor=colors, alpha=1, width=0.4, lw=2, label='Punti')
#plt.xticks(Total.index, xlabels, rotation=45, ha='right')
plt.grid(which='both', axis='y', alpha=0.25)
#plt.ylim(ymin = np.min(Total['GS']-1))
plt.ylabel('Punti in classifica')
title = 'PUNTI vs FANTAPUNTI | ' + str(giornate) + ' Giornate'
plt.title(title)
#legend
legend_elements = [
Patch(facecolor='k', edgecolor='k', label='Fantapunti'),
Patch(facecolor='w', edgecolor='k', label='Punti'),
]
plt.legend(handles=legend_elements, loc='upper left', bbox_to_anchor=(1.05,1))
plt.show()
def fantapunti_stats(Total, giornate, Teams, pf_std, pf_med):
fig = plt.figure(figsize=(6,5))
plt.grid(which='both', axis='y', ls='-', alpha = 0.25)
colors = get_team_colors(Teams)
std = pf_std*giornate
med = pf_med*giornate
plt.bar(np.arange(0,8), (Total['punti_fatti']-med)/std, color=colors, alpha=0.99, width=0.8, label='')
xlabels = Total['Team']
plt.xticks(Total.index, xlabels, rotation=45, ha='right')
plt.axhline(y=0, xmin=-100, xmax=100, color='grey', ls='-')
#plt.ylim(-2,2)
    plt.ylabel(r'(fantapunti_fatti-median)/std [$N\,\sigma$]')  # raw string keeps the '\,' LaTeX spacing literal
title = 'Fantapunti Fatti (' + str(giornate) + ' Giornate)'
plt.title(title)
plt.show()
def fattore_close_games(Total, giornate, Teams):
fig = plt.figure(figsize=(6,5))
plt.grid(which='major', axis='y', ls='-', alpha=0.25)
colors = get_team_colors(Teams)
plt.bar(np.arange(0,8), Total['f_close_games'], color=colors, alpha=0.99, width=0.8, label='')
xlabels = Total['Team']
plt.xticks(Total.index, xlabels, rotation=45, ha='right')
plt.axhline(y=0, xmin=-100, xmax=100, color='grey', ls='-')
ylim = np.max(np.abs(Total['f_close_games']))*1.1
#plt.ylim(-2.1,2.1)
plt.ylabel('Punti guadagnati risp. a solo pareggi')
title = 'Fattore Close Games (' + str(giornate) + ' Giornate)'
plt.title(title)
plt.text(x=8.25, y=+0.5, s='Fortuna', verticalalignment='bottom', horizontalalignment='right', color='grey', rotation='90')
plt.text(x=8.25, y=-0.5, s='Sfortuna', verticalalignment='top', horizontalalignment='right', color='grey', rotation='90')
plt.show()
def get_bigradient_colors(Total, Teams, max_f, min_f):
colors = []
edgecolors = []
for key in Teams.keys():
x = Total[Total['Team']==key]['IndiceFortuna'].sum()
if Total[Total['Team']==key]['IndiceFortuna'].sum() >= 0:
x = (max_f - Total[Total['Team']==key]['IndiceFortuna'].sum())/max_f
s = (x,1,x)
colors.append(s)
edgecolors.append('g')
#edgecolors.append('limegreen')
else:
x = -(Total[Total['Team']==key]['IndiceFortuna'].sum() - min_f)/min_f
s=(1,x,x)
colors.append(s)
#edgecolors.append('orangered')
edgecolors.append('r')
return colors, edgecolors
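# In get_bigradient_colors the bar colors fade from white to pure green
# (lucky) or pure red (unlucky) in proportion to IndiceFortuna relative to the
# league max/min: IndiceFortuna == max_f -> x = 0 -> (0, 1, 0) full green;
# IndiceFortuna == 0 -> x = 1 -> (1, 1, 1) white;
# IndiceFortuna == min_f -> x = 0 -> (1, 0, 0) full red.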
# Take negative and positive data apart and cumulate
def get_cumulated_array(data, **kwargs):
cum = data.clip(**kwargs)
cum = np.cumsum(cum, axis=0)
d = np.zeros(np.shape(data))
d[1:] = cum[:-1]
return d
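# get_cumulated_array builds the per-row 'bottom' offsets for a stacked bar
# chart with mixed-sign data: positive rows stack upwards on the cumulated
# positive total, negative rows stack downwards. Tiny illustrative example:
# data = [[+1], [-2], [+3]] -> positive bottoms [0, 1, 1], negative bottoms
# [0, 0, -2]; after re-merging with the sign mask the bottoms are [0, 0, 1].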
def calc_fortuna(df, giornate, tot_giornate):
'''
keys = ('f_pos', 'f_distacco', 'f_GS', 'f_close_games', 'f_stolen_games', 'f_unlucky_opponent')
scaling = (0.5, 1, 1, 0.5, 0.25*tot_giornate/giornate, 0.25*tot_giornate/giornate)
cols = ['dodgerblue', 'purple', 'r', 'gold', 'c', 'lime']
factors = []
df['IndiceFortuna'] = [0,0,0,0,0,0,0,0]
### build fortuna
for col,scale in zip(keys,scaling):
factors.append(df[col]*scale)
df['IndiceFortuna'] = df['IndiceFortuna'] + df[col]*scale
return df, factors
'''
keys = ('f_pos', 'f_distacco', 'f_GS', 'f_close_games')
scaling = (0.5, 1, 1, 0.5)
cols = ['dodgerblue', 'purple', 'r', 'gold']
factors = []
df['IndiceFortuna'] = [0,0,0,0,0,0,0,0]
### build fortuna
for col,scale in zip(keys,scaling):
factors.append(df[col]*scale)
df['IndiceFortuna'] = df['IndiceFortuna'] + df[col]*scale
return df, factors
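# In calc_fortuna, IndiceFortuna is the weighted sum of the luck factors
# currently in use: 0.5*f_pos + 1*f_distacco + 1*f_GS + 0.5*f_close_games
# (the six-factor version with f_stolen_games and f_unlucky_opponent is kept
# above as commented-out code). Illustrative numbers (hypothetical):
# f_pos = 2, f_distacco = -1, f_GS = 0.5, f_close_games = 2
# -> IndiceFortuna = 1 - 1 + 0.5 + 1 = 1.5.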
def C_factor(Results, Total, giornate, tot_giornate, goal_marks, Teams):
cols = ['dodgerblue', 'purple', 'r', 'gold', 'c', 'lime']
keys = ('f_pos', 'f_distacco', 'f_GS', 'f_close_games', 'f_stolen_games', 'f_unlucky_opponent')
scaling = (0.5, 1, 1, 0.5, 0.25*tot_giornate/giornate, 0.25*tot_giornate/giornate)
#--- Fortuna Total
Total, __factors__ = calc_fortuna(Total, giornate, tot_giornate)
matchday = giornate
data_per_round = make_Total_df(Results, giornate, goal_marks, verbose=False)
__df__, __factors__ = calc_fortuna(data_per_round, giornate, tot_giornate)
# re-shape data for positive-negative bar plot
__data__ = np.array(__factors__)
#print(__data__)
data_shape = np.shape(__data__)
#print(data_shape)
cumulated_data = get_cumulated_array(__data__, min=0)
cumulated_data_neg = get_cumulated_array(__data__, max=0)
# Re-merge negative and positive data.
row_mask = (__data__<0)
cumulated_data[row_mask] = cumulated_data_neg[row_mask]
data_stack = cumulated_data
#print(np.shape(data_stack))
fig = plt.figure(figsize=(8,5))
plt.grid(which='major', axis='y', ls='-', alpha=0.25)
xlabels = __df__['Team']
plt.xticks(__df__.index, xlabels, rotation=45, ha='right')
plt.axhline(y=0, xmin=-100, xmax=100, color='grey', ls='-')
max_f = max(__df__['IndiceFortuna'])
min_f = np.min(__df__['IndiceFortuna'])
colors, edgecolors = get_bigradient_colors(Total, Teams, max_f, min_f)
plt.bar(np.arange(0,8), __df__['IndiceFortuna'], color='black', edgecolor=edgecolors, lw=0, alpha=0.15, width=0.9, label='')
#print(np.arange(0, data_shape[0]))
for i in np.arange(0, data_shape[0]):
plt.bar(np.arange(data_shape[1]), __data__[i], bottom=data_stack[i], color=cols[i],alpha=0.99, width=0.5, label=keys[i])
plt.legend()
for i, f in enumerate(list(np.round(__df__['IndiceFortuna'],decimals=1))):
if f<0:
va = 'top'
offset = -0.2
else:
va = 'bottom'
offset = 0.2
plt.annotate(f, (i, f+offset), horizontalalignment='center', verticalalignment=va)
plt.ylim(min_f-2, max_f+2)
plt.ylabel('Indice Fortuna')
title = 'C Factor (' + str(matchday) + '° Giornata)'
plt.title(title)
plt.show()
def offset_image(Logos, x,y, name, ax, zoom):
img = plt.imread(Logos[name])
im = OffsetImage(img, zoom=zoom)
im.image.axes = ax
ab = AnnotationBbox(im, (x, y), xybox=(0., 0.), frameon=False,
xycoords='data', boxcoords="offset points", pad=0)
ax.add_artist(ab)
def C_factor_logos(Total, giornate, Teams, tot_giornate, Logos):
Total, __factors__ = calc_fortuna(Total, giornate, tot_giornate)
fig = plt.figure(figsize=(10,6))
ax=fig.add_subplot(111)
plt.grid(which='major', axis='y', ls='-', alpha=0.25)
xlabels = Total['Team']
plt.xticks(Total.index, xlabels, rotation=45, ha='right')
plt.axhline(y=0, xmin=-100, xmax=100, color='grey', ls='-')
max_f = max(Total['IndiceFortuna'])
min_f = np.min(Total['IndiceFortuna'])
colors = get_team_colors(Teams)
ax.bar(np.arange(0,8), Total['IndiceFortuna'], color=colors, lw=0, alpha=0.99, width=0.8, label='')
for i, f in enumerate(list(np.round(Total['IndiceFortuna'],decimals=1))):
if f<0:
va = 'top'
offset = -0.1
else:
va = 'bottom'
offset = 0.1
plt.annotate(f, (i, f+offset), horizontalalignment='center', verticalalignment=va, weight='bold')
offset_image(Logos, i,f-4*offset, Total['Team'].iat[i], ax, zoom=0.15)
plt.ylim(min_f-1, max_f+1)
plt.ylabel('Indice Fortuna')
title = 'C FACTOR | ' + str(giornate) + ' Giornate'
plt.title(title, fontsize=20)
ax.tick_params(axis='x', which='major')
#for i, team in enumerate(Teams.keys()):
# offset_image(i, team, ax)
plt.savefig('Plots/C_fact_Total.png')
#plt.show()
def C_factor_logos_2(Total, giornate, Teams, tot_giornate, Logos):
Total, __factors__ = calc_fortuna(Total, giornate, tot_giornate)
df = Total
df = df.sort_values('IndiceFortuna', ascending=True)
fig = plt.figure(figsize=(6,8))
ax=fig.add_subplot(111)
plt.grid(which='major', axis='y', ls='-', alpha=0.25)
ax.tick_params(left=False)
#plt.axhline(y=0, xmin=-100, xmax=100, color='grey', ls='-')
max_f = max(df['IndiceFortuna'])
min_f = np.min(df['IndiceFortuna'])
colors = get_team_colors(Teams)
for team in df['Team']:
df_ = df[df.Team == team]
ax.barh(team.upper(), df_['IndiceFortuna'], color=Teams[team][1], lw=0, alpha=0.99, label='')
#for i, f in enumerate(list(np.round(Total['IndiceFortuna'],decimals=1))):
f = df_['IndiceFortuna'].iat[0]
if f<0:
va = 'right'
offset = -0.1
color = 'r'
else:
va = 'left'
offset = 0.1
color = 'tab:green'
plt.annotate('%.1f' % f, (f+offset, team.upper()), color=color, verticalalignment='center', horizontalalignment=va, weight='bold')
offset_image(Logos,-5*offset,team.upper(), team, ax, zoom=0.125)
plt.xlim(min_f-2, max_f+2)
plt.xlabel('Indice Fortuna')
title = 'C FACTOR | ' + str(giornate) + ' Giornate'
#plt.title(title, fontsize=20, )
plt.grid(axis='both', lw=0)
ax.set_title('C FACTOR', fontweight='bold', loc='left')
ax.set_title('Giornata: ' + str(giornate), fontweight='100', loc='right')
ax.tick_params(axis='y', which='major')
#for i, team in enumerate(Teams.keys()):
# offset_image(i, team, ax)
#plt.show()
plt.savefig('Plots/C_fact_Total.png')
#plt.show()
def partial_totals(Results, giornate, tot_giornate, goal_marks):
#--- build partial Total entries
#print('... Filling Total df per round ...')
Tot_per_round = []
for gg in Results['luca'].index:
data_per_round = make_Total_df(Results=Results, giornate = gg, goal_marks = goal_marks, verbose=False)
#print(data_per_round.dtypes)
__df__, __factors__ = calc_fortuna(data_per_round, giornate, tot_giornate)
Tot_per_round.append(__df__)
return Tot_per_round
def fortuna_evo(Results, Teams, Tot_per_round, title='Indice Fortuna Evolution', ylabel='Indice Fortuna'):
giornate = Results['luca'].index
gg = max(giornate)
ggfig = max(12,gg)
fig = plt.figure(figsize=(ggfig*0.6,6))
ax=fig.add_subplot(111)
data = []
for team in Teams.keys():
        dd = pd.DataFrame()
import logging
import math
import os
import pandas as pd
import pandas._libs.json as ujson
from cirrocumulus.diff_exp import DE
from .data_processing import get_filter_str, get_mask
from .envir import CIRRO_SERVE, CIRRO_MAX_WORKERS, CIRRO_DATABASE_CLASS, CIRRO_JOB_RESULTS, CIRRO_JOB_TYPE
from .fdr import fdrcorrection
from .util import create_instance, add_dataset_providers, get_fs, import_path, open_file
executor = None
job_id_2_future = dict()
logger = logging.getLogger('cirro')
def save_job_result_to_file(result, job_id):
new_result = dict()
new_result['content-type'] = result.pop('content-type')
if new_result['content-type'] == 'application/json':
new_result['content-encoding'] = 'gzip'
url = os.path.join(os.environ[CIRRO_JOB_RESULTS], str(job_id) + '.json.gz')
with open_file(url, 'wt', compression='gzip') as out:
        out.write(ujson.dumps(result, double_precision=2, orient='values'))
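# A minimal read-back sketch (an assumption, not part of this module's API):
# the gzipped JSON written above can be loaded again with the same open_file
# helper, for example:
#   import json
#   with open_file(url, 'rt', compression='gzip') as f:
#       result = json.loads(f.read())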
'''
__author__=<NAME>
MIT License
Copyright (c) 2020 crewml
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import pandas as pd
import logging
from crewml.common import DATA_DIR
from sklearn.preprocessing import LabelEncoder
from category_encoders import TargetEncoder
class FlightFeatureGenerator:
'''
    This class creates new features for the flights based
    on the following criteria:
    1. Every flight that departs from and arrives at a base city
       is marked with "1" in a new feature BASE_FL
    2. All flights with CRS_ELAPSED_TIME <= 900 are grouped together with
       a new feature GROUP=1,2,3, etc.
'''
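    # A minimal sketch of criterion 1 (an assumption: the actual column names
    # in the feature file may differ from 'ORIGIN'/'DEST'):
    #   df['BASE_FL'] = (df['ORIGIN'].isin(fa_bases) &
    #                    df['DEST'].isin(fa_bases)).astype(int)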
def __init__(self, pairing_month,
feature_file,
feature_gen_file,
fa_bases,
fa_non_bases):
self.logger = logging.getLogger(__name__)
self.pairing_month = pairing_month
self.feature_file = feature_file
self.fa_bases = fa_bases
self.fa_non_bases = fa_non_bases
self.feature_gen_file = feature_gen_file
self.flights_df = pd.read_csv(
DATA_DIR+self.pairing_month+"/"+self.feature_file)
self.flights_df.drop(self.flights_df.filter(
regex="Unname"), axis=1, inplace=True)
self.flights_df.dropna()
self.flights_df['ORIGIN_UTC'] = pd.to_datetime(
self.flights_df['ORIGIN_UTC'])
self.flights_df['DEST_UTC'] = pd.to_datetime(
self.flights_df['DEST_UTC'])
self.group_id = 1
        self.final_df = pd.DataFrame()
# -*- coding: utf-8 -*-
import unittest
import platform
import pandas as pd
import numpy as np
import pyarrow.parquet as pq
import hpat
from hpat.tests.test_utils import (
count_array_REPs, count_parfor_REPs, count_array_OneDs, get_start_end)
from hpat.tests.gen_test_data import ParquetGenerator
from numba import types
from numba.config import IS_32BITS
from numba.errors import TypingError
_cov_corr_series = [(pd.Series(x), pd.Series(y)) for x, y in [
(
[np.nan, -2., 3., 9.1],
[np.nan, -2., 3., 5.0],
),
# TODO(quasilyte): more intricate data for complex-typed series.
# Some arguments make assert_almost_equal fail.
# Functions that yield mismaching results:
# _column_corr_impl and _column_cov_impl.
(
[complex(-2., 1.0), complex(3.0, 1.0)],
[complex(-3., 1.0), complex(2.0, 1.0)],
),
(
[complex(-2.0, 1.0), complex(3.0, 1.0)],
[1.0, -2.0],
),
(
[1.0, -4.5],
[complex(-4.5, 1.0), complex(3.0, 1.0)],
),
]]
min_float64 = np.finfo('float64').min
max_float64 = np.finfo('float64').max
test_global_input_data_float64 = [
[1., np.nan, -1., 0., min_float64, max_float64],
[np.nan, np.inf, np.NINF, np.NZERO]
]
min_int64 = np.iinfo('int64').min
max_int64 = np.iinfo('int64').max
max_uint64 = np.iinfo('uint64').max
test_global_input_data_integer64 = [
[1, -1, 0],
[min_int64, max_int64],
[max_uint64]
]
test_global_input_data_numeric = test_global_input_data_integer64 + test_global_input_data_float64
test_global_input_data_unicode_kind4 = [
'ascii',
'12345',
'1234567890',
'¡Y tú quién te crees?',
'🐍⚡',
'大处着眼,小处着手。',
]
test_global_input_data_unicode_kind1 = [
'ascii',
'12345',
'1234567890',
]
def _make_func_from_text(func_text, func_name='test_impl'):
loc_vars = {}
exec(func_text, {}, loc_vars)
test_impl = loc_vars[func_name]
return test_impl
def _make_func_use_binop1(operator):
func_text = "def test_impl(A, B):\n"
func_text += " return A {} B\n".format(operator)
return _make_func_from_text(func_text)
def _make_func_use_binop2(operator):
func_text = "def test_impl(A, B):\n"
func_text += " A {} B\n".format(operator)
func_text += " return A\n"
return _make_func_from_text(func_text)
def _make_func_use_method_arg1(method):
func_text = "def test_impl(A, B):\n"
func_text += " return A.{}(B)\n".format(method)
return _make_func_from_text(func_text)
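# Example of the generated source: _make_func_use_binop1('+') compiles
#     def test_impl(A, B):
#         return A + B
# while _make_func_use_method_arg1('add') compiles
#     def test_impl(A, B):
#         return A.add(B)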
GLOBAL_VAL = 2
class TestSeries(unittest.TestCase):
def test_create1(self):
def test_impl():
df = pd.DataFrame({'A': [1, 2, 3]})
return (df.A == 1).sum()
hpat_func = hpat.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
@unittest.skip('Feature request: implement Series::ctor with list(list(type))')
def test_create_list_list_unicode(self):
def test_impl():
S = pd.Series([
['abc', 'defg', 'ijk'],
['lmn', 'opq', 'rstuvwxyz']
])
return S
hpat_func = hpat.jit(test_impl)
result_ref = test_impl()
result = hpat_func()
pd.testing.assert_series_equal(result, result_ref)
@unittest.skip('Feature request: implement Series::ctor with list(list(type))')
def test_create_list_list_integer(self):
def test_impl():
S = pd.Series([
[123, 456, -789],
[-112233, 445566, 778899]
])
return S
hpat_func = hpat.jit(test_impl)
result_ref = test_impl()
result = hpat_func()
pd.testing.assert_series_equal(result, result_ref)
@unittest.skip('Feature request: implement Series::ctor with list(list(type))')
def test_create_list_list_float(self):
def test_impl():
S = pd.Series([
[1.23, -4.56, 7.89],
[11.2233, 44.5566, -778.899]
])
return S
hpat_func = hpat.jit(test_impl)
result_ref = test_impl()
result = hpat_func()
pd.testing.assert_series_equal(result, result_ref)
def test_create2(self):
def test_impl(n):
df = pd.DataFrame({'A': np.arange(n)})
return (df.A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
self.assertEqual(hpat_func(n), test_impl(n))
def test_create_series1(self):
def test_impl():
A = pd.Series([1, 2, 3])
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_create_series_index1(self):
# create and box an indexed Series
def test_impl():
A = pd.Series([1, 2, 3], ['A', 'C', 'B'])
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_create_series_index2(self):
def test_impl():
A = pd.Series([1, 2, 3], index=['A', 'C', 'B'])
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_create_series_index3(self):
def test_impl():
A = pd.Series([1, 2, 3], index=['A', 'C', 'B'], name='A')
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_create_series_index4(self):
def test_impl(name):
A = pd.Series([1, 2, 3], index=['A', 'C', 'B'], name=name)
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func('A'), test_impl('A'))
def test_create_str(self):
def test_impl():
df = pd.DataFrame({'A': ['a', 'b', 'c']})
return (df.A == 'a').sum()
hpat_func = hpat.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
def test_pass_df1(self):
def test_impl(df):
return (df.A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df), test_impl(df))
def test_pass_df_str(self):
def test_impl(df):
return (df.A == 'a').sum()
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': ['a', 'b', 'c']})
self.assertEqual(hpat_func(df), test_impl(df))
def test_pass_series1(self):
# TODO: check to make sure it is series type
def test_impl(A):
return (A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_pass_series2(self):
# test creating dataframe from passed series
def test_impl(A):
df = pd.DataFrame({'A': A})
return (df.A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_pass_series_str(self):
def test_impl(A):
return (A == 'a').sum()
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': ['a', 'b', 'c']})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_pass_series_index1(self):
def test_impl(A):
return A
hpat_func = hpat.jit(test_impl)
S = pd.Series([3, 5, 6], ['a', 'b', 'c'], name='A')
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_size(self):
def test_impl(S):
return S.size
hpat_func = hpat.jit(test_impl)
n = 11
for S, expected in [
(pd.Series(), 0),
(pd.Series([]), 0),
(pd.Series(np.arange(n)), n),
(pd.Series([np.nan, 1, 2]), 3),
(pd.Series(['1', '2', '3']), 3),
]:
with self.subTest(S=S, expected=expected):
self.assertEqual(hpat_func(S), expected)
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_attr2(self):
def test_impl(A):
return A.copy().values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_attr3(self):
def test_impl(A):
return A.min()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_series_attr4(self):
def test_impl(A):
return A.cumsum().values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_argsort1(self):
def test_impl(A):
return A.argsort()
hpat_func = hpat.jit(test_impl)
n = 11
A = pd.Series(np.random.ranf(n))
pd.testing.assert_series_equal(hpat_func(A), test_impl(A))
def test_series_attr6(self):
def test_impl(A):
return A.take([2, 3]).values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_attr7(self):
def test_impl(A):
return A.astype(np.float64)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_getattr_ndim(self):
'''Verifies getting Series attribute ndim is supported'''
def test_impl(S):
return S.ndim
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_getattr_T(self):
'''Verifies getting Series attribute T is supported'''
def test_impl(S):
return S.T
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
np.testing.assert_array_equal(hpat_func(S), test_impl(S))
def test_series_copy_str1(self):
def test_impl(A):
return A.copy()
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_copy_int1(self):
def test_impl(A):
return A.copy()
hpat_func = hpat.jit(test_impl)
S = pd.Series([1, 2, 3])
np.testing.assert_array_equal(hpat_func(S), test_impl(S))
def test_series_copy_deep(self):
def test_impl(A, deep):
return A.copy(deep=deep)
hpat_func = hpat.jit(test_impl)
for S in [
pd.Series([1, 2]),
pd.Series([1, 2], index=["a", "b"]),
]:
with self.subTest(S=S):
for deep in (True, False):
with self.subTest(deep=deep):
actual = hpat_func(S, deep)
expected = test_impl(S, deep)
pd.testing.assert_series_equal(actual, expected)
self.assertEqual(actual.values is S.values, expected.values is S.values)
self.assertEqual(actual.values is S.values, not deep)
# Shallow copy of index is not supported yet
if deep:
self.assertEqual(actual.index is S.index, expected.index is S.index)
self.assertEqual(actual.index is S.index, not deep)
def test_series_astype_int_to_str1(self):
'''Verifies Series.astype implementation with function 'str' as argument
converts integer series to series of strings
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_int_to_str2(self):
'''Verifies Series.astype implementation with a string literal dtype argument
converts integer series to series of strings
'''
def test_impl(S):
return S.astype('str')
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_to_str1(self):
'''Verifies Series.astype implementation with function 'str' as argument
handles string series not changing it
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_to_str2(self):
'''Verifies Series.astype implementation with a string literal dtype argument
handles string series not changing it
'''
def test_impl(S):
return S.astype('str')
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_to_str_index_str(self):
'''Verifies Series.astype implementation with function 'str' as argument
handles string series not changing it
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'], index=['d', 'e', 'f'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_to_str_index_int(self):
'''Verifies Series.astype implementation with function 'str' as argument
handles string series not changing it
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'], index=[1, 2, 3])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('TODO: requires str(datetime64) support in Numba')
def test_series_astype_dt_to_str1(self):
'''Verifies Series.astype implementation with function 'str' as argument
converts datetime series to series of strings
'''
def test_impl(A):
return A.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series([pd.Timestamp('20130101 09:00:00'),
pd.Timestamp('20130101 09:00:02'),
pd.Timestamp('20130101 09:00:03')
])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('AssertionError: Series are different'
'[left]: [0.000000, 1.000000, 2.000000, 3.000000, ...'
'[right]: [0.0, 1.0, 2.0, 3.0, ...'
'TODO: needs alignment to NumPy on Numba side')
def test_series_astype_float_to_str1(self):
'''Verifies Series.astype implementation with function 'str' as argument
converts float series to series of strings
'''
def test_impl(A):
return A.astype(str)
hpat_func = hpat.jit(test_impl)
n = 11.0
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_int32_to_int64(self):
'''Verifies Series.astype implementation with NumPy dtype argument
converts series with dtype=int32 to series with dtype=int64
'''
def test_impl(A):
return A.astype(np.int64)
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n), dtype=np.int32)
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_int_to_float64(self):
'''Verifies Series.astype implementation with NumPy dtype argument
converts integer series to series of float
'''
def test_impl(A):
return A.astype(np.float64)
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_float_to_int32(self):
'''Verifies Series.astype implementation with NumPy dtype argument
converts float series to series of integers
'''
def test_impl(A):
return A.astype(np.int32)
hpat_func = hpat.jit(test_impl)
n = 11.0
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('TODO: needs Numba astype impl support string literal as dtype arg')
def test_series_astype_literal_dtype1(self):
'''Verifies Series.astype implementation with a string literal dtype argument
converts float series to series of integers
'''
def test_impl(A):
return A.astype('int32')
hpat_func = hpat.jit(test_impl)
n = 11.0
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('TODO: needs Numba astype impl support converting unicode_type to int')
def test_series_astype_str_to_int32(self):
'''Verifies Series.astype implementation with NumPy dtype argument
converts series of strings to series of integers
'''
import numba
def test_impl(A):
return A.astype(np.int32)
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series([str(x) for x in np.arange(n) - n // 2])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('TODO: needs Numba astype impl support converting unicode_type to float')
def test_series_astype_str_to_float64(self):
'''Verifies Series.astype implementation with NumPy dtype argument
converts series of strings to series of float
'''
def test_impl(A):
return A.astype(np.float64)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['3.24', '1E+05', '-1', '-1.3E-01', 'nan', 'inf'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_index_str(self):
'''Verifies Series.astype implementation with function 'str' as argument
handles string series not changing it
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'], index=['a', 'b', 'c'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_index_int(self):
'''Verifies Series.astype implementation with function 'str' as argument
handles string series not changing it
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'], index=[2, 3, 5])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_np_call_on_series1(self):
def test_impl(A):
return np.min(A)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_values(self):
def test_impl(A):
return A.values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_values1(self):
def test_impl(A):
return (A == 2).values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_shape1(self):
def test_impl(A):
return A.shape
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_static_setitem_series1(self):
def test_impl(A):
A[0] = 2
return (A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_setitem_series1(self):
def test_impl(A, i):
A[i] = 2
return (A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A.copy(), 0), test_impl(df.A.copy(), 0))
def test_setitem_series2(self):
def test_impl(A, i):
A[i] = 100
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
A1 = df.A.copy()
A2 = df.A
hpat_func(A1, 0)
test_impl(A2, 0)
pd.testing.assert_series_equal(A1, A2)
@unittest.skip("enable after remove dead in hiframes is removed")
def test_setitem_series3(self):
def test_impl(A, i):
S = pd.Series(A)
S[i] = 100
hpat_func = hpat.jit(test_impl)
n = 11
A = np.arange(n)
A1 = A.copy()
A2 = A
hpat_func(A1, 0)
test_impl(A2, 0)
np.testing.assert_array_equal(A1, A2)
def test_setitem_series_bool1(self):
def test_impl(A):
A[A > 3] = 100
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
A1 = df.A.copy()
A2 = df.A
hpat_func(A1)
test_impl(A2)
pd.testing.assert_series_equal(A1, A2)
def test_setitem_series_bool2(self):
def test_impl(A, B):
A[A > 3] = B[A > 3]
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n), 'B': np.arange(n)**2})
A1 = df.A.copy()
A2 = df.A
hpat_func(A1, df.B)
test_impl(A2, df.B)
pd.testing.assert_series_equal(A1, A2)
def test_static_getitem_series1(self):
def test_impl(A):
return A[0]
hpat_func = hpat.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
self.assertEqual(hpat_func(A), test_impl(A))
def test_getitem_series1(self):
def test_impl(A, i):
return A[i]
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A, 0), test_impl(df.A, 0))
def test_getitem_series_str1(self):
def test_impl(A, i):
return A[i]
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': ['aa', 'bb', 'cc']})
self.assertEqual(hpat_func(df.A, 0), test_impl(df.A, 0))
def test_series_iat1(self):
def test_impl(A):
return A.iat[3]
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n)**2)
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_iat2(self):
def test_impl(A):
A.iat[3] = 1
return A
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_iloc1(self):
def test_impl(A):
return A.iloc[3]
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n)**2)
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_iloc2(self):
def test_impl(A):
return A.iloc[3:8]
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(
hpat_func(S), test_impl(S).reset_index(drop=True))
def test_series_op1(self):
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(1, n), 'B': np.ones(n - 1)})
pd.testing.assert_series_equal(hpat_func(df.A, df.B), test_impl(df.A, df.B), check_names=False)
def test_series_op2(self):
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = hpat.jit(test_impl)
n = 11
if platform.system() == 'Windows' and not IS_32BITS:
df = pd.DataFrame({'A': np.arange(1, n, dtype=np.int64)})
else:
df = pd.DataFrame({'A': np.arange(1, n)})
pd.testing.assert_series_equal(hpat_func(df.A, 1), test_impl(df.A, 1), check_names=False)
def test_series_op3(self):
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop2(operator)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(1, n), 'B': np.ones(n - 1)})
pd.testing.assert_series_equal(hpat_func(df.A, df.B), test_impl(df.A, df.B), check_names=False)
def test_series_op4(self):
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop2(operator)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(1, n)})
pd.testing.assert_series_equal(hpat_func(df.A, 1), test_impl(df.A, 1), check_names=False)
def test_series_op5(self):
arithmetic_methods = ('add', 'sub', 'mul', 'div', 'truediv', 'floordiv', 'mod', 'pow')
for method in arithmetic_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(1, n), 'B': np.ones(n - 1)})
pd.testing.assert_series_equal(hpat_func(df.A, df.B), test_impl(df.A, df.B), check_names=False)
@unittest.skipIf(platform.system() == 'Windows', 'Series values are different (20.0 %)'
'[left]: [1, 1024, 59049, 1048576, 9765625, 60466176, 282475249, 1073741824, 3486784401, 10000000000]'
'[right]: [1, 1024, 59049, 1048576, 9765625, 60466176, 282475249, 1073741824, -808182895, 1410065408]')
def test_series_op5_integer_scalar(self):
arithmetic_methods = ('add', 'sub', 'mul', 'div', 'truediv', 'floordiv', 'mod', 'pow')
for method in arithmetic_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
if platform.system() == 'Windows' and not IS_32BITS:
operand_series = pd.Series(np.arange(1, n, dtype=np.int64))
else:
operand_series = pd.Series(np.arange(1, n))
operand_scalar = 10
pd.testing.assert_series_equal(
hpat_func(operand_series, operand_scalar),
test_impl(operand_series, operand_scalar),
check_names=False)
def test_series_op5_float_scalar(self):
arithmetic_methods = ('add', 'sub', 'mul', 'div', 'truediv', 'floordiv', 'mod', 'pow')
for method in arithmetic_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
operand_series = pd.Series(np.arange(1, n))
operand_scalar = .5
pd.testing.assert_series_equal(
hpat_func(operand_series, operand_scalar),
test_impl(operand_series, operand_scalar),
check_names=False)
def test_series_op6(self):
def test_impl(A):
return -A
hpat_func = hpat.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(A), test_impl(A))
def test_series_op7(self):
comparison_binops = ('<', '>', '<=', '>=', '!=', '==')
for operator in comparison_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = hpat.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
B = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_names=False)
def test_series_op8(self):
comparison_methods = ('lt', 'gt', 'le', 'ge', 'ne', 'eq')
for method in comparison_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
B = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_names=False)
@unittest.skipIf(platform.system() == 'Windows', "Attribute dtype are different: int64, int32")
def test_series_op8_integer_scalar(self):
comparison_methods = ('lt', 'gt', 'le', 'ge', 'eq', 'ne')
for method in comparison_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
operand_series = pd.Series(np.arange(1, n))
operand_scalar = 10
pd.testing.assert_series_equal(
hpat_func(operand_series, operand_scalar),
test_impl(operand_series, operand_scalar),
check_names=False)
def test_series_op8_float_scalar(self):
comparison_methods = ('lt', 'gt', 'le', 'ge', 'eq', 'ne')
for method in comparison_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
operand_series = pd.Series(np.arange(1, n))
operand_scalar = .5
pd.testing.assert_series_equal(
hpat_func(operand_series, operand_scalar),
test_impl(operand_series, operand_scalar),
check_names=False)
def test_series_inplace_binop_array(self):
def test_impl(A, B):
A += B
return A
hpat_func = hpat.jit(test_impl)
n = 11
A = np.arange(n)**2.0 # TODO: use 2 for test int casting
B = pd.Series(np.ones(n))
np.testing.assert_array_equal(hpat_func(A.copy(), B), test_impl(A, B))
def test_series_fusion1(self):
def test_impl(A, B):
return A + B + 1
hpat_func = hpat.jit(test_impl)
n = 11
if platform.system() == 'Windows' and not IS_32BITS:
A = pd.Series(np.arange(n), dtype=np.int64)
B = pd.Series(np.arange(n)**2, dtype=np.int64)
else:
A = pd.Series(np.arange(n))
B = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B))
self.assertEqual(count_parfor_REPs(), 1)
def test_series_fusion2(self):
# make sure getting data var avoids incorrect single def assumption
def test_impl(A, B):
S = B + 2
if A[0] == 0:
S = A + 1
return S + B
hpat_func = hpat.jit(test_impl)
n = 11
if platform.system() == 'Windows' and not IS_32BITS:
A = pd.Series(np.arange(n), dtype=np.int64)
B = pd.Series(np.arange(n)**2, dtype=np.int64)
else:
A = pd.Series(np.arange(n))
B = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B))
self.assertEqual(count_parfor_REPs(), 3)
def test_series_len(self):
def test_impl(A, i):
return len(A)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A, 0), test_impl(df.A, 0))
def test_series_box(self):
def test_impl():
A = pd.Series([1, 2, 3])
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_series_box2(self):
def test_impl():
A = pd.Series(['1', '2', '3'])
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_series_list_str_unbox1(self):
def test_impl(A):
return A.iloc[0]
hpat_func = hpat.jit(test_impl)
S = pd.Series([['aa', 'b'], ['ccc'], []])
np.testing.assert_array_equal(hpat_func(S), test_impl(S))
# call twice to test potential refcount errors
np.testing.assert_array_equal(hpat_func(S), test_impl(S))
def test_np_typ_call_replace(self):
# calltype replacement is tricky for np.typ() calls since variable
# type can't provide calltype
def test_impl(i):
return np.int32(i)
hpat_func = hpat.jit(test_impl)
self.assertEqual(hpat_func(1), test_impl(1))
def test_series_ufunc1(self):
def test_impl(A, i):
return np.isinf(A).values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A, 1), test_impl(df.A, 1))
def test_list_convert(self):
def test_impl():
df = pd.DataFrame({'one': np.array([-1, np.nan, 2.5]),
'two': ['foo', 'bar', 'baz'],
'three': [True, False, True]})
return df.one.values, df.two.values, df.three.values
hpat_func = hpat.jit(test_impl)
one, two, three = hpat_func()
self.assertTrue(isinstance(one, np.ndarray))
self.assertTrue(isinstance(two, np.ndarray))
self.assertTrue(isinstance(three, np.ndarray))
@unittest.skip("needs empty_like typing fix in npydecl.py")
def test_series_empty_like(self):
def test_impl(A):
return np.empty_like(A)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertTrue(isinstance(hpat_func(df.A), np.ndarray))
def test_series_fillna1(self):
def test_impl(A):
return A.fillna(5.0)
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': [1.0, 2.0, np.nan, 1.0]})
pd.testing.assert_series_equal(hpat_func(df.A),
test_impl(df.A), check_names=False)
# test inplace fillna for named numeric series (obtained from DataFrame)
def test_series_fillna_inplace1(self):
def test_impl(A):
A.fillna(5.0, inplace=True)
return A
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': [1.0, 2.0, np.nan, 1.0]})
pd.testing.assert_series_equal(hpat_func(df.A),
test_impl(df.A), check_names=False)
def test_series_fillna_str1(self):
def test_impl(A):
return A.fillna("dd")
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': ['aa', 'b', None, 'ccc']})
pd.testing.assert_series_equal(hpat_func(df.A),
test_impl(df.A), check_names=False)
def test_series_fillna_str_inplace1(self):
def test_impl(A):
A.fillna("dd", inplace=True)
return A
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'ccc'])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
# TODO: handle string array reflection
# hpat_func(S1)
# test_impl(S2)
# np.testing.assert_array_equal(S1, S2)
def test_series_fillna_str_inplace_empty1(self):
def test_impl(A):
A.fillna("", inplace=True)
return A
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'ccc'])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skip('Unsupported functionality: failed to handle index')
def test_series_fillna_index_str(self):
def test_impl(S):
return S.fillna(5.0)
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2.0, np.nan, 1.0], index=['a', 'b', 'c', 'd'])
pd.testing.assert_series_equal(hpat_func(S),
test_impl(S), check_names=False)
@unittest.skip('Unsupported functionality: failed to handle index')
def test_series_fillna_index_int(self):
def test_impl(S):
return S.fillna(5.0)
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2.0, np.nan, 1.0], index=[2, 3, 4, 5])
pd.testing.assert_series_equal(hpat_func(S),
test_impl(S), check_names=False)
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'No support of axis argument in old-style Series.dropna() impl')
def test_series_dropna_axis1(self):
'''Verifies Series.dropna() implementation handles 'index' as axis argument'''
def test_impl(S):
return S.dropna(axis='index')
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'No support of axis argument in old-style Series.dropna() impl')
def test_series_dropna_axis2(self):
'''Verifies Series.dropna() implementation handles 0 as axis argument'''
def test_impl(S):
return S.dropna(axis=0)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'No support of axis argument in old-style Series.dropna() impl')
def test_series_dropna_axis3(self):
'''Verifies Series.dropna() implementation handles correct non-literal axis argument'''
def test_impl(S, axis):
return S.dropna(axis=axis)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf])
S2 = S1.copy()
axis_values = [0, 'index']
for value in axis_values:
pd.testing.assert_series_equal(hpat_func(S1, value), test_impl(S2, value))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_float_index1(self):
'''Verifies Series.dropna() implementation for float series with default index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
for data in test_global_input_data_float64:
S1 = pd.Series(data)
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_float_index2(self):
'''Verifies Series.dropna() implementation for float series with string index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf], ['a', 'b', 'c', 'd', 'e'])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_str_index1(self):
'''Verifies Series.dropna() implementation for series of strings with default index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'cccd', ''])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_str_index2(self):
'''Verifies Series.dropna() implementation for series of strings with string index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'cccd', ''], ['a', 'b', 'c', 'd', 'e'])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_str_index3(self):
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'cccd', ''], index=[1, 2, 5, 7, 10])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skip('BUG: old-style dropna impl returns series without index, in new-style inplace is unsupported')
def test_series_dropna_float_inplace_no_index1(self):
'''Verifies Series.dropna() implementation for float series with default index and inplace argument True'''
def test_impl(S):
S.dropna(inplace=True)
return S
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skip('TODO: add reflection support and check method return value')
def test_series_dropna_float_inplace_no_index2(self):
'''Verifies Series.dropna(inplace=True) results are reflected back in the original float series'''
def test_impl(S):
return S.dropna(inplace=True)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf])
S2 = S1.copy()
self.assertIsNone(hpat_func(S1))
self.assertIsNone(test_impl(S2))
pd.testing.assert_series_equal(S1, S2)
@unittest.skip('BUG: old-style dropna impl returns series without index, in new-style inplace is unsupported')
def test_series_dropna_str_inplace_no_index1(self):
'''Verifies Series.dropna() implementation for series of strings
with default index and inplace argument True
'''
def test_impl(S):
S.dropna(inplace=True)
return S
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'cccd', ''])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skip('TODO: add reflection support and check method return value')
def test_series_dropna_str_inplace_no_index2(self):
'''Verifies Series.dropna(inplace=True) results are reflected back in the original string series'''
def test_impl(S):
return S.dropna(inplace=True)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'cccd', ''])
S2 = S1.copy()
self.assertIsNone(hpat_func(S1))
self.assertIsNone(test_impl(S2))
pd.testing.assert_series_equal(S1, S2)
def test_series_dropna_str_parallel1(self):
'''Verifies Series.dropna() distributed work for series of strings with default index'''
def test_impl(A):
B = A.dropna()
return (B == 'gg').sum()
hpat_func = hpat.jit(distributed=['A'])(test_impl)
S1 = pd.Series(['aa', 'b', None, 'ccc', 'dd', 'gg'])
start, end = get_start_end(len(S1))
# TODO: gatherv
self.assertEqual(hpat_func(S1[start:end]), test_impl(S1))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
self.assertTrue(count_array_OneDs() > 0)
@unittest.skip('AssertionError: Series are different\n'
'Series length are different\n'
'[left]: 3, Int64Index([0, 1, 2], dtype=\'int64\')\n'
'[right]: 2, Int64Index([1, 2], dtype=\'int64\')')
def test_series_dropna_dt_no_index1(self):
'''Verifies Series.dropna() implementation for datetime series with default index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([pd.NaT, pd.Timestamp('1970-12-01'), pd.Timestamp('2012-07-25')])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
def test_series_dropna_bool_no_index1(self):
'''Verifies Series.dropna() implementation for bool series with default index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([True, False, False, True])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_int_no_index1(self):
'''Verifies Series.dropna() implementation for integer series with default index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
n = 11
S1 = pd.Series(np.arange(n, dtype=np.int64))
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skip('numba.errors.TypingError - fix needed\n'
'Failed in hpat mode pipeline '
'(step: convert to distributed)\n'
'Invalid use of Function(<built-in function len>)'
'with argument(s) of type(s): (none)\n')
def test_series_rename1(self):
def test_impl(A):
return A.rename('B')
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': [1.0, 2.0, np.nan, 1.0]})
pd.testing.assert_series_equal(hpat_func(df.A), test_impl(df.A))
def test_series_sum_default(self):
def test_impl(S):
return S.sum()
hpat_func = hpat.jit(test_impl)
S = pd.Series([1., 2., 3.])
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_sum_nan(self):
def test_impl(S):
return S.sum()
hpat_func = hpat.jit(test_impl)
# column with NA
S = pd.Series([np.nan, 2., 3.])
self.assertEqual(hpat_func(S), test_impl(S))
# all NA case should produce 0
S = pd.Series([np.nan, np.nan])
self.assertEqual(hpat_func(S), test_impl(S))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default, "Old style Series.sum() does not support parameters")
def test_series_sum_skipna_false(self):
def test_impl(S):
return S.sum(skipna=False)
hpat_func = hpat.jit(test_impl)
S = pd.Series([np.nan, 2., 3.])
self.assertEqual(np.isnan(hpat_func(S)), np.isnan(test_impl(S)))
@unittest.skipIf(not hpat.config.config_pipeline_hpat_default,
"Series.sum() operator + is not implemented yet for Numba")
def test_series_sum2(self):
def test_impl(S):
return (S + S).sum()
hpat_func = hpat.jit(test_impl)
S = pd.Series([np.nan, 2., 3.])
self.assertEqual(hpat_func(S), test_impl(S))
S = pd.Series([np.nan, np.nan])
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_prod(self):
def test_impl(S, skipna):
return S.prod(skipna=skipna)
hpat_func = hpat.jit(test_impl)
data_samples = [
[6, 6, 2, 1, 3, 3, 2, 1, 2],
[1.1, 0.3, 2.1, 1, 3, 0.3, 2.1, 1.1, 2.2],
[6, 6.1, 2.2, 1, 3, 3, 2.2, 1, 2],
[6, 6, np.nan, 2, np.nan, 1, 3, 3, np.inf, 2, 1, 2, np.inf],
[1.1, 0.3, np.nan, 1.0, np.inf, 0.3, 2.1, np.nan, 2.2, np.inf],
[1.1, 0.3, np.nan, 1, np.inf, 0, 1.1, np.nan, 2.2, np.inf, 2, 2],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.inf],
]
for data in data_samples:
S = pd.Series(data)
for skipna_var in [True, False]:
actual = hpat_func(S, skipna=skipna_var)
expected = test_impl(S, skipna=skipna_var)
if np.isnan(actual) or np.isnan(expected):
# cannot compare NaN != NaN directly
self.assertEqual(np.isnan(actual), np.isnan(expected))
else:
self.assertEqual(actual, expected)
def test_series_prod_skipna_default(self):
def test_impl(S):
return S.prod()
hpat_func = hpat.jit(test_impl)
S = pd.Series([np.nan, 2, 3.])
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_count1(self):
def test_impl(S):
return S.count()
hpat_func = hpat.jit(test_impl)
S = pd.Series([np.nan, 2., 3.])
self.assertEqual(hpat_func(S), test_impl(S))
S = pd.Series([np.nan, np.nan])
self.assertEqual(hpat_func(S), test_impl(S))
S = pd.Series(['aa', 'bb', np.nan])
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_mean(self):
def test_impl(S):
return S.mean()
hpat_func = hpat.jit(test_impl)
data_samples = [
[6, 6, 2, 1, 3, 3, 2, 1, 2],
[1.1, 0.3, 2.1, 1, 3, 0.3, 2.1, 1.1, 2.2],
[6, 6.1, 2.2, 1, 3, 3, 2.2, 1, 2],
[6, 6, np.nan, 2, np.nan, 1, 3, 3, np.inf, 2, 1, 2, np.inf],
[1.1, 0.3, np.nan, 1.0, np.inf, 0.3, 2.1, np.nan, 2.2, np.inf],
[1.1, 0.3, np.nan, 1, np.inf, 0, 1.1, np.nan, 2.2, np.inf, 2, 2],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.inf],
]
for data in data_samples:
with self.subTest(data=data):
S = pd.Series(data)
actual = hpat_func(S)
expected = test_impl(S)
if np.isnan(actual) or np.isnan(expected):
self.assertEqual(np.isnan(actual), np.isnan(expected))
else:
self.assertEqual(actual, expected)
@unittest.skipIf(hpat.config.config_pipeline_hpat_default, "Series.mean() any parameters unsupported")
def test_series_mean_skipna(self):
def test_impl(S, skipna):
return S.mean(skipna=skipna)
hpat_func = hpat.jit(test_impl)
data_samples = [
[6, 6, 2, 1, 3, 3, 2, 1, 2],
[1.1, 0.3, 2.1, 1, 3, 0.3, 2.1, 1.1, 2.2],
[6, 6.1, 2.2, 1, 3, 3, 2.2, 1, 2],
[6, 6, np.nan, 2, np.nan, 1, 3, 3, np.inf, 2, 1, 2, np.inf],
[1.1, 0.3, np.nan, 1.0, np.inf, 0.3, 2.1, np.nan, 2.2, np.inf],
[1.1, 0.3, np.nan, 1, np.inf, 0, 1.1, np.nan, 2.2, np.inf, 2, 2],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.inf],
]
for skipna in [True, False]:
for data in data_samples:
S = pd.Series(data)
actual = hpat_func(S, skipna)
expected = test_impl(S, skipna)
if np.isnan(actual) or np.isnan(expected):
self.assertEqual(np.isnan(actual), np.isnan(expected))
else:
self.assertEqual(actual, expected)
def test_series_var1(self):
def test_impl(S):
return S.var()
hpat_func = hpat.jit(test_impl)
S = pd.Series([np.nan, 2., 3.])
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_min(self):
def test_impl(S):
return S.min()
hpat_func = hpat.jit(test_impl)
# TODO type_min/type_max
for input_data in [[np.nan, 2., np.nan, 3., np.inf, 1, -1000],
[8, 31, 1123, -1024],
[2., 3., 1, -1000, np.inf]]:
S = | pd.Series(input_data) | pandas.Series |
###############################################################
# Credit goes to pymo by Omid Alemi (omimo);
# this is a reimplementation based on the original draw_stickfigure3d with
# a slight performance improvement.
#
# This implementation is based on the pymo.vis
# draw_stickfigure3d; I made a few small changes and added some helpful configuration options
# so the other plots I implemented work well with it.
# It assumes that you use a preprocessed csv of the positions and a skeleton object as contained in the BVHData object
##################################################################
import os
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
import pandas as pd
from pymo.parsers import BVHParser
from pymo.preprocessing import MocapParameterizer
def load_joints_and_data(bvh_example_path):
bvh_parser = BVHParser()
bvh_data = bvh_parser.parse(bvh_example_path)
print("after parse")
BVH2Pos = MocapParameterizer('position')
data_pos = BVH2Pos.fit_transform([bvh_data])
return [j for j in data_pos[0].skeleton.keys()], data_pos[0]
def fast_draw_stickfigure3d(skeleton, frame, data, joints=None, ax=None, figsize=(8, 8), alpha=0.9, set_lims=True,
plot_scatter=False):
if ax is None:
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111, projection='3d')
if joints is None:
joints_to_draw = skeleton.keys()
else:
joints_to_draw = joints
df = data
xxs = df.iloc[frame].filter(regex=".*_Xposition")
yys = df.iloc[frame].filter(regex=".*_Yposition")
zzs = df.iloc[frame].filter(regex=".*_Zposition")
if set_lims:
r = max(max(xxs),max(yys),max(zzs))
ax.set_xlim3d(-r*0.55, r*0.55)
ax.set_ylim3d(-r*0.55, r*0.55)
ax.set_zlim3d(0, 1.1 * r)
if plot_scatter:
ax.scatter(xs=xxs.values,
ys=zzs.values,
zs=yys.values,
alpha=alpha, c='b', marker='o',s=1)
lines_X = [[df['%s_Xposition' %joint][frame] ,df['%s_Xposition' %c][frame]] for joint in joints_to_draw for c in skeleton[joint]['children'] ]
lines_Y = [[df['%s_Yposition' %joint][frame] ,df['%s_Yposition' %c][frame]] for joint in joints_to_draw for c in skeleton[joint]['children'] ]
lines_Z = [[df['%s_Zposition' %joint][frame] ,df['%s_Zposition' %c][frame]] for joint in joints_to_draw for c in skeleton[joint]['children'] ]
skel_lines = []
plot_lines = []
for x ,y ,z in zip(lines_X ,lines_Y ,lines_Z):
l = ax.plot(x ,z ,y ,'k-', lw=2, c='black' ,alpha=alpha)
plot_lines.append(l)
skel_lines.append([x ,y ,z])
return ax ,skel_lines ,plot_lines
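# Minimal usage sketch (illustrative, not part of the original module; 'example.bvh' is a placeholder path):
# joints, data = load_joints_and_data('example.bvh')
# ax, skel_lines, plot_lines = fast_draw_stickfigure3d(data.skeleton, frame=0, data=data.values,
#                                                      joints=joints, plot_scatter=True)
# plt.show()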
def plot_animation(joints, data, x,idx=None, title="title",set_lims = False,frames=180,dir_to_save="animations\\styleGan_anim", r = 20,is_centered=False,down_sample=1):
if not os.path.isdir(dir_to_save):
os.makedirs(dir_to_save,exist_ok=True)
def init():
print(skel_lines)
return lines,
def animate(frame):
lines_X = [[df['%s_Xposition' % joint][frame], df['%s_Xposition' % c][frame]] for joint in joints[1:] for c in
data.skeleton[joint]['children']]
lines_Y = [[df['%s_Yposition' % joint][frame], df['%s_Yposition' % c][frame]] for joint in joints[1:] for c in
data.skeleton[joint]['children']]
lines_Z = [[df['%s_Zposition' % joint][frame], df['%s_Zposition' % c][frame]] for joint in joints[1:] for c in
data.skeleton[joint]['children']]
for l, x, y, z, r in zip(lines, lines_X, lines_Y, lines_Z, range(1000)):
x_, y_, z_ = l[0].get_data_3d()
l[0].set_data_3d(x, z, y)
return lines,
smpl = x[0]
for i in range(1):
fig = plt.figure(figsize=(8, 8))
ax = fig.add_subplot(111, projection='3d')
if (not set_lims):
# r = 20
ax.set_xlim3d(-r * 0.55, r * 0.55)
ax.set_ylim3d(-r * 0.55, r * 0.55)
ax.set_zlim3d(0, 1.1 * r)
cols = data.values.columns[3:] if is_centered else data.values.columns
df = | pd.DataFrame(smpl, columns=cols) | pandas.DataFrame |
import pandas as pd
import censusdata
from data_pipeline.etl.base import ExtractTransformLoad
from data_pipeline.etl.sources.census.etl_utils import get_state_fips_codes
from data_pipeline.utils import get_module_logger
logger = get_module_logger(__name__)
class CensusACSETL(ExtractTransformLoad):
def __init__(self):
self.ACS_YEAR = 2019
self.OUTPUT_PATH = (
self.DATA_PATH / "dataset" / f"census_acs_{self.ACS_YEAR}"
)
self.UNEMPLOYED_FIELD_NAME = "Unemployed civilians (percent)"
self.LINGUISTIC_ISOLATION_FIELD_NAME = "Linguistic isolation (percent)"
self.LINGUISTIC_ISOLATION_TOTAL_FIELD_NAME = (
"Linguistic isolation (total)"
)
self.LINGUISTIC_ISOLATION_FIELDS = [
"C16002_001E", # Estimate!!Total
"C16002_004E", # Estimate!!Total!!Spanish!!Limited English speaking household
"C16002_007E", # Estimate!!Total!!Other Indo-European languages!!Limited English speaking household
"C16002_010E", # Estimate!!Total!!Asian and Pacific Island languages!!Limited English speaking household
"C16002_013E", # Estimate!!Total!!Other languages!!Limited English speaking household
]
self.MEDIAN_INCOME_FIELD = "B19013_001E"
self.MEDIAN_INCOME_FIELD_NAME = (
"Median household income in the past 12 months"
)
self.POVERTY_FIELDS = [
"C17002_001E", # Estimate!!Total,
"C17002_002E", # Estimate!!Total!!Under .50
"C17002_003E", # Estimate!!Total!!.50 to .99
"C17002_004E", # Estimate!!Total!!1.00 to 1.24
"C17002_005E", # Estimate!!Total!!1.25 to 1.49
"C17002_006E", # Estimate!!Total!!1.50 to 1.84
"C17002_007E", # Estimate!!Total!!1.85 to 1.99
]
self.POVERTY_LESS_THAN_100_PERCENT_FPL_FIELD_NAME = (
"Percent of individuals < 100% Federal Poverty Line"
)
self.POVERTY_LESS_THAN_150_PERCENT_FPL_FIELD_NAME = (
"Percent of individuals < 150% Federal Poverty Line"
)
self.POVERTY_LESS_THAN_200_PERCENT_FPL_FIELD_NAME = (
"Percent of individuals < 200% Federal Poverty Line"
)
self.STATE_GEOID_FIELD_NAME = "GEOID2"
self.df: pd.DataFrame
def _fips_from_censusdata_censusgeo(
self, censusgeo: censusdata.censusgeo
) -> str:
"""Create a FIPS code from the proprietary censusgeo index."""
fips = "".join([value for (key, value) in censusgeo.params()])
return fips
def extract(self) -> None:
dfs = []
for fips in get_state_fips_codes(self.DATA_PATH):
logger.info(
f"Downloading data for state/territory with FIPS code {fips}"
)
dfs.append(
censusdata.download(
src="acs5",
year=self.ACS_YEAR,
geo=censusdata.censusgeo(
[("state", fips), ("county", "*"), ("block group", "*")]
),
var=[
# Employment fields
"B23025_005E",
"B23025_003E",
self.MEDIAN_INCOME_FIELD,
]
+ self.LINGUISTIC_ISOLATION_FIELDS
+ self.POVERTY_FIELDS,
)
)
self.df = | pd.concat(dfs) | pandas.concat |
import pandas as pd
import numpy
import re
from collections import defaultdict
from data.definition import LIVER_FINDINGS
from data.definition import TEMPLE_RADIO_SENTENCE_EMBEDDINGS, TEMPLE_RADIO_SENTENCES
from sentence_transformers import SentenceTransformer
def generate_clinical_bert_representation():
# Use huggingface/transformers pre-trained model Bio_ClinicalBERT for mapping tokens to embeddings
model = SentenceTransformer("emilyalsentzer/Bio_ClinicalBERT")
sentences = pd.read_csv(LIVER_FINDINGS, header=0)["Sentence 1"]
sentences = list(sentences)
| pd.DataFrame(sentences) | pandas.DataFrame |
import os
import pandas as pd
import numpy as np
import plotly.graph_objects as go
import time
from datetime import datetime
def timer():
return '['+datetime.now().strftime("%d/%m/%Y %H:%M:%S")+']'
from data.processed.country_code import CC #Edit when importing on the dashboard
config = {'displayModeBar': False}
path="data/external/datas_sentinel5/"
#Help plotting gauges
def bar_color(x):
if x <= 0.000020:
bar_value={ 'color': "#2bcbba", 'thickness':1 }
elif (x>0.000020) & (x<=0.000040):
bar_value={ 'color': "#26de81", 'thickness':1 }
elif (x>0.000040) & (x<=0.000060):
bar_value={ 'color': "#fed330", 'thickness':1 }
elif (x>0.000060) & (x<=0.000080):
bar_value={ 'color': "#fd9644", 'thickness':1 }
else:
bar_value={ 'color': "#fc5c65", 'thickness':1 }
return bar_value
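# Illustrative example: bar_color(0.00005) falls in the 0.000040-0.000060 band and returns {'color': "#fed330", 'thickness': 1}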
#Help plotting gauges
def fig_gauge(fig, country_name, NO2, row=0, col=0):
return fig.add_trace(go.Indicator(
domain = {'row': row, 'column': col},
value = NO2,
mode = "gauge+number+delta",
title = {'text': country_name},
gauge = {'axis': {
'visible':False,
'range': [None, 0.0001]
},
'bar': bar_color(NO2),
'bordercolor':"white"
}))
#Handle errors if row not available for a day (Gauge plot)
def safe_execute(x):
if len(x) > 0:
return x[0]
else:
return float("NaN")
print(timer()+'[INFO] Importing air pollution data, this step can take a while...')
#Get the 90 most recent days of archived files
max_day=[elem.strftime("archived_%Y_%m_%d.csv.gz") for elem in pd.to_datetime(pd.Series(os.listdir(path+"archives/")), format="archived_%Y_%m_%d.csv.gz").sort_values(ascending=False).reset_index(drop=True)[0:90]]
max_day=[elem.split("_") for elem in max_day]
max_day=[[elem.lstrip("0") for elem in max_day[i]] for i in range(0,len(max_day))]
max_day=["_".join(max_day[i]) for i in range(0,len(max_day))]
try:
df
except NameError:
#Load the most recent archived files
df= | pd.DataFrame() | pandas.DataFrame |
import io
from unittest import TestCase
import numpy as np
import numpy.testing as npt
import pandas as pd
from pandas._testing import assert_frame_equal
from fluxpart.util import (
stats2,
multifile_read_csv,
chunked_df,
HFDataReadWarning,
)
def test_stats2():
"""Test stats2 func from fluxpart.util"""
data = "7 8 4\n6 1 3\n10 6 6\n6 7 3\n8 2 4"
dtype = [("v0", int), ("v1", int), ("v2", int)]
arr = np.genfromtxt(io.BytesIO(data.encode()), dtype=dtype)
ans = stats2(arr)
npt.assert_allclose(ans.ave_v0, 37 / 5)
npt.assert_allclose(ans.ave_v1, 24 / 5)
npt.assert_allclose(ans.ave_v2, 4)
npt.assert_allclose(ans.var_v0, 14 / 5)
npt.assert_allclose(ans.var_v1, 97 / 10)
npt.assert_allclose(ans.var_v2, 3 / 2)
npt.assert_allclose(ans.cov_v0_v1, 3 / 5)
npt.assert_allclose(ans.cov_v0_v2, 2)
npt.assert_allclose(ans.cov_v1_v0, ans.cov_v0_v1)
npt.assert_allclose(ans.cov_v1_v2, 1)
npt.assert_allclose(ans.cov_v2_v0, ans.cov_v0_v2)
npt.assert_allclose(ans.cov_v2_v1, ans.cov_v1_v2)
data = "7 8 4\n6 1 3\n10 6 6\n6 7 3\n8 2 4"
dtype = [("v0", int), ("v1", int), ("v2", int)]
arr = np.genfromtxt(io.BytesIO(data.encode()), dtype=dtype)
ans = stats2(arr, names=("v0", "v2"))
npt.assert_allclose(ans.ave_v0, 37 / 5)
npt.assert_allclose(ans.ave_v2, 4)
npt.assert_allclose(ans.var_v0, 14 / 5)
npt.assert_allclose(ans.var_v2, 3 / 2)
npt.assert_allclose(ans.cov_v0_v2, 2)
npt.assert_allclose(ans.cov_v2_v0, ans.cov_v0_v2)
assert not hasattr(ans, "ave_v1")
assert not hasattr(ans, "var_v1")
assert not hasattr(ans, "cov_v0_v1")
assert not hasattr(ans, "cov_v1_v0")
assert not hasattr(ans, "cov_v1_v2")
assert not hasattr(ans, "cov_v2_v1")
def test_mulitifile_read_csv():
file1 = io.BytesIO("1,2,3\n4,5,6\n7,8,9\n10,11,12".encode())
file2 = io.BytesIO("13,14,15\n16,17,18\n19,20,21\n22,23,24".encode())
reader = multifile_read_csv([file1, file2], header=None)
dfs = [
pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]),
pd.DataFrame([[13, 14, 15], [16, 17, 18], [19, 20, 21], [22, 23, 24]]),
]
for cnt, df in enumerate(reader):
| assert_frame_equal(df, dfs[cnt]) | pandas._testing.assert_frame_equal |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.10.3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Processing workflow for the 1986 National Geographic Smell Survey data
import pandas as pd
import pyrfume
from pyrfume.odorants import from_cids, get_cids
df = pd.read_csv('NGS.csv', index_col=0).astype(int) # Load the data
df.index.name = 'Subject' # The index column is the subject number
data_dict = pd.read_excel('Data dictionary.xlsx', index_col=0) # Load the data dictionary
# Determine which integer value, if any, is used for no response (usually 0)
has_non_response_option = data_dict[data_dict['VALUES'].str.contains('No response') == True]
value_for_nan = has_non_response_option['VALUES'].apply(lambda x: x.split('=')[0]).astype(int)
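# Illustrative (assumed data-dictionary format): a VALUES entry such as '0=No response' yields 0 for that question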
# Replace the value for no response with Python `None`
df = df.apply(lambda col: col.replace(value_for_nan.get(col.name, None), None))
# +
# Odorant abbreviations used in the column names
odorant_abbreviations = {'AND': 'Androstenone',
'AA': 'Isoamyl acetate',
'AMY': 'Isoamyl acetate',
'GAL': 'Galaxolide',
'GALAX': 'Galaxolide',
'EUG': 'Eugenol',
'MER': 'Mercaptans',
'MERCAP': 'Mercaptans',
'ROSE': 'Rose'}
# Question abbreviations used in the column names (see data dictionary for full question)
question_abbreviations = {'SMELL': 'Smell',
'QUAL': 'Quality',
'INT': 'Intensity',
'MEM': 'Memorable',
'EAT': 'Edible',
'WEAR': 'Wearable',
'DES': 'Descriptor'}
# List of unique odorant names
odorant_names = list(set(odorant_abbreviations.values()))
# -
# All (meta-)data not concerning the odorants themselves, i.e. information about the subjects
metadata = df[[col for col in df if not any([col.startswith('%s_' % x) for x in odorant_abbreviations])]]
metadata.head()
# Save this subject data
metadata.to_csv('subjects.csv')
# +
# All data concerning the odorants themselves
data = df[[col for col in df if any([col.startswith('%s_' % x) for x in odorant_abbreviations])]]
def f(s):
"""Convert e.g. 'AA_QUAL' into ('Amyl Acetate', 'Quality')"""
odorant, question = s.split('_')
return odorant_abbreviations[odorant], question_abbreviations[question]
# Turn column header into a multiindex with odorants names and questions as separate levels
data.columns = pd.MultiIndex.from_tuples(data.columns.map(f).tolist(), names=('Odorant', 'Question'))
data.head()
# -
# From methods.txt
# PEA added due to common knowledge that
# this is primary ingredient of IFF rose
molecule_names = ['5a-androst-16-en-3-one',
'isoamyl acetate',
'Galaxolide',
'eugenol',
'tert-butyl mercaptan',
'isopropyl mercaptan',
'n-propyl mercaptan',
'sec-butyl mercaptan',
'phenyl ethyl alcohol']
# Get PubChem IDs for each odorant
names_to_cids = get_cids(molecule_names)
# Generate information about molecules
cids = list(names_to_cids.values())
molecules = pd.DataFrame(from_cids(cids)).set_index('CID').sort_index()
molecules.head()
names_to_cids
# Save this molecule data
molecules.to_csv('molecules.csv')
mixtures = pd.DataFrame(index=odorant_names, columns=[0]+cids)
# v/v * components ratios
mixtures.loc['Mercaptans'] = 0.04*pd.Series({6387: 0.76,
6364: 0.18,
7848: 0.04,
10560: 0.02})
mixtures.loc['Androstenone'] = 0.001*pd.Series({6852393: 1})
mixtures.loc['Isoamyl acetate'] = 1*pd.Series({31276: 1})
mixtures.loc['Eugenol'] = 1*pd.Series({3314: 1})
mixtures.loc['Galaxolide'] = 0.425*pd.Series({91497: 1})
# Using common knowledge that IFF Rose is ~40% PEA
mixtures.loc['Rose'] = 0.8* | pd.Series({6054: 0.4, 0: 0.6}) | pandas.Series |
#!/usr/bin/env python
import numpy as np
import pandas as pd
from sklearn.metrics import confusion_matrix
from os import path
_README="""
A script to generate confusion matrix and evaluate accuracy of rfmix.
-<NAME> (magu[at]stanford[dot]edu)
"""
_TODO="""
1. modify output directory (currently the output text file is stored in the directory that this script is called from).
"""
# quick check if we're on galangal
import platform
if platform.uname()[1]=='galangal.stanford.edu':
print('error: script must be modified to be run on galangal')
else:
# assume we're on sherlock -- load modules and check versions
print('Assuming we are on sherlock')
# define functions
def load_data(args_dict):
print('Loading in data')
## check if output file already exists
if path.exists(args_dict['output-filename']):
print('Error: output file already exists. Aborting script.')
return '', '', True
## load y and yhat_raw
data_dir='/scratch/users/magu/deepmix/data/simulated_chr20/'
yhat_raw=pd.read_table(data_dir+'vcf/rf_out/'+args_dict['rfmix-result-filename'], skiprows=1)
y=np.load(data_dir+'label/'+args_dict['gt-filename'])
return y, yhat_raw, False
def expand_rfmix_windows(y, yhat_raw, S):
print('Expanding rfmix windows')
V_pos=y['V'][:,1].astype(int)
yhat=pd.DataFrame(index=['_'.join(s) for s in y['V']], columns=S)
for ix in range(yhat_raw.shape[0]):
ids=(yhat_raw.iloc[ix,1] <= V_pos) & (V_pos <= yhat_raw.iloc[ix,2])
yhat.iloc[ids,:]=np.vstack([yhat_raw.iloc[ix,6:] for _ in range(sum(ids))]).astype(int)+1
return yhat
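# Illustrative sketch of the expansion above: a window row spanning positions 100-200 is copied to every variant whose position falls in that range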
def evaluate_model(y, yhat, args_dict):
print('Evaluating model and creating text file')
## create df of confusion matrices and evaluate accuracy
# confusion
cm=confusion_matrix(y['L'].flatten(), yhat.T.values.flatten().astype(int))
# accuracy
acc=np.sum(np.diag(cm))/np.sum(cm)
anc_label=['AFR', 'EAS', 'EUR', 'NAT', 'SAS']
row_normalized_df = | pd.DataFrame(cm, index=anc_label, columns=anc_label) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
@author: <NAME> <<EMAIL>>
@brief: feature combiner
"""
import os
import sys
import imp
from optparse import OptionParser
import scipy
import pandas as pd
import numpy as np
import config
from config import TRAIN_SIZE
from utils import logging_utils, time_utils, pkl_utils, np_utils
splitter_level1 = pkl_utils._load("%s/splits_level1.pkl"%config.SPLIT_DIR)
splitter_level2 = pkl_utils._load("%s/splits_level2.pkl"%config.SPLIT_DIR)
splitter_level3 = pkl_utils._load("%s/splits_level3.pkl"%config.SPLIT_DIR)
assert len(splitter_level1) == len(splitter_level2)
assert len(splitter_level1) == len(splitter_level3)
n_iter = len(splitter_level1)
class Combiner:
def __init__(self, feature_dict, feature_name, feature_suffix=".pkl", corr_threshold=0):
self.feature_name = feature_name
self.feature_dict = feature_dict
self.feature_suffix = feature_suffix
self.corr_threshold = corr_threshold
self.feature_names_basic = []
self.feature_names_cv = []
self.feature_names = []
self.basic_only = 0
logname = "feature_combiner_%s_%s.log"%(feature_name, time_utils._timestamp())
self.logger = logging_utils._get_logger(config.LOG_DIR, logname)
self.splitter = splitter_level1
self.n_iter = n_iter
def load_feature(self, feature_dir, feature_name):
fname = os.path.join(feature_dir, feature_name+self.feature_suffix)
return pkl_utils._load(fname)
def combine(self):
dfAll = pkl_utils._load(config.INFO_DATA)
dfAll_raw = dfAll.copy()
y_train = dfAll["relevance"].values[:TRAIN_SIZE]
## for basic features
feat_cnt = 0
self.logger.info("Run for basic...")
for file_name in sorted(os.listdir(config.FEAT_DIR)):
if self.feature_suffix in file_name:
fname = file_name.split(".")[0]
if fname not in self.feature_dict:
continue
x = self.load_feature(config.FEAT_DIR, fname)
x = np.nan_to_num(x)
if np.isnan(x).any():
self.logger.info("%s nan"%fname)
continue
# apply feature transform
mandatory = self.feature_dict[fname][0]
transformer = self.feature_dict[fname][1]
x = transformer.fit_transform(x)
dim = np_utils._dim(x)
if dim == 1:
corr = np_utils._corr(x[:TRAIN_SIZE], y_train)
if not mandatory and abs(corr) < self.corr_threshold:
self.logger.info("Drop: {} ({}D) (abs corr = {}, < threshold = {})".format(
fname, dim, abs(corr), self.corr_threshold))
continue
dfAll[fname] = x
self.feature_names.append(fname)
else:
columns = ["%s_%d"%(fname, x) for x in range(dim)]
df = pd.DataFrame(x, columns=columns)
dfAll = pd.concat([dfAll, df], axis=1)
self.feature_names.extend(columns)
feat_cnt += 1
self.feature_names_basic.append(fname)
if dim == 1:
self.logger.info("Combine {:>3}/{:>3} feat: {} ({}D) (corr = {})".format(
feat_cnt, len(self.feature_dict.keys()), fname, dim, corr))
else:
self.logger.info("Combine {:>3}/{:>3} feat: {} ({}D)".format(
feat_cnt, len(self.feature_dict.keys()), fname, dim))
dfAll.fillna(config.MISSING_VALUE_NUMERIC, inplace=True)
## basic
dfTrain = dfAll.iloc[:TRAIN_SIZE].copy()
self.y_train = dfTrain["relevance"].values.astype(float)
dfTrain.drop(["id","relevance"], axis=1, inplace=True)
self.X_train = dfTrain.values.astype(float)
dfTest = dfAll.iloc[TRAIN_SIZE:].copy()
self.id_test = dfTest["id"].values.astype(int)
dfTest.drop(["id","relevance"], axis=1, inplace=True)
self.X_test = dfTest.values.astype(float)
## all
first = True
feat_cv_cnt = 0
dfAll_cv_all = dfAll_raw.copy()
feature_dir = "%s/All" % (config.FEAT_DIR)
for file_name in sorted(os.listdir(feature_dir)):
if self.feature_suffix in file_name:
fname = file_name.split(".")[0]
if fname not in self.feature_dict:
continue
if first:
self.logger.info("Run for all...")
first = False
x = self.load_feature(feature_dir, fname)
x = np.nan_to_num(x)
if np.isnan(x).any():
self.logger.info("%s nan"%fname)
continue
# apply feature transform
mandatory = self.feature_dict[fname][0]
transformer = self.feature_dict[fname][1]
x = transformer.fit_transform(x)
dim = np_utils._dim(x)
if dim == 1:
corr = np_utils._corr(x[:TRAIN_SIZE], y_train)
if not mandatory and abs(corr) < self.corr_threshold:
self.logger.info("Drop: {} ({}D) (abs corr = {}, < threshold = {})".format(
fname, dim, abs(corr), self.corr_threshold))
continue
dfAll_cv_all[fname] = x
self.feature_names.append(fname)
else:
columns = ["%s_%d"%(fname, x) for x in range(dim)]
df = | pd.DataFrame(x, columns=columns) | pandas.DataFrame |
"""
This code was modified on top of Google tensorflow
(https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/using_your_own_dataset.md)
This code works similarly to the `label-maker` package when used with Label Maker and the TensorFlow Object Detection API.
To create a correct training data set for TensorFlow Object Detection, we recommend you:
1. After running `label-maker images`, do `git clone https://github.com/tensorflow/models.git`
2. Install TensorFlow object detection by following this: https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/installation.md
3. From your Label Maker, copy the `tiles` folder, this code `tf_records_generation.py`, and `labels.py` to the TensorFlow object detection directory
4. From directory `tensorflow/models/research/` run:
python tf_records_generation.py --label_input=labels.npz \
--train_rd_path=data/train_buildings.record \
--test_rd_path=data/test_buildings.record
"""
#cansat tags for a while
tags = ["buildings", "highway", "aerodrom", "apron", "runway", "taxiway", "grassland", "heath", "scrub", "water", "wood", "farmland", "grass", "residential", "ditch", "river"]
import os
import io
import numpy as np
from os import makedirs, path as op
import shutil
import pandas as pd
import tensorflow as tf
from PIL import Image
from utils import dataset_util
from collections import namedtuple
flags = tf.app.flags
flags.DEFINE_string('label_input', '', 'Path to the labels.npz input')
flags.DEFINE_string('tiles_input', '', 'Path to the tiles input')
flags.DEFINE_string('train_tf_path', '', 'Path to the train input')
flags.DEFINE_string('test_tf_path', '', 'Path to the test input')
flags.DEFINE_string('train_rd_path', '', 'Path to output TFRecord')
flags.DEFINE_string('test_rd_path', '', 'Path to output TFRecord')
FLAGS = flags.FLAGS
def class_text_to_int(row_label):
return tags[row_label]
def split(df, group):
data = namedtuple('data', ['filename', 'object'])
gb = df.groupby(group)
return [data(filename, gb.get_group(x)) for filename, x in zip(gb.groups.keys(), gb.groups)]
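# Illustrative usage (column names follow the dataframe built in main() below; 'examples_df' is a placeholder name):
# split(examples_df, 'filename') -> [data(filename='<tile>.jpg', object=<bounding-box rows for that tile>), ...]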
def create_tf_example(group, path):
"""Creates a tf.Example proto from sample buillding image tile.
Args:
encoded_building_image_data: The jpg encoded data of the building image.
Returns:
example: The created tf.Example.
"""
with tf.gfile.GFile(op.join(path, '{}'.format(group.filename)), 'rb') as fid:
encoded_jpg = fid.read()
encoded_jpg_io = io.BytesIO(encoded_jpg)
image = Image.open(encoded_jpg_io)
width, height = image.size
filename = group.filename.encode('utf8')
image_format = b'jpg'
xmins = []
xmaxs = []
ymins = []
ymaxs = []
classes_text = []
classes = []
for _, row in group.object.iterrows():
xmins.append(row['xmin'] / width)
xmaxs.append(row['xmax'] / width)
ymins.append(row['ymin'] / height)
ymaxs.append(row['ymax'] / height)
classes_text.append(tags[row['class_num']].encode('utf8'))
classes.append(row['class_num'])
tf_example = tf.train.Example(features=tf.train.Features(feature={
'image/height': dataset_util.int64_feature(height),
'image/width': dataset_util.int64_feature(width),
'image/filename': dataset_util.bytes_feature(filename),
'image/source_id': dataset_util.bytes_feature(filename),
'image/encoded': dataset_util.bytes_feature(encoded_jpg),
'image/format': dataset_util.bytes_feature(image_format),
'image/object/bbox/xmin': dataset_util.float_list_feature(xmins),
'image/object/bbox/xmax': dataset_util.float_list_feature(xmaxs),
'image/object/bbox/ymin': dataset_util.float_list_feature(ymins),
'image/object/bbox/ymax': dataset_util.float_list_feature(ymaxs),
'image/object/class/text': dataset_util.bytes_list_feature(classes_text),
'image/object/class/label': dataset_util.int64_list_feature(classes),
}))
return tf_example
def main(_):
labels = np.load(op.join(os.getcwd(), FLAGS.label_input))
tile_names = [tile for tile in labels.files]
tile_names.sort()
tiles = np.array(tile_names)
tf_tiles_info = []
for tile in tiles:
bboxes = labels[tile].tolist()
width = 256
height = 256
if bboxes:
for bbox in bboxes:
class_num = bbox[4]
bbox = [max(0, min(255, x)) for x in bbox[0:4]]
y = ["{}.jpg".format(tile), width, height, class_num, bbox[0], bbox[1], bbox[2], bbox[3]]
tf_tiles_info.append(y)
split_index = int(len(tf_tiles_info) * 0.8)
column_name = ['filename', 'width', 'height', 'class_num', 'xmin', 'ymin', 'xmax', 'ymax']
df = | pd.DataFrame(tf_tiles_info, columns=column_name) | pandas.DataFrame |
# coding: utf-8
# # Creating a dataset of Ohio injection wells
import matplotlib.pyplot as plt
import random
import numpy as np
import pandas as pd
import os
# set datadir to the directory that holds the zipfile
datadir = 'c:\MyDocs/sandbox/data/datasets/FracFocus/'
outdir = datadir+'output/'
indir = datadir+'OH_injection/'
tempf = outdir+'temp.csv'
tempf1 = outdir+'temp1.csv'
pre_four = outdir+'pre_four.csv'
# print(os.listdir(indir))
# input files are in three different formats:
# oldest: tuple (filename,yr,q)
# all columns are named the same!!
fn_old = [('OH_1ST QUARTER 2011 BRINE DISPOSAL FEES.xls',2011,1),
('OH_2ND QUARTER 2011 BRINE DISPOSAL FEES.xls',2011,2),
('OH_3RD QUARTER 2011 BRINE DISPOSAL FEES-1.xls',2011,3),
('OH_4TH QUARTER 2010 BRINE DISPOSAL FEES.xls',2010,4),
('OH_4TH QUARTER 2011 BRINE DISPOSAL FEES.xls',2011,4),
('OH_Brine Disposal Fee - 3rd Quarter 2010-2.xls',2010,3)]
# the 2012 file is in a funky state - its worksheets use two different formats: a blend of old and main
# so we have to process it separately
fn_2012 = 'OH_BRINE DISPOSAL FEES FOR 2012.xls'
# fn_2012 = 'OH_BRINE DISPOSAL FEES FOR 2012 CORRECTED.xlsx'
# bulk of the data are here - first four worksheets are quarters. Total worksheet ignored
# tuple: (filename,year)
fn_main = [('BRINE DISPOSAL FEES FOR 2013.xlsx',2013),
('BRINE DISPOSAL FEES FOR 2014.xlsx',2014),
('BRINE DISPOSAL FEES FOR 2015.xlsx',2015),
('BRINE DISPOSAL FEES FOR 2016.xlsx',2016),
('BRINE DISPOSAL FEES FOR 2017.xlsx',2017)]
# current files are of a different format.
fn_2018_etc = [('BRINE DISPOSAL FEES FOR 2018.xlsx',2018),
('BRINE DISPOSAL FEES FOR 2019.xlsx',2019)]
SWDfn = indir+'Copy of SWD locations - July_2018.xls'
ODNR_permit_pickle = outdir+'ODNR_permit.pkl'
ODNR_injection_pickle = outdir+'ODNR_injection.pkl'
inj_excel = outdir+'Inject_wide.xlsx'
# In[59]:
t = pd.read_pickle(ODNR_injection_pickle)
x = t[t.Owner.str.contains('HUNTER')]
t.to_csv(tempf)
# ## get oldest data
# In[60]:
dlst = []
for fnl in fn_old:
print(fnl)
fn = fnl[0]
yr = fnl[1]
quar = fnl[2]
# print(fn,yr,quar)
d = pd.read_excel(indir+fn,skiprows=5,header=None,usecols=[7,8,10,11],
names=['CompanyName','APIstr','Vol','In_Out'])
d.Vol = d.Vol.where(d.Vol.str.lower().str.strip()!='zero',0)
d.Vol = pd.to_numeric(d.Vol)
dIn = d[d.In_Out.str.lower().str[0]=='i']
dIn = dIn.filter(['CompanyName','APIstr','Vol'])
dIn.columns = ['CompanyName','APIstr','Vol_InDist']
dOut = d[d.In_Out.str.lower().str[0]=='o']
dOut = dOut.filter(['APIstr','Vol'])
dOut.columns = ['APIstr','Vol_OutDist']
d['Year'] = fnl[1]
d['Quarter'] = fnl[2]
mg = pd.merge(dIn,dOut,how='outer',left_on='APIstr',right_on='APIstr')
mg['Year'] = fnl[1]
mg['Quarter'] = fnl[2]
dlst.append(mg)
old = pd.concat(dlst)
old.to_csv(tempf)
# In[61]:
old.info()
# ## process the 2012 file
# In[62]:
dlst = []
uc1 = [1,2,4,8]
uc2 = [7,8,10,14]
for ws in [0,1,2,3]: # ws 1 is like 'main'; others like 'old'
# print(ws)
if ws == 1:
uc = uc1
else:
uc= uc2
# print(uc)
d = pd.read_excel(indir+fn_2012,skiprows=6,sheet_name=ws,
usecols=uc,header=None,
names=['CompanyName','APIstr','Vol_InDist','Vol_OutDist'])
d = d.dropna(axis=0,subset=['CompanyName'])
d['Year'] = 2012
d['Quarter'] = ws+1
dlst.append(d)
if ws==1:
tmp = d
trans2012 = | pd.concat(dlst) | pandas.concat |
#!/usr/bin/env python
# coding: utf-8
# ## Import Libraries
# Let's import some libraries to get started!
# In[73]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
get_ipython().run_line_magic('matplotlib', 'inline')
# ## The Data
#
# Let's start by reading in the titanic_train.csv file into a pandas dataframe.
# In[74]:
train = pd.read_csv('titanic_train.csv')
# In[75]:
train.head()
# # Exploratory Data Analysis
#
# Let's begin some exploratory data analysis! We'll start by checking out missing data!
#
# ## Missing Data
#
# We can use seaborn to create a simple heatmap to see where we are missing data!
# In[76]:
sns.heatmap(train.isnull(),yticklabels=False,cbar=False,cmap='viridis')
# Roughly 20 percent of the Age data is missing. The proportion of Age missing is likely small enough for reasonable replacement with some form of imputation. Looking at the Cabin column, it looks like we are just missing too much of that data to do something useful with at a basic level. We'll probably drop this later, or change it to another feature like "Cabin Known: 1 or 0"
#
# Let's continue on by visualizing some more of the data! Check out the video for full explanations over these plots, this code is just to serve as reference.
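# Before continuing, below is a minimal illustrative sketch (not part of the original notebook) of the "Cabin Known: 1 or 0" feature mentioned above; 'Has_Cabin' is an assumed column name.

# In[ ]:

train['Has_Cabin'] = train['Cabin'].notnull().astype(int)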
# In[77]:
sns.set_style('whitegrid')
sns.countplot(x='Survived',data=train,palette='RdBu_r')
# In[78]:
sns.set_style('whitegrid')
sns.countplot(x='Survived',hue='Sex',data=train,palette='RdBu_r')
# In[79]:
sns.set_style('whitegrid')
sns.countplot(x='Survived',hue='Pclass',data=train,palette='rainbow')
# In[80]:
sns.distplot(train['Age'].dropna(),kde=False,color='darkred',bins=30)
# In[81]:
train['Age'].hist(bins=30,color='darkred',alpha=0.7)
# In[82]:
sns.countplot(x='SibSp',data=train)
# In[83]:
train['Fare'].hist(color='green',bins=40,figsize=(8,4))
# ____
# ### Cufflinks for plots
# ___
# Let's take a quick moment to show an example of cufflinks!
# In[84]:
import cufflinks as cf
cf.go_offline()
# In[85]:
train['Fare'].iplot(kind='hist',bins=30,color='green')
# ___
# ## Data Cleaning
# We want to fill in missing age data instead of just dropping the missing age data rows. One way to do this is by filling in the mean age of all the passengers (imputation).
# However we can be smarter about this and check the average age by passenger class. For example:
#
# In[86]:
plt.figure(figsize=(12, 7))
sns.boxplot(x='Pclass',y='Age',data=train,palette='winter')
# We can see the wealthier passengers in the higher classes tend to be older, which makes sense. We'll use these average age values to impute based on Pclass for Age.
# In[87]:
def impute_age(cols):
Age = cols[0]
Pclass = cols[1]
if | pd.isnull(Age) | pandas.isnull |
# Copyright (C) 2017 Beijing Didi Infinity Technology and Development Co.,Ltd.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
""" data baker dataset
The speech corpus is recorded by a female speaker who pronounces standard Mandarin.
It includes about 12 hours of audio and is composed of 10000 sentences.
Detailed information can be found at https://www.data-baker.com/open_source.html
"""
import os
import sys
import codecs
import tempfile
import re
import pandas
from six.moves import urllib
from absl import logging
from sklearn.model_selection import train_test_split
import tensorflow as tf
import rarfile
from athena import get_wave_file_length
# SUBSETS = ["train", "dev", "test"]
GFILE = tf.compat.v1.gfile
URL = "https://weixinxcxdb.oss-cn-beijing.aliyuncs.com/gwYinPinKu/BZNSYP.rar"
# ascii code, used to delete Chinese punctuation
CHN_PUNC_LIST = [183, 215, 8212, 8216, 8217, 8220, 8221, 8230,
12289, 12290, 12298, 12299, 12302, 12303, 12304, 12305,
65281, 65288, 65289, 65292, 65306, 65307, 65311]
CHN_PUNC_SET = set(CHN_PUNC_LIST)
MANDARIN_INITIAL_LIST = ["b", "ch", "c", "d", "f", "g", "h", "j",\
"k", "l", "m", "n", "p", "q", "r", "sh", "s", "t", "x", "zh", "z"]
# prosody phone list
CHN_PHONE_PUNC_LIST = ['sp2', 'sp1', 'sil']
# erhua phoneme
CODE_ERX = 0x513F
def _update_insert_pos(old_pos, pylist):
new_pos = old_pos + 1
i = new_pos
while i < len(pylist)-1:
# if the first letter is upper, then this is the phoneme of English letter
if pylist[i][0].isupper():
i += 1
new_pos += 1
else:
break
return new_pos
def _pinyin_preprocess(line, words):
if line.find('.') >= 0:
# remove '.' in English letter phonemes, for example: 'EH1 F . EY1 CH . P IY1'
py_list = line.replace('/', '').strip().split('.')
py_str = ''.join(py_list)
pinyin = py_str.split()
else:
pinyin = line.replace('/', '').strip().split()
# now the content of pinyin looks like: ['OW1', 'K', 'Y', 'UW1', 'JH', 'EY1', 'shi4', 'yi2', 'ge4']
insert_pos = _update_insert_pos(-1, pinyin)
i = 0
while i < len(words):
if ord(words[i]) in CHN_PUNC_SET:
i += 1
continue
if words[i] == '#' and (words[i+1] >= '1' and words[i+1] <= '4'):
if words[i+1] == '1':
pass
else:
if words[i+1] == '2':
pinyin.insert(insert_pos, 'sp2')
if words[i+1] == '3':
pinyin.insert(insert_pos, 'sp2')
elif words[i+1] == '4':
pinyin.append('sil')
break
insert_pos = _update_insert_pos(insert_pos, pinyin)
i += 2
elif ord(words[i]) == CODE_ERX:
if pinyin[insert_pos-1].find('er') != 0: # erhua
i += 1
else:
insert_pos = _update_insert_pos(insert_pos, pinyin)
i += 1
# skip non-Mandarin characters, including A-Z, a-z, Greek letters, etc.
elif ord(words[i]) < 0x4E00 or ord(words[i]) > 0x9FA5:
i += 1
else:
insert_pos = _update_insert_pos(insert_pos, pinyin)
i += 1
return pinyin
def _pinyin_2_initialfinal(py):
"""
used to split a pinyin syllable into initial and final phonemes
"""
if py[0] == 'a' or py[0] == 'e' or py[0] == 'E' or py[0] == 'o' or py[:2] == 'ng' or \
py[:2] == 'hm':
py_initial = ''
py_final = py
elif py[0] == 'y':
py_initial = ''
if py[1] == 'u' or py[1] == 'v':
py_final = list(py[1:])
py_final[0] = 'v'
py_final = ''.join(py_final)
elif py[1] == 'i':
py_final = py[1:]
else:
py_final = list(py)
py_final[0] = 'i'
py_final = ''.join(py_final)
elif py[0] == 'w':
py_initial = ''
if py[1] == 'u':
py_final = py[1:]
else:
py_final = list(py)
py_final[0] = 'u'
py_final = ''.join(py_final)
else:
init_cand = ''
for init in MANDARIN_INITIAL_LIST:
init_len = len(init)
init_cand = py[:init_len]
if init_cand == init:
break
if init_cand == '':
raise Exception('unexpected')
py_initial = init_cand
py_final = py[init_len:]
if (py_initial in set(['j', 'q', 'x']) and py_final[0] == 'u'):
py_final = list(py_final)
py_final[0] = 'v'
py_final = ''.join(py_final)
if py_final[-1] == '6':
py_final = py_final.replace('6', '2')
return (py_initial, py_final)
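# Illustrative examples (assuming well-formed pinyin input):
# _pinyin_2_initialfinal('zhang1') -> ('zh', 'ang1')
# _pinyin_2_initialfinal('an4') -> ('', 'an4')
# _pinyin_2_initialfinal('yu3') -> ('', 'v3')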
def is_all_eng(words):
#if include mandarin
for word in words:
if ord(word) >= 0x4E00 and ord(word) <= 0x9FA5:
return False
return True
def pinyin_2_phoneme(pinyin_line, words):
#chn or chn+eng
sent_phoneme = ['sp1']
if not is_all_eng(words):
sent_py = _pinyin_preprocess(pinyin_line, words)
for py in sent_py:
if py[0].isupper() or py in CHN_PHONE_PUNC_LIST:
sent_phoneme.append(py)
else:
initial, final = _pinyin_2_initialfinal(py)
if initial == '':
sent_phoneme.append(final)
else:
sent_phoneme.append(initial)
sent_phoneme.append(final)
else:
wordlist = words.split(' ')
word_phonelist = pinyin_line.strip().split('/')
assert(len(word_phonelist) == len(wordlist))
i = 0
while i < len(word_phonelist):
phone = re.split(r'[ .]', word_phonelist[i])
for p in phone:
if p:
sent_phoneme.append(p)
if '/' in wordlist[i]:
sent_phoneme.append('sp2')
elif '%' in wordlist[i]:
if i != len(word_phonelist)-1:
sent_phoneme.append('sp2')
else:
sent_phoneme.append('sil')
i += 1
return ' '.join(sent_phoneme)
def download_and_extract(directory, url):
"""Download and extract the given dataset.
Args:
directory: the directory where to extract the tarball.
url: the url to download the data file.
"""
_, rar_filepath = tempfile.mkstemp(suffix=".rar") # get rar_path
try:
logging.info("Downloading %s to %s" % (url, rar_filepath))
def _progress(count, block_size, total_size):
sys.stdout.write(
"\r > > Downloading {} {:.1f}%".format(
rar_filepath, 100.0 * count * block_size / total_size
)
)
sys.stdout.flush()
urllib.request.urlretrieve(url, rar_filepath, _progress) # show the progress of download
statinfo = os.stat(rar_filepath) # run a stat
logging.info(
"Successfully downloaded %s, size(bytes): %d" % (url, statinfo.st_size) # size-->bytes
)
rf = rarfile.RarFile(rar_filepath) # need to install unrar!!!!
rf.extractall(directory)
finally:
GFILE.Remove(rar_filepath)
def trans_prosody(dataset_dir):
trans_path = os.path.join(dataset_dir, "BZNSYP/ProsodyLabeling/")
is_sentid_line = True
with open(trans_path + '000001-010000.txt', encoding='utf-8') as f,\
open(trans_path + 'biaobei_prosody.csv', 'w') as fw:
for line in f:
if is_sentid_line:
sent_id = line.split()[0]
words = line.split('\t')[1].strip()
else:
sent_phonemes = pinyin_2_phoneme(line, words)
fw.writelines('|'.join([sent_id, sent_phonemes, sent_phonemes]) + '\n')
is_sentid_line = not is_sentid_line
def convert_audio_and_split_transcript(dataset_dir, total_csv_path):
"""Convert rar to WAV and split the transcript.
Args:
dataset_dir : the directory which holds the input dataset.
total_csv_path : the resulting output csv file.
BZNSYP dir Tree structure:
BZNSYP
-ProsodyLabeling
-000001-010000.txt
-biaobei_prosody.csv
-Wave
-000001.wav
-000002.wav
...
-PhoneLabeling
-000001.interval
-000002.interval
...
"""
logging.info("ProcessingA audio and transcript for {}".format("all_files"))
audio_dir = os.path.join(dataset_dir, "BZNSYP/Wave/")
prosodyLabel_dir = os.path.join(dataset_dir, "BZNSYP/ProsodyLabeling/")
files = []
# ProsodyLabel ---word
with codecs.open(os.path.join(prosodyLabel_dir, "biaobei_prosody.csv"), "r",
encoding="utf-8") as f:
for line in f:
transcript_id = line.strip().split("|")[0]
transcript = line.strip().split("|")[1]
# get wav_length
wav_file = os.path.join(audio_dir, transcript_id + '.wav')
wav_length = get_wave_file_length(wav_file)
files.append((os.path.abspath(wav_file), wav_length, transcript))
# Write to CSV file which contains three columns:
df = pandas.DataFrame(
data=files, columns=["wav_filename", "wav_length_ms", "transcript"]
)
df.to_csv(total_csv_path, index=False)
logging.info("Successfully generated csv file {}".format(total_csv_path))
def split_train_dev_test(total_csv, output_dir):
# get total_csv
data = | pandas.read_csv(total_csv) | pandas.read_csv |
# Package imports
import pandas as pd
import requests
import datetime
from unidecode import unidecode as UnicodeFormatter
import os
import bcolors
# Local imports
import path_configuration
import url_configuration
import progress_calculator
class GrandPrix(object):
Url = None
Path = None
Requests = None
def __init__(self):
self.Url = url_configuration.Url_builder()
self.Path = path_configuration.Path()
self.Requests = requests
def import_grand_prix(self):
content = os.listdir(self.Path.get_season_path())
content.sort()
"""for year in content:
DataFrame = pd.read_csv(Path.get_season_path()+year)
print(DataFrame)"""
DataFrame = pd.read_csv(self.Path.get_season_path()+'2019.csv')
Date = list(DataFrame['Date'])
GrandPrix = list(DataFrame['Grand Prix'])
Round = list(DataFrame['Round'])
Date_obj = []
# DATE OBJ
for date in Date:
Date_obj.append(datetime.datetime.strptime(date, '%Y-%m-%d'))
Progress = progress_calculator.ProgressBar(Round)
# WHILE - BY GPS OF THE YEAR
i = 0
while i < Round.__len__():
# CHECK YEAR
if Date_obj[i] < datetime.datetime.now():
# METHOD CALLS
print(bcolors.PASS + 'STARTING EXTRACTOR, GETTING FROM', GrandPrix[i], 'DATE:', Date[i] + bcolors.END)
self.drivers_csv(Round[i], Date_obj[i].year, GrandPrix[i])
self.contructors_csv(Round[i], Date_obj[i].year, GrandPrix[i])
self.pitstops_times_csv(Round[i], Date_obj[i].year, GrandPrix[i])
self.result_csv(Round[i], Date_obj[i].year, GrandPrix[i])
self.by_lap_csv(Round[i], Date_obj[i].year, GrandPrix[i])
self.current_driver_standings(Round[i], Date_obj[i].year, GrandPrix[i])
self.status(Round[i], Date_obj[i].year, GrandPrix[i])
if Date_obj[i].year > 2017:
url = self.Url.f1_url(Date_obj[i].year, Date_obj[i].date(), GrandPrix[i])
self.load_data_from_f1(url, Date_obj[i].year, GrandPrix[i])
Progress.get_progress_bar()
i = i + 1
def drivers_csv(self, round, year, gp_name):
print(bcolors.ITALIC + 'GETTING DRIVERS BY RACE...', gp_name + bcolors.END)
url = self.Url.url_driver(round, year)
page = self.Requests.get(url)
json = page.json()
json = json['MRData']
json = json['DriverTable']
Drivers = json['Drivers']
DriversID = []
DriversInitials = []
DriversName = []
YearsOld = []
for driver in Drivers:
DriversID.append(driver['driverId'])
DriversInitials.append(driver['code'])
DriversName.append(UnicodeFormatter(driver['givenName']+' '+driver['familyName']))
YearsOld.append(
datetime.datetime.now().year - datetime.datetime.strptime(driver['dateOfBirth'], '%Y-%m-%d').year
)
Drivers_Dict = {'Driver ID': DriversID, 'Driver Initials': DriversInitials,
'Driver Name': DriversName, 'Years Old': YearsOld}
Drivers_Data = pd.DataFrame(data=Drivers_Dict)
Path = self.Path.grandprix_path(year, gp_name, 'Drivers')
Drivers_Data.to_csv(Path)
def contructors_csv(self, round, year, gp_name):
print(bcolors.ITALIC + 'GETTING CONSTRUCTORS BY RACE...', gp_name + bcolors.END)
url = self.Url.url_constructor(round, year)
page = self.Requests.get(url)
json = page.json()
json = json['MRData']
json = json['ConstructorTable']
Constructors = json['Constructors']
ConstructorID = []
ConstructorName = []
for constructor in Constructors:
ConstructorID.append(constructor['constructorId'])
ConstructorName.append(constructor['name'])
Constructors_Dict = {"Constructor ID": ConstructorID, "Constructor Name": ConstructorName}
Constructor_Data = pd.DataFrame(data=Constructors_Dict)
Path = self.Path.grandprix_path(year, gp_name, 'Constructors')
Constructor_Data.to_csv(Path)
def pitstops_times_csv(self, round, year, gp_name):
print(bcolors.ITALIC + 'GETTING PITSTOPS BY RACE...', gp_name + bcolors.END)
url = self.Url.url_pitstops_time(round, year)
page = self.Requests.get(url)
json = page.json()
json = json['MRData']
json = json['RaceTable']
Race = json['Races'][0]
PitStops = Race['PitStops']
DriverID = []
Corresponding_Lap = []
Driver_Stop_Number = []
PitStop_Time = []
for pitstop in PitStops:
DriverID.append(pitstop['driverId'])
Corresponding_Lap.append(pitstop['lap'])
Driver_Stop_Number.append(pitstop['stop'])
PitStop_Time.append(pitstop['duration'])
PitStop_Dict = {'Pit Stop Lap': Corresponding_Lap, 'Driver ID': DriverID, 'Pit Stop Number': Driver_Stop_Number,
'Pit Stop Time': PitStop_Time}
PitStop_Data = pd.DataFrame(data=PitStop_Dict)
Path = self.Path.grandprix_path(year, gp_name, 'PitStop')
PitStop_Data.to_csv(Path)
def result_csv(self, round, year, gp_name):
print(bcolors.ITALIC + 'GETTING RESULT BY RACE...', gp_name + bcolors.END)
url = self.Url.url_results(round, year)
page = self.Requests.get(url)
json = page.json()
json = json['MRData']
json = json['RaceTable']
Race = json['Races'][0]
Results = Race['Results']
DriverPosition = []
DriverGridPosition = []
DriverID = []
ConstructorID = []
TimeToLeader = []
RaceStatus = []
FastestLapRank = []
AverageSpeed = []
for result in Results:
# DRIVER POSITION
if result['positionText'] == 'R':
DriverPosition.append(None)
else:
DriverPosition.append(result['positionText'])
# GRID
DriverGridPosition.append(result['grid'])
# DRIVER ID
DriverID.append(result['Driver']['driverId'])
# CONSTRUCTOR ID
ConstructorID.append(result['Constructor']['constructorId'])
# TIME TO LEADER
if result['position'] == '1':
TimeToLeader.append("0")
elif result['status'] != 'Finished':
Check = result['status']
if Check[0] == '+':
TimeToLeader.append(result['status'])
else:
TimeToLeader.append(None)
else:
TimeToLeader.append(result['Time']['time'])
# RACE STATUS
if result['status'][0] == '+':
RaceStatus.append('Finished')
else:
RaceStatus.append(result['status'])
# CASE WHERE THE DRIVER LEFT THE RACE WITHOUT COMPLETING A LAP
if 'FastestLap' not in result:
# RANK FASTEST LAP
FastestLapRank.append(None)
# AVERAGE SPEED
AverageSpeed.append(None)
else:
# RANK FASTEST LAP
FastestLapRank.append(result['FastestLap']['rank'])
# AVERAGE SPEED
AverageSpeed.append(result['FastestLap']['AverageSpeed']['speed'])
Initial_Ps_Dict = {'Positions': DriverGridPosition, 'DriverID': DriverID}
Initial_Ps_Data = pd.DataFrame(data=Initial_Ps_Dict)
Initial_Ps_Data = Initial_Ps_Data.set_index('Positions')
Path = self.Path.grandprix_path(year, gp_name, 'InitialPositions')
Initial_Ps_Data.to_csv(Path)
Result_Dict = {'Positions': DriverPosition, 'DriverID': DriverID, 'ConstructorID': ConstructorID,
'Time to Leader': TimeToLeader, 'Status': RaceStatus,
'Fastest Rank': FastestLapRank, 'Average Speed': AverageSpeed}
Result_Data = pd.DataFrame(data=Result_Dict)
Result_Data = Result_Data.set_index('Positions')
Path = self.Path.grandprix_path(year, gp_name, 'Result')
Result_Data.to_csv(Path)
def by_lap_csv(self, round, year, gp_name):
print(bcolors.ITALIC + 'GETTING LAP TIMES AND POSITIONS BY RACE...', gp_name + bcolors.END)
# Progress Calculator
Progress = progress_calculator.ProgressBar(True)
# URL
url_1, url_2 = self.Url.url_lapbylap(round, year)
# LAP COUNTER
Lap_Counter = 1
# LAP VALIDATOR
Lap_v = True
# DRIVER LIST
driver_list = list(pd.read_csv(self.Path.grandprix_path(year, gp_name, 'Drivers'))['Driver ID'].values)
# DRIVERS DICT
Lap_Times_Dict = {}
Lap_Positions_Dict = {}
# START VALUES
Lap_Times_Dict['Driver ID'] = driver_list
Lap_Positions_Dict['Driver ID'] = driver_list
while Lap_v:
# PROGRESS
Progress.get_progress_counter(Lap_Counter)
# DRIVERS LIST
Lap_Times = []
Lap_Positions = []
page = self.Requests.get(url_1 + str(Lap_Counter) + url_2)
json = page.json()
json = json['MRData']
if int(json['total']) == 0:
Lap_v = False
else:
jtemp = json['RaceTable']
jtemp = jtemp['Races'][0]
jtemp = jtemp['Laps'][0]
Laps = jtemp['Timings']
for driver in driver_list:
Driver_Out_Checker = True
for lap in Laps:
if driver == lap['driverId']:
Driver_Out_Checker = False
Lap_Times.append(lap['time'])
Lap_Positions.append(lap['position'])
if Driver_Out_Checker:
Lap_Times.append(None)
Lap_Positions.append(None)
Lap_Times_Dict[Lap_Counter] = Lap_Times
Lap_Positions_Dict[Lap_Counter] = Lap_Positions
Lap_Counter = Lap_Counter + 1
Lap_Times_Data = pd.DataFrame(data=Lap_Times_Dict)
Lap_Times_Data = Lap_Times_Data.set_index('Driver ID')
Path = self.Path.grandprix_path(year, gp_name, 'TimesByLap')
Lap_Times_Data.to_csv(Path)
Lap_Positions_Data = pd.DataFrame(data=Lap_Positions_Dict)
Lap_Positions_Data = Lap_Positions_Data.set_index('Driver ID')
Path = self.Path.grandprix_path(year, gp_name, 'PositionsByLap')
Lap_Positions_Data.to_csv(Path)
def current_driver_standings(self, round, year, gp_name):
print(bcolors.ITALIC + 'GETTING DRIVER STANDINGS FROM ERGAST...', gp_name + bcolors.END)
url = self.Url.url_driver_standings(round, year)
# LOAD JSON
page = requests.get(url)
json = page.json()
json = json['MRData']
json = json['StandingsTable']
json = json['StandingsLists'][0]
DriverStandings = json['DriverStandings']
# STARTING LISTS
DriverPosition = []
DriverPoints = []
DriverWins = []
DriverID = []
ConstructorID = []
for driver in DriverStandings:
DriverPosition.append(driver['position'])
DriverPoints.append(driver['points'])
DriverWins.append(driver['wins'])
DriverID.append(driver['Driver']['driverId'])
ConstructorID.append(driver['Constructors'][-1]['constructorId'])
DriverStandingsDict = {'Position': DriverPosition, 'DriverID': DriverID, 'ConstructorID': ConstructorID,
'Wins': DriverWins, 'Points': DriverPoints}
DriverStandingsData = pd.DataFrame(data=DriverStandingsDict)
DriverStandingsData = DriverStandingsData.set_index('Position')
Path = self.Path.standings_path(year)
DriverStandingsData.to_csv(Path)
def status(self, round, year, gp_name):
print(bcolors.ITALIC + 'GETTING STATUS FROM ERGAST...', gp_name + bcolors.END)
url = self.Url.url_status(round, year)
# LOAD JSON
page = requests.get(url)
json = page.json()
json = json['MRData']
json = json['StatusTable']
Status = json['Status']
# STARTING LISTS
StatusID = []
StatusDescription = []
StatusOccurrences = []
for state in Status:
StatusID.append(state['statusId'])
StatusDescription.append(state['status'])
StatusOccurrences.append(state['count'])
StatusDict = {'StatusID': StatusID, 'Status Description': StatusDescription,
'Status Occurrences': StatusOccurrences}
StatusData = pd.DataFrame(data=StatusDict)
StatusData = StatusData.set_index('StatusID')
Path = self.Path.grandprix_path(year, gp_name, 'RaceStatus')
StatusData.to_csv(Path)
def load_data_from_f1(self, url, year, gp_name):
print(bcolors.ITALIC + 'GETTING SOME DATA FROM F1...', gp_name + bcolors.END)
page = requests.get(url)
json = page.json()
def for_loop_by_time(json):
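            # Splits a flat alternating list [t0, v0, t1, v1, ...] into two
            # parallel lists: Time and the corresponding values.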
Time = []
Something = []
i = 0
for value in json:
if i == 0:
Time.append(value)
i = 1
else:
Something.append(value)
i = 0
return Time, Something
def weather(json):
json = json['Weather']
json = json['graph']
weather_data = json['data']
def temperature(json):
def temp_df(json, description):
Time, Temp = for_loop_by_time(json)
TrackTempDict = {"Time": Time, description: Temp}
TrackTempData = pd.DataFrame(data=TrackTempDict)
TrackTempData = TrackTempData.set_index('Time')
return TrackTempData
def track_temp(json):
print(bcolors.ITALIC + 'GETTING TRACK TEMP FROM F1...', gp_name + bcolors.END)
json = json['pTrack']
TrackTempData = temp_df(json, "Track Temperature")
Path = self.Path.grandprix_path(year, gp_name, 'TrackTemp')
TrackTempData.to_csv(Path)
def air_temp(json):
print(bcolors.ITALIC + 'GETTING AIR TEMP FROM F1...', gp_name + bcolors.END)
json = json['pAir']
TrackTempData = temp_df(json, "Air Temperature")
Path = self.Path.grandprix_path(year, gp_name, 'AirTemp')
TrackTempData.to_csv(Path)
track_temp(json)
air_temp(json)
def is_raining(json):
print(bcolors.ITALIC + 'GETTING WEATHER FROM F1...', gp_name + bcolors.END)
json = json['pRaining']
Time, Raining = for_loop_by_time(json)
TrackTemp = {"Time": Time, "Is Raining": Raining}
TrackTempData = pd.DataFrame(data=TrackTemp)
TrackTempData = TrackTempData.set_index('Time')
Path = self.Path.grandprix_path(year, gp_name, 'Raining')
TrackTempData.to_csv(Path)
def wind_speed(json):
print(bcolors.ITALIC + 'GETTING WIND SPEED FROM F1...', gp_name + bcolors.END)
json = json['pWind Speed']
Time, Wind_Speed = for_loop_by_time(json)
TrackTemp = {"Time": Time, "Wind Speed": Wind_Speed}
TrackTempData = pd.DataFrame(data=TrackTemp)
TrackTempData = TrackTempData.set_index('Time')
Path = self.Path.grandprix_path(year, gp_name, 'Wind_Speed')
TrackTempData.to_csv(Path)
def wind_direction(json):
print(bcolors.ITALIC + 'GETTING WIND DIRECTION FROM F1...', gp_name + bcolors.END)
json = json['pWind Dir']
Time, Wind_Direction = for_loop_by_time(json)
TrackTemp = {"Time": Time, "Wind Direction": Wind_Direction}
TrackTempData = pd.DataFrame(data=TrackTemp)
TrackTempData = TrackTempData.set_index('Time')
Path = self.Path.grandprix_path(year, gp_name, 'Wind_Direction')
TrackTempData.to_csv(Path)
def humidity(json):
print(bcolors.ITALIC + 'GETTING HUMIDITY FROM F1...', gp_name + bcolors.END)
json = json['pHumidity']
Time, Humidity = for_loop_by_time(json)
TrackTemp = {"Time": Time, "Humidity": Humidity}
TrackTempData = pd.DataFrame(data=TrackTemp)
TrackTempData = TrackTempData.set_index('Time')
Path = self.Path.grandprix_path(year, gp_name, 'Humidity')
TrackTempData.to_csv(Path)
def air_pressure(json):
print(bcolors.ITALIC + 'GETTING AIR PRESSURE FROM F1...', gp_name + bcolors.END)
json = json['pPressure']
Time, Air_Pressure = for_loop_by_time(json)
TrackTemp = {"Time": Time, "Air Pressure": Air_Pressure}
TrackTempData = pd.DataFrame(data=TrackTemp)
TrackTempData = TrackTempData.set_index('Time')
Path = self.Path.grandprix_path(year, gp_name, 'Air_Pressure')
TrackTempData.to_csv(Path)
temperature(weather_data)
is_raining(weather_data)
wind_speed(weather_data)
wind_direction(weather_data)
humidity(weather_data)
air_pressure(weather_data)
def track_status(json):
print(bcolors.ITALIC + 'GETTING TRACK STATUS FROM F1...', gp_name + bcolors.END)
json = json['Scores']
json = json['graph']
TrackStatusJson = json['TrackStatus']
TrackStatus = []
Laps = []
i = 0
for lap in TrackStatusJson:
if i == 1:
if lap == '':
TrackStatus.append(None)
elif lap == 'Y':
TrackStatus.append('YellowFlag')
elif lap == 'S':
TrackStatus.append('SafetyCar')
elif lap == 'R':
TrackStatus.append('RedFlag')
else:
TrackStatus.append(lap)
i = i - 1
else:
Laps.append(lap)
i = i + 1
TrackStatusDict = {"Lap": Laps, "Status": TrackStatus}
TrackStatusData = pd.DataFrame(data=TrackStatusDict)
TrackStatusData = TrackStatusData.set_index('Lap')
Path = self.Path.grandprix_path(year, gp_name, 'Track_Status')
TrackStatusData.to_csv(Path)
def drivers_performance_points(json):
print(bcolors.ITALIC + 'GETTING DRIVER PERFORMANCE POINTS FROM F1...', gp_name + bcolors.END)
json = json['Scores']
json = json['graph']
PF_Points = json['Performance']
DriversID = list(pd.read_csv(self.Path.grandprix_path(year, gp_name, "Drivers"))['Driver ID'])
DriversInitials = list(pd.read_csv(self.Path.grandprix_path(year, gp_name, "Drivers"))['Driver Initials'])
Laps = list(pd.read_csv(self.Path.grandprix_path(year, gp_name, "Track_Status"))['Lap'][1:])
DriverPerformancePointsDict = {}
DriverPerformancePointsDict['Lap'] = Laps
counter = 0
for Driver in DriversInitials:
i = 0
Performance_Gap = []
for Performance in PF_Points['p'+Driver]:
if i == 0:
i = i + 1
else:
Performance_Gap.append(Performance)
i = i - 1
while Performance_Gap.__len__() < Laps.__len__():
Performance_Gap.append(None)
DriverPerformancePointsDict[DriversID[counter]] = Performance_Gap
counter = counter + 1
DriverPerformanceData = pd.DataFrame(data=DriverPerformancePointsDict)
DriverPerformanceData = DriverPerformanceData.set_index('Lap')
Path = self.Path.grandprix_path(year, gp_name, 'Drivers_Performance')
DriverPerformanceData.to_csv(Path)
def order_driver_list(json):
json = json['init']
json = json['data']
Drivers_json = json['Drivers']
Drivers_InOrder = []
Drivers_Dict = {}
Drivers_Ordered = []
for Driver in Drivers_json:
Drivers_InOrder.append(Driver['Initials'])
DriversID = list(pd.read_csv(self.Path.grandprix_path(year, gp_name, "Drivers"))['Driver ID'])
DriversInitials = list(pd.read_csv(self.Path.grandprix_path(year, gp_name, "Drivers"))['Driver Initials'])
i = 0
for Driver in DriversInitials:
Drivers_Dict[Driver] = DriversID[i]
i = i + 1
for Driver in Drivers_InOrder:
Drivers_Ordered.append(Drivers_Dict[Driver])
return Drivers_Ordered
def highest_speed(json):
print(bcolors.ITALIC + 'GETTING HIGHEST SPEED FROM F1...', gp_name + bcolors.END)
temp = json['best']
temp = temp['data']
temp = temp['DR']
Highest_Speed_Sector_1 = []
Highest_Speed_Sector_2 = []
Highest_Speed_Sector_3 = []
for item in temp:
i = 0
for content in item['B']:
if i == 13:
Highest_Speed_Sector_1.append(content)
elif i == 16:
Highest_Speed_Sector_2.append(content)
elif i == 19:
Highest_Speed_Sector_3.append(content)
i = i + 1
SpeedDict = {'Driver': order_driver_list(json), 'Speed S1': Highest_Speed_Sector_1,
'Speed S2': Highest_Speed_Sector_2, 'Speed S3': Highest_Speed_Sector_3}
SpeedData = pd.DataFrame(data=SpeedDict)
SpeedData = SpeedData.set_index('Driver')
Path = self.Path.grandprix_path(year, gp_name, 'Highest_Speed')
SpeedData.to_csv(Path)
def tyre_types(json):
print(bcolors.ITALIC + 'GETTING TYRES HISTORY FROM F1...', gp_name + bcolors.END)
DriverList = order_driver_list(json)
TyresHistory = []
Tyres_Dict = {}
temp = json['xtra']
temp = temp['data']
Tyres_json = temp['DR']
temp_lenght = 0
for TyreLine in Tyres_json:
TyresHistory.append(TyreLine['X'][9])
if len(TyreLine['X'][9]) > temp_lenght:
temp_lenght = len(TyreLine['X'][9])
i = 0
for Tyres in TyresHistory:
Driver_Tyres = []
for Tyre in Tyres:
Driver_Tyres.append(Tyre)
while len(Driver_Tyres) < temp_lenght:
Driver_Tyres.append(None)
Tyres_Dict[DriverList[i]] = Driver_Tyres
i = i + 1
            Tyre_Data = pd.DataFrame(data=Tyres_Dict)
import time
import os.path
import sys
from datetime import datetime
import pandas as pd
from wimbledon.vis import Visualise
FIG_DIR = "../data/figs"
PROJECTS_DIR = FIG_DIR + "/projects"
PEOPLE_DIR = FIG_DIR + "/people"
def check_dir(dir):
if not os.path.exists(dir):
os.makedirs(dir)
def save_sheet(sheet, save_dir, save_name):
check_dir(save_dir)
with open(save_dir + "/" + save_name + ".html", "w") as f:
f.write(sheet)
def whiteboard(vis, display="screen"):
print("Creating Whiteboard visualisations... ", end="", flush=True)
start = time.time()
# make whiteboard html
sheet = vis.whiteboard("person", display=display)
save_sheet(sheet, PEOPLE_DIR, "people")
sheet = vis.whiteboard("project", display=display)
save_sheet(sheet, PROJECTS_DIR, "projects")
print("{:.1f}s".format(time.time() - start))
if __name__ == "__main__":
print("Initialising visualisation object... ", end="", flush=True)
init = time.time()
if len(sys.argv) > 1:
start_date = pd.to_datetime(sys.argv[1])
else:
start_date = datetime.now() - pd.Timedelta("30 days")
if len(sys.argv) > 2:
end_date = pd.to_datetime(sys.argv[2])
else:
        end_date = start_date + pd.Timedelta("395 days")
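    # Example invocation (assumed script name): python make_whiteboards.py 2021-01-01 2022-01-31
    # With no arguments the window runs from 30 days ago to roughly 13 months ahead.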
import io
import os
import re
import sys
import time
import pandas
import datetime
import requests
import mplfinance
from matplotlib import dates
# Basic Data
file_name = __file__[:-3]
absolute_path = os.path.dirname(os.path.abspath(__file__))
# <editor-fold desc='common'>
def load_json_config():
global file_directory
config_file = os.path.join(os.sep, absolute_path, 'Config.cfg')
with open(config_file, 'r') as file_handler:
config_data = file_handler.read()
regex = 'FILE_DIRECTORY=.*'
match = re.findall(regex, config_data)
file_directory = match[0].split('=')[1].strip()
# </editor-fold>
# <editor-fold desc='daily update'>
def save_dict_to_file(dic, txt):
f = open(txt, 'w', encoding='utf-8')
f.write(dic)
f.close()
def load_dict_from_file(txt):
f = open(txt, 'r', encoding='utf-8')
data = f.read()
f.close()
return eval(data)
def crawl_price(date=datetime.datetime.now()):
date_str = str(date).split(' ')[0].replace('-', '')
r = requests.post('http://www.twse.com.tw/exchangeReport/MI_INDEX?response=csv&date=' + date_str + '&type=ALL')
ret = pandas.read_csv(io.StringIO('\n'.join([i.translate({ord(c): None for c in ' '}) for i in r.text.split('\n') if
len(i.split(',')) == 17 and i[0] != '='])), header=0,
index_col='證券代號')
ret['成交金額'] = ret['成交金額'].str.replace(',', '')
ret['成交股數'] = ret['成交股數'].str.replace(',', '')
return ret
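# Example (illustrative, assumes the date is a TWSE trading day):
#   df = crawl_price(datetime.datetime(2021, 1, 4))
#   print(df['成交股數'].head())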
def original_crawl_price(date='2011-01-01 00:00:00'):
print('Begin: original_crawl_price!')
data = {}
success = False
dateFormatter = '%Y-%m-%d %H:%M:%S'
date = datetime.datetime.strptime(date, dateFormatter)
while not success:
print('parsing', date)
try:
data[date.date()] = crawl_price(date)
print('success!')
success = True
except pandas.errors.EmptyDataError:
            # holidays / non-trading days cannot be crawled (no data returned)
print('fail! check the date is holiday')
        # move on to the next day
date += datetime.timedelta(days=1)
time.sleep(10)
writer = pandas.ExcelWriter(stock_file_path, engine='xlsxwriter')
stock_volume = pandas.DataFrame({k: d['成交股數'] for k, d in data.items()}).transpose()
stock_volume.index = pandas.to_datetime(stock_volume.index)
stock_volume.to_excel(writer, sheet_name='stock_volume', index=True)
stock_open = pandas.DataFrame({k: d['開盤價'] for k, d in data.items()}).transpose()
stock_open.index = pandas.to_datetime(stock_open.index)
stock_open.to_excel(writer, sheet_name='stock_open', index=True)
stock_close = pandas.DataFrame({k: d['收盤價'] for k, d in data.items()}).transpose()
stock_close.index = pandas.to_datetime(stock_close.index)
stock_close.to_excel(writer, sheet_name='stock_close', index=True)
stock_high = pandas.DataFrame({k: d['最高價'] for k, d in data.items()}).transpose()
stock_high.index = pandas.to_datetime(stock_high.index)
stock_high.to_excel(writer, sheet_name='stock_high', index=True)
stock_low = pandas.DataFrame({k: d['最低價'] for k, d in data.items()}).transpose()
stock_low.index = pandas.to_datetime(stock_low.index)
stock_low.to_excel(writer, sheet_name='stock_low', index=True)
writer.save()
print('End: original_crawl_price!')
def update_stock_info():
print('Begin: update_stock_info!')
data = {}
count = 1
fail_count = 0
allow_continuous_fail_count = 20
try:
pandas.read_excel(stock_file_path, sheet_name='stock_volume', index_col=0)
print(r'{} Exist.'.format(stock_file_path))
except FileNotFoundError:
print(r'{} Not Exist.'.format(stock_file_path))
original_crawl_price()
stock_volume_old = pandas.read_excel(stock_file_path, sheet_name='stock_volume', index_col=0)
stock_volume_old.index = pandas.to_datetime(stock_volume_old.index)
stock_open_old = pandas.read_excel(stock_file_path, sheet_name='stock_open', index_col=0)
stock_open_old.index = pandas.to_datetime(stock_open_old.index)
stock_close_old = pandas.read_excel(stock_file_path, sheet_name='stock_close', index_col=0)
stock_close_old.index = pandas.to_datetime(stock_close_old.index)
stock_high_old = pandas.read_excel(stock_file_path, sheet_name='stock_high', index_col=0)
stock_high_old.index = pandas.to_datetime(stock_high_old.index)
stock_low_old = pandas.read_excel(stock_file_path, sheet_name='stock_low', index_col=0)
stock_low_old.index = pandas.to_datetime(stock_low_old.index)
last_date = stock_volume_old.index[-1]
dateFormatter = '%Y-%m-%d %H:%M:%S'
date = datetime.datetime.strptime(str(last_date), dateFormatter)
date += datetime.timedelta(days=1)
if date > datetime.datetime.now():
print('Finish update_stock_info!')
sys.exit(0)
while date < datetime.datetime.now() and count <= 100:
print('parsing', date)
try:
data[date.date()] = crawl_price(date)
print('success {} times!'.format(count))
fail_count = 0
count += 1
except pandas.errors.EmptyDataError:
            # holidays / non-trading days cannot be crawled (no data returned)
print('fail! check the date is holiday')
fail_count += 1
if fail_count == allow_continuous_fail_count:
raise
date += datetime.timedelta(days=1)
time.sleep(10)
writer = pandas.ExcelWriter(stock_file_path, engine='xlsxwriter')
stock_volume_new = pandas.DataFrame({k: d['成交股數'] for k, d in data.items()}).transpose()
stock_volume_new.index = pandas.to_datetime(stock_volume_new.index)
stock_volume = pandas.concat([stock_volume_old, stock_volume_new], join='outer')
stock_volume.to_excel(writer, sheet_name='stock_volume', index=True)
stock_open_new = pandas.DataFrame({k: d['開盤價'] for k, d in data.items()}).transpose()
stock_open_new.index = pandas.to_datetime(stock_open_new.index)
stock_open = pandas.concat([stock_open_old, stock_open_new], join='outer')
stock_open.to_excel(writer, sheet_name='stock_open', index=True)
stock_close_new = pandas.DataFrame({k: d['收盤價'] for k, d in data.items()}).transpose()
stock_close_new.index = | pandas.to_datetime(stock_close_new.index) | pandas.to_datetime |
#!/usr/bin/env python
"""
Application: COMPOSE Framework
File name: compose.py
Author: <NAME>
Creation: 08/05/2021
The University of Arizona
Department of Electrical and Computer Engineering
College of Engineering
PhD Advisor: Dr. <NAME>
"""
# MIT License
#
# Copyright (c) 2021 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from cProfile import run
from socketserver import ThreadingUnixDatagramServer
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
import cse
from concurrent.futures import ProcessPoolExecutor
import multiprocessing
import qns3vm as ssl
import benchmark_datagen as bmdg
import random
import time
import label_propagation as lbl_prop
import util as ut
import matplotlib.animation as animation
import math
from tqdm import tqdm
class COMPOSE:
def __init__(self,
classifier = 'QN_S3VM',
method= 'gmm',
verbose = 1,
num_cores = 0.8,
selected_dataset = 'UG_2C_2D'):
"""
Initialization of Fast COMPOSE
"""
        self.timestep = 1                       # The current timestep of the dataset
self.synthetic = 0 # 1 Allows synthetic data during cse and {0} does not allow synthetic data
        self.n_cores = num_cores                # Fraction of available CPU cores to use (resolved in set_cores)
        self.verbose = verbose                  # Level of feedback displayed during run {default}
                                                # 0 : No Information Displayed
# 1 : Command line progress updates - {default}
# 2 : Plots when possible and Command line progress updates
self.data = {} # array of timesteps each containing a matrix N instances x D features
self.labeled = {} # array of timesteps each containing a vector N instances x 1 - Correct label
self.unlabeled = {}
self.hypothesis = {} # array of timesteps each containing a N instances x 1 - Classifier hypothesis
self.core_supports = {} # array of timesteps each containing a N instances x 1 - binary vector indicating if instance is a core support (1) or not (0)
self.num_cs = {} # number of core supports
self.total_time = {}
self.cse_opts = [] # options for the selected cse function in cse class
self.selected_dataset = selected_dataset
self.classifier = classifier
self.method = method # not sure what to use for method
self.dataset = selected_dataset
self.figure_xlim = []
self.figure_ylim = []
self.step = 0
self.learner = {}
self.classifier_accuracy = {}
self.classifier_error = {}
self.time_to_predict = {}
self.user_data_input = {}
self.avg_results = {}
self.avg_results_dict = {}
if self.classifier is None:
avail_classifier = ['knn', 's3vm']
print('The following classifiers are available:\n' , avail_classifier)
classifier_input = input('Enter classifier:')
self.classifier = classifier_input
if verbose is None:
# set object displayed info setting
            print("Only 3 options to display information for verbose: \n",
                  "0 - No Info ; \n",
                  "1 - Command Line Progress Updates; \n",
                  "2 - Plots when possible and Command Line Progress \n")
print("Set Verbose: ")
verbose_input = input("Enter display information option:")
self.verbose = verbose_input
if self.verbose >= 0 and self.verbose <=2:
if self.verbose == 1:
print("Run method: ", self.verbose)
else:
            print("Only 3 options to display information: \n",
                  "0 - No Info ;\n",
                  "1 - Command Line Progress Updates;\n",
                  "2 - Plots when possible and Command Line Progress")
def compose(self):
"""
Sets COMPOSE dataset and information processing options
Check if the input parameters are not empty for compose
This checks if the dataset is empty and checks what option of feedback you want
Gets dataset and verbose (the command to display options as COMPOSE processes)
Verbose: 0 : no info is displayed
1 : Command Line progress updates
2 : Plots when possible and Command Line progress updates
"""
# set labels and unlabeles and dataset to process
self.set_data()
# set drift window
self.set_drift_window()
def set_drift_window(self):
"""
Finds the lower and higher limits to determine drift
Initial assumption is based on dataset min/max
"""
self.figure_xlim = np.amin(self.dataset)
self.figure_ylim = np.amax(self.dataset)
if self.verbose == 1:
print("Drift window:" , [self.figure_xlim, self.figure_ylim])
def set_cores(self):
"""
Establishes number of cores to conduct parallel processing
"""
num_cores = multiprocessing.cpu_count() # determines number of cores
if self.verbose == 1:
print("Available cores:", num_cores)
percent_cores = math.ceil(self.n_cores * num_cores)
if percent_cores > num_cores:
print("You do not have enough cores on this machine. Cores have to be set to ", num_cores)
self.n_cores = int(num_cores) # sets number of cores to available
else:
            self.n_cores = int(percent_cores)  # sets number of cores to the requested fraction
if self.verbose == 1:
print("Number of cores executing:", self.n_cores)
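    # Example: with num_cores=0.8 on an 8-core machine, set_cores() resolves to
    # math.ceil(0.8 * 8) = 7 worker cores.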
def get_core_supports(self, input_data = None):
"""
Method provides core supports based on desired core support extraction.
Available Core Support Extraction includes:
GMM, Parzen Window, KNN, and Alpha Shape Core Supports
"""
self.cse = cse.CSE(data=input_data) # gets core support based on first timestep
if self.method == 'gmm':
self.cse.set_boundary(self.method)
self.num_cs[self.timestep] = len(self.cse.gmm())
self.core_supports[self.timestep] = self.cse.gmm()
elif self.method == 'parzen':
self.cse.set_boundary(self.method)
self.num_cs[self.timestep] = len(self.cse.parzen())
self.core_supports[self.timestep] = self.cse.parzen()
elif self.method == 'a_shape':
self.cse.set_boundary(self.method)
self.core_supports[self.timestep] = self.cse.a_shape_compaction()
def set_data(self):
"""
        Method sets the dataset in its respective bins: data with timesteps, labeled data, and unlabeled data.
"""
if not self.dataset:
avail_data_opts = ['UG_2C_2D','MG_2C_2D','1CDT', '2CDT', 'UG_2C_3D','1CHT','2CHT','4CR','4CREV1','4CREV2','5CVT','1CSURR',
'4CE1CF','FG_2C_2D','GEARS_2C_2D', 'keystroke', 'UG_2C_5D', 'UnitTest']
print('The following datasets are available:\n' , avail_data_opts)
self.dataset = input('Enter dataset:')
if self.verbose == 1 :
print("Dataset:", self.dataset)
print("Method:", self.method)
self.user_data_input = self.dataset
data_gen = bmdg.Datagen()
dataset_gen = data_gen.gen_dataset(self.dataset)
self.dataset = dataset_gen
ts = 0
## set a self.data dictionary for each time step
## self.dataset[0][i] loop the arrays and append them to dictionary
for i in range(0, len(self.dataset[0])):
ts += 1
self.data[ts] = self.dataset[0][i]
        # filter out labeled and unlabeled data from each timestep
for i in self.data:
len_of_batch = len(self.data[i])
label_batch = []
unlabeled_batch = []
for j in range(0, len_of_batch - 1):
if self.data[i][j][2] == 1:
label_batch.append(self.data[i][j])
self.labeled[i] = label_batch
else:
unlabeled_batch.append(self.data[i][j])
self.unlabeled[i] = unlabeled_batch
# convert labeled data to match self.data data structure
labeled_keys = self.labeled.keys()
for key in labeled_keys:
if len(self.labeled[key]) > 1:
len_of_components = len(self.labeled[key])
array_tuple = []
for j in range(0, len_of_components):
array = np.array(self.labeled[key][j])
arr_to_list = array.tolist()
array_tuple.append(arr_to_list)
array = []
arr_to_list = []
concat_tuple = np.vstack(array_tuple)
self.labeled[key] = concat_tuple
# convert unlabeled data to match self.data data structure
unlabeled_keys = self.unlabeled.keys()
for key in unlabeled_keys:
if len(self.unlabeled[key]) > 1:
len_of_components = len(self.unlabeled[key])
array_tuple = []
for j in range(0, len_of_components):
array = np.array(self.unlabeled[key][j])
arr_to_list = array.tolist()
array_tuple.append(arr_to_list)
array = []
arr_to_list = []
concat_tuple = np.vstack(array_tuple)
self.unlabeled[key] = concat_tuple
def classify(self, X_train_l, L_train_l, X_train_u, X_test, L_test):
"""
Available classifiers : 'label_propagation', 'QN_S3VM'
For QN_S3VM:
Sets classifier by getting the classifier object from ssl module
loads classifier based on user input
The QN_S3VM options are the following:
X_l -- patterns of labeled part of the data
L_l -- labels of labeled part of the data
X_u -- patterns of unlabeled part of the data
random_generator -- particular instance of a random_generator (default None)
kw -- additional parameters for the optimizer
"""
if self.classifier == 'QN_S3VM':
random_gen = random.Random()
random_gen.seed(0)
X_L_train = []
X_train_l = np.array(X_train_l)
for i in range(0, len(X_train_l)):
add = np.array(X_train_l[i])
X_L_train.append(add)
X_train_l = X_L_train
L_l_train = []
L_train_l = np.array(L_train_l)
for i in range(0, len(L_train_l)):
add = np.array(L_train_l[:,-1][i])
L_l_train.append(add.astype(int))
L_train_l = L_l_train
L_train_l = np.array(L_train_l)
model = ssl.QN_S3VM(X_train_l, L_train_l, X_train_u, random_gen)
model.train()
preds = model.getPredictions(X_test)
return preds
elif self.classifier == 'label_propagation':
ssl_label_propagation = lbl_prop.Label_Propagation(X_train_l, L_train_l, X_train_u)
preds = ssl_label_propagation.ssl()
return preds
elif self.classifier == 'knn':
self.cse = cse.CSE(data=self.data)
self.cse.set_boundary('knn')
self.cse.k_nn()
def classification_error(self, preds, L_test):
return np.sum(preds != L_test)/len(preds)
def results_logs(self):
avg_error = np.array(sum(self.classifier_error.values()) / len(self.classifier_error))
avg_accuracy = np.array(sum(self.classifier_accuracy.values()) / len(self.classifier_accuracy))
avg_exec_time = np.array(sum(self.time_to_predict.values()) / len(self.time_to_predict))
        avg_results_df = pd.DataFrame({'Dataset': [self.selected_dataset], 'Classifier': [self.classifier], 'Method': [self.method], 'Avg_Error': [avg_error], 'Avg_Accuracy': [avg_accuracy], 'Avg_Exec_Time': [avg_exec_time]},
columns=['Dataset','Classifier','Method','Avg_Error', 'Avg_Accuracy', 'Avg_Exec_Time'])
self.avg_results_dict['Dataset'] = self.selected_dataset
self.avg_results_dict['Classifier'] = self.classifier
self.avg_results_dict['Method'] = self.method
self.avg_results_dict['Avg_Error'] = avg_error
self.avg_results_dict['Avg_Accuracy'] = avg_accuracy
self.avg_results_dict['Avg_Exec_Time'] = avg_exec_time
run_method = self.selected_dataset + '_' + self.classifier + '_' + self.method
self.avg_results[run_method] = avg_results_df
if self.verbose == 1:
            print('Execution Time:', self.total_time[self.user_data_input], "seconds")
print('Average error:', avg_error)
print('Average Accuracy:', avg_accuracy)
print('Average Execution Time per Timestep:', avg_exec_time, "seconds")
df = pd.DataFrame.from_dict((self.classifier_accuracy.keys(), self.classifier_accuracy.values())).T
        accuracy_scores = pd.DataFrame(df.values, columns=['Timesteps', 'Accuracy'])
# coding=utf-8
# Author: <NAME>
# Date: May 19, 2020
#
# Description: Calculates the number of expressed genes (protein-coding) in pairs of species, for all cell types, across different TPM cut-offs.
#
import numpy as np
import pandas as pd
import networkx as nx
from itertools import combinations
from utils import ensurePathExists, get_network_layer
if __name__ == '__main__':
species = ['HS', 'MM', 'DM']
celltypes = ['spermatogonia', 'spermatocyte', 'spermatid', 'enterocyte', 'neuron', 'muscle']
thresholds = [0.01, 0.05, 0.1, 0.2, 0.5, 1, 2, 5, 10]
print('Loading Genome Network')
rGfile_gpickle = '../../04-network/results/network/net-{network:s}.gpickle'.format(network='genome')
G = nx.read_gpickle(rGfile_gpickle)
remove_non_protein_coding = [n for n, d in G.nodes(data=True) if d.get('biotype', '') != 'protein_coding']
G.remove_nodes_from(remove_non_protein_coding)
"""
print('Separate Layers')
HSG = get_network_layer(G, 'HS')
MMG = get_network_layer(G, 'MM')
DMG = get_network_layer(G, 'DM')
Gx = {'HS': HSG, 'MM': MMG, 'DM': DMG}
"""
r = []
for specie_i, specie_j in combinations(species, 2):
print("Calculating for species: {specie_i:s} - {specie_j:s}".format(specie_i=specie_i, specie_j=specie_j))
for celltype in celltypes:
print("Calculating for celltype: {celltype:s}".format(celltype=celltype))
rFPKMifile = '../../02-core_genes/results/FPKM/{specie:s}/{specie:s}-FPKM-{celltype:s}.csv.gz'.format(specie=specie_i, celltype=celltype)
rFPKMjfile = '../../02-core_genes/results/FPKM/{specie:s}/{specie:s}-FPKM-{celltype:s}.csv.gz'.format(specie=specie_j, celltype=celltype)
#
df_i = pd.read_csv(rFPKMifile)
df_j = pd.read_csv(rFPKMjfile)
#
for threshold in thresholds:
genes_i = df_i.loc[((df_i['biotype'] == 'protein_coding') & (df_i['TPM'] >= threshold)), 'id_gene'].tolist()
genes_j = df_j.loc[((df_j['biotype'] == 'protein_coding') & (df_j['TPM'] >= threshold)), 'id_gene'].tolist()
#
genes_ij = genes_i + genes_j
# Only genes in this modules
Gtmp = nx.subgraph(G, genes_ij).copy()
# Remove intra edges
remove_intra_edges = [(i, j) for i, j, d in Gtmp.edges(data=True) if d.get('type', None) == 'intra']
Gtmp.remove_edges_from(remove_intra_edges)
# Remove isolates
remove_isolates_nodes = list(nx.isolates(Gtmp))
Gtmp.remove_nodes_from(remove_isolates_nodes)
a = set(genes_i)
b = set(genes_j)
a_union_b = a.union(b)
a_inter_b = set(Gtmp.nodes())
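                    # Jaccard-style similarity: expressed genes still linked by
                    # cross-species (inter) edges over all expressed genes in either species.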
similarity = len(a_inter_b) / len(a_union_b)
r.append((specie_i, specie_j, celltype, threshold, similarity))
#
    dfR = pd.DataFrame(r, columns=['specie_i', 'specie_j', 'celltype', 'threshold', 'similarity'])
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 4 13:56:01 2020
@author: <NAME>
Mapping is a class that applies the principle of virtual work to aeroelaticity.
In this class the matrices linking the CFD mesh to the structure mesh are
computed.
OK TODO: For the computation of the moment, distance must be computed in the x,y
plane.
TODO: verify the moment orientation
"""
import logging
import numpy as np
import numpy.linalg as LA
import scipy as sp
import skspatial.objects as sk
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import sys
import pandas as pd
logger = logging.getLogger(__name__)
class mapper:
def __init__(self,pytornadoVariables,preMeshedStructre,csdSolverClassVar):
"""
Initialises the class and separes the wing points for enhanced quality
results.
"""
# For debug purposes
plotting = False
np.set_printoptions(precision=3)
# Assembles matrices
self.geo = preMeshedStructre
self.lattice = pytornadoVariables[0]
self.VLMdata = pytornadoVariables[1]
self.geoP = preMeshedStructre.aircraftNodesPoints
self.csd = csdSolverClassVar
# Separates lattice.c into each wing instances
self.wingsPoints = []
self.limitsStart = []
self.limitsEnd = []
# Separates the lattice points, understand the VLM mesh for each wing
# hence leading to better results since each wing only contribute to her
# specific beam.
number = len(self.lattice.bookkeeping_by_wing_uid)
for i in self.lattice.bookkeeping_by_wing_uid:
# Gets the data that needs to be sorted.
listing = list(self.lattice.bookkeeping_by_wing_uid.get(i)[0][1])
init = listing[0]
# logger.debug("init = "+str(init))
listing = list(self.lattice.bookkeeping_by_wing_uid.get(i)[-1][1])
panels = self.lattice.bookkeeping_by_wing_uid.get(i)[-1][2]
# takes care of last segment hence the + 1 at the end
end = listing[-1] + panels + 1
# logger.debug("number = "+str(number))
# logger.debug("end = "+str(end))
self.limitsStart.append(init)
self.limitsEnd.append(end)
# Appends the separated points
self.wingsPoints.append(self.lattice.bound_leg_midpoints[init:end])
number -= 1
# Plot for debug purposes only
if plotting:
fig = plt.figure("figure 1")
ax = fig.add_subplot(111, projection='3d')
for i in range(len(self.wingsPoints)):
ax.scatter(self.wingsPoints[i][:,0],
self.wingsPoints[i][:,1],
self.wingsPoints[i][:,2],
label='Wing '+str(i+1))
val = 15
ax.set_xlim(-val,val)
ax.set_ylim(-val,val)
ax.set_zlim(-val,val)
ax.legend()
plt.show()
def computesTransformationsMatrices(self):
# Computes transformations matrices
self.iM = []
self.A = []
self.H = []
# For debugging
self.dzsGlob = []
self.dzaGlob = []
plotting = False
for i in range(len(self.wingsPoints)):
# Computes the matrix M and then invert it
            # permitted choices are: G,L,TPS,HMQ,HIMQ,C0,C2,C4,C6,EH
            # (see the definitions in phi() below)
fun = "C0"
n = self.geoP[i + self.geo.nFuselage].shape
n = n[0]
Mbeam = np.zeros((n,n))
for k in range(n):
for j in range(n):
x1 = self.geoP[i + self.geo.nFuselage][k]
x2 = self.geoP[i + self.geo.nFuselage][j]
Mbeam[k,j] = self.phi(x1,x2,fun)
self.iM.append(LA.inv(Mbeam))
# Computes the matrix Q which is also the matrix A transposed in
# this specific case c.f. to the theory.
m = self.wingsPoints[i].shape
m = m[0]
Q = np.zeros((n,m))
# logger.debug("n = "+str(n))
# logger.debug("m = "+str(m))
for k in range(n):
for j in range(m):
x1 = self.geoP[i + self.geo.nFuselage][k]
x2 = self.wingsPoints[i][j]
Q[k,j] = self.phi(x1,x2,fun)
self.A.append(Q.T)
self.H.append(np.matmul(self.A[i],self.iM[i]))
# logger.debug(self.lattice.c.shape)
# logger.debug("A "+str(self.A[0].shape))
# logger.debug("iM"+str(self.iM[0].shape))
# logger.debug("H "+str(self.H[0].shape))
# tests the mapping:
n = self.geoP[i + self.geo.nFuselage].shape
n = n[0]
dzs = np.zeros(n)
for k in range(n):
dzs[k] = 0.01 * self.geoP[i + self.geo.nFuselage][k,1]**2
self.dzsGlob.append(dzs)
dza = np.matmul(self.H[i],self.dzsGlob[i])
self.dzaGlob.append(dza)
# # Plots line
# if plotting:
# fig = plt.figure("figure 2")
# ax = fig.add_subplot(111, projection='3d')
# for p in range(len(self.wingsPoints)):
# # ax.scatter(self.geoP[p + self.geo.nFuselage][:,0],
# # self.geoP[p + self.geo.nFuselage][:,1],
# # self.geoP[p + self.geo.nFuselage][:,2],
# # label='beam wing '+str(p+1))
# # ax.scatter(self.geoP[p + self.geo.nFuselage][:,0],
# # self.geoP[p + self.geo.nFuselage][:,1],
# # self.geoP[p + self.geo.nFuselage][:,2]+self.dzsGlob[i],
# # label='deformed beam wing '+str(p+1))
# ax.scatter(self.wingsPoints[p][:,0],
# self.wingsPoints[p][:,1],
# self.wingsPoints[p][:,2],
# label='undeformed wing'+str(p+1))
# ax.scatter(self.wingsPoints[p][:,0],
# self.wingsPoints[p][:,1],
# self.wingsPoints[p][:,2]+self.dzaGlob[p],
# label='deformed wing'+str(p+1))
# val = 15
# ax.set_xlim(-val,val)
# ax.set_ylim(-val,val)
# ax.set_zlim(-val,val)
# ax.legend()
# plt.show()
def phi(self,x1,x2,fun):
"""
        Set of radial basis functions that the user can choose from. After some
        testing, "Linear" seems to be the only suitable choice. This is because
        the aeroelastic coupling is done with a beam, hence a line of points and
        not a surface: when an RBF is defined on a mesh smaller than the chord
        length, the RBF is zero at the leading and trailing edges.
        All the other functions are kept in case someone finds a way to connect
        the solver to a 2D or 3D structural FEM solver.
"""
eps = 1e10
r = LA.norm(x1-x2)
if fun == "G":
# Gaussian
phi_x = np.exp(-eps*r**2)
elif fun == "L":
# Linear
phi_x = r
elif fun == "C":
            # Constant
phi_x = 1
elif fun == "TPS":
# Thin plate spline
phi_x = r**2 * np.log(r)
elif fun == "HMQ":
# Hardy's multiquadratic
phi_x = (eps**2 + r**2)**0.5
elif fun == "HIMQ":
# Hardy's inverse multiquadratic
phi_x = 1/(eps**2 + r**2)**0.5
elif fun == "C0":
# Wendland's C0
phi_x = (1-r)**2
elif fun == "C2":
# Wendland's C2
phi_x = (1-r)**4 * (4*r + 1)
elif fun == "C4":
# Wendland's C4
phi_x = (1-r)**6 * (35*r**2 + 18*r + 3)
elif fun == "C6":
# Wendland's C6
phi_x = (1-r)**8 * (32*r**3 + 25*r**2 + 8*r + 1)
elif fun == "EH":
# Euclid's hat
phi_x = np.pi*((1/12*r**3) - r*eps**2 + 4/3*eps**3)
return phi_x
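    # Illustrative example (not part of the solver workflow): with the "L" kernel
    # phi returns the Euclidean distance between the two points, e.g.
    #   self.phi(np.array([0., 0., 0.]), np.array([3., 4., 0.]), "L") -> 5.0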
def aeroToStructure(self,args,iteration):
"""
        Compute the forces for the structure solver from the CFD solver results.
"""
logger.debug("aeroToStructure")
# structure forces
self.sfx = []
self.sfy = []
self.sfz = []
self.spmx = []
self.spmy = []
self.spmz = []
self.smx = []
self.smy = []
self.smz = []
# Aerodynamics forces
self.afx = []
self.afy = []
self.afz = []
self.apmx = []
self.apmy = []
self.apmz = []
        # separates forces for each wing
N = len(self.wingsPoints)
for i in range(N):
start = self.limitsStart[i]
end = self.limitsEnd[i]
# Aerodynamic panel forces
self.afx.append(self.VLMdata.panelwise["fx"][start:end])
self.afy.append(self.VLMdata.panelwise["fy"][start:end])
self.afz.append(self.VLMdata.panelwise["fz"][start:end])
# # Aerodynamic panel moments
# self.apmx.append(self.VLMdata.panelwise["mx"][start:end])
# self.apmy.append(self.VLMdata.panelwise["my"][start:end])
# self.apmz.append(self.VLMdata.panelwise["mz"][start:end])
# Calls the function in order to compute the moment generated by each
# force on the wing.
self.computeMoments()
# Computes the forces that act on the structure
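        # H[i].T maps the panel-wise aerodynamic loads onto the beam nodes,
        # the virtual-work counterpart of the displacement mapping dza = H @ dzs.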
for i in range(N):
# Computes the forces
self.sfx.append(np.matmul(self.H[i].T,self.afx[i]))
self.sfy.append(np.matmul(self.H[i].T,self.afy[i]))
self.sfz.append(np.matmul(self.H[i].T,self.afz[i]))
# Computes the moment part due to the aerodynamic force
self.smx.append(np.matmul(self.H[i].T,self.amx[i]))
self.smy.append(np.matmul(self.H[i].T,self.amy[i]))
self.smz.append(np.matmul(self.H[i].T,self.amz[i]))
            # Swept wings have a tendency to increase the central lift
M = int(np.floor(len(self.sfx[i])/2))
# Damps the inital and final jump
self.sfx[i][0] = self.sfx[i][1]#*0
self.sfx[i][-1] = self.sfx[i][-2]#*0
self.sfx[i][M] = self.sfx[i][M-1]
self.sfy[i][0] = self.sfy[i][1]#*0
self.sfy[i][-1] = self.sfy[i][-2]#*0
self.sfy[i][M] = self.sfy[i][M-1]
self.sfz[i][0] = self.sfz[i][1]#*0
self.sfz[i][-1] = self.sfz[i][-2]#*0
self.sfz[i][M] = self.sfz[i][M-1]
# Damps the inital and final jump
self.smx[i][0] = self.smx[i][1]#*0
self.smx[i][-1] = self.smx[i][-2]#*0
self.smx[i][M] = self.smx[i][M-1]
self.smy[i][0] = self.smy[i][1]#*0
self.smy[i][-1] = self.smy[i][-2]#*0
self.smy[i][M] = self.smy[i][M-1]
self.smz[i][0] = self.smz[i][1]#*0
self.smz[i][-1] = self.smz[i][-2]#*0
self.smz[i][M] = self.smz[i][M-1]
# logger.debug(self.smy)
        # Saves data for verification
df = pd.DataFrame()
# Structure mesh node position
df['x'] = self.geo.aircraftNodesPoints[0][:,0]
df['y'] = self.geo.aircraftNodesPoints[0][:,1]
df['z'] = self.geo.aircraftNodesPoints[0][:,2]
# Forces
        df['Fx'] = pd.Series(self.sfx[0])
import pandas as pd
nfa = {}
nos = int(input("Enter the number of states : "))
trans = int(input("Enter the number of transitions : "))
for i in range(nos):
state = input("Enter the state name : ")
nfa[state] = {}
for j in range(trans):
path = input("Enter the path : ")
print("Enter end state from state {} passing through the path {} : ".format(state,path))
destination_State = [x for x in input().split()]
nfa[state][path] = destination_State
print("\nNFA :- \n")
print(nfa)
print("\nThe NFA Table :- ")
nfa_table = pd.DataFrame(nfa)
print(nfa_table.transpose())
print("Enter End state of NFA : ")
nfa_final_state = [x for x in input().split()]
state_arrray = []
dfa = {}
keys = list(list(nfa.keys())[0])
crossways = list(nfa[keys[0]].keys())
dfa[keys[0]] = {}
for y in range(trans):
var = "".join(nfa[keys[0]][crossways[y]])
dfa[keys[0]][crossways[y]] = var
if var not in keys:
state_arrray.append(var)
keys.append(var)
while len(state_arrray) != 0:
dfa[state_arrray[0]] = {}
for _ in range(len(state_arrray[0])):
for i in range(len(crossways)):
temporary_List = []
for j in range(len(state_arrray[0])):
temporary_List += nfa[state_arrray[0][j]][crossways[i]]
s = ""
s = s.join(temporary_List)
if s not in keys:
state_arrray.append(s)
keys.append(s)
dfa[state_arrray[0]][crossways[i]] = s
state_arrray.remove(state_arrray[0])
print("\nDFA :- \n")
print(dfa)
print("\nThe DFA table :- ")
dfa_table = pd.DataFrame(dfa)
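# Worked example (hypothetical input): for single-letter NFA states A, B over {a, b}
# with A --a--> {A, B}, A --b--> {A}, B --a--> {}, B --b--> {B}, the subset
# construction above yields DFA states A and AB with transitions
# A --a--> AB, A --b--> A, AB --a--> AB, AB --b--> AB.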
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import timedelta
from numpy import nan
import numpy as np
import pandas as pd
from pandas import (Series, isnull, date_range,
MultiIndex, Index)
from pandas.tseries.index import Timestamp
from pandas.compat import range
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
from .common import TestData
def _skip_if_no_pchip():
try:
from scipy.interpolate import pchip_interpolate # noqa
except ImportError:
import nose
raise nose.SkipTest('scipy.interpolate.pchip missing')
def _skip_if_no_akima():
try:
from scipy.interpolate import Akima1DInterpolator # noqa
except ImportError:
import nose
raise nose.SkipTest('scipy.interpolate.Akima1DInterpolator missing')
class TestSeriesMissingData(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_timedelta_fillna(self):
# GH 3371
s = Series([Timestamp('20130101'), Timestamp('20130101'), Timestamp(
'20130102'), Timestamp('20130103 9:01:01')])
td = s.diff()
# reg fillna
result = td.fillna(0)
expected = Series([timedelta(0), timedelta(0), timedelta(1), timedelta(
days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
        # interpreted as seconds
result = td.fillna(1)
expected = Series([timedelta(seconds=1), timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
result = td.fillna(timedelta(days=1, seconds=1))
expected = Series([timedelta(days=1, seconds=1), timedelta(
0), timedelta(1), timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
result = td.fillna(np.timedelta64(int(1e9)))
expected = Series([timedelta(seconds=1), timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
from pandas import tslib
result = td.fillna(tslib.NaT)
expected = Series([tslib.NaT, timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)],
dtype='m8[ns]')
assert_series_equal(result, expected)
# ffill
td[2] = np.nan
result = td.ffill()
expected = td.fillna(0)
expected[0] = np.nan
assert_series_equal(result, expected)
# bfill
td[2] = np.nan
result = td.bfill()
expected = td.fillna(0)
expected[2] = timedelta(days=1, seconds=9 * 3600 + 60 + 1)
assert_series_equal(result, expected)
def test_datetime64_fillna(self):
s = Series([Timestamp('20130101'), Timestamp('20130101'), Timestamp(
'20130102'), Timestamp('20130103 9:01:01')])
s[2] = np.nan
# reg fillna
result = s.fillna(Timestamp('20130104'))
expected = Series([Timestamp('20130101'), Timestamp(
'20130101'), Timestamp('20130104'), Timestamp('20130103 9:01:01')])
assert_series_equal(result, expected)
from pandas import tslib
result = s.fillna(tslib.NaT)
expected = s
assert_series_equal(result, expected)
# ffill
result = s.ffill()
expected = Series([Timestamp('20130101'), Timestamp(
'20130101'), Timestamp('20130101'), Timestamp('20130103 9:01:01')])
assert_series_equal(result, expected)
# bfill
result = s.bfill()
expected = Series([Timestamp('20130101'), Timestamp('20130101'),
Timestamp('20130103 9:01:01'), Timestamp(
'20130103 9:01:01')])
assert_series_equal(result, expected)
# GH 6587
# make sure that we are treating as integer when filling
# this also tests inference of a datetime-like with NaT's
s = Series([pd.NaT, pd.NaT, '2013-08-05 15:30:00.000001'])
expected = Series(
['2013-08-05 15:30:00.000001', '2013-08-05 15:30:00.000001',
'2013-08-05 15:30:00.000001'], dtype='M8[ns]')
result = s.fillna(method='backfill')
assert_series_equal(result, expected)
def test_datetime64_tz_fillna(self):
for tz in ['US/Eastern', 'Asia/Tokyo']:
# DatetimeBlock
s = Series([Timestamp('2011-01-01 10:00'), pd.NaT, Timestamp(
'2011-01-03 10:00'), pd.NaT])
            result = s.fillna(pd.Timestamp('2011-01-02 10:00'))
# UN OCHA COVID-19 INTERNATIONAL MODELING - Task 6
# 2020 The Johns Hopkins University Applied Physics Laboratory LLC
# Modelling cholera transmission dynamics in the presence of limited resources
# https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6676578/pdf/13104_2019_Article_4504.pdf
# This implementation uses system of equations #3.
import numpy as np
import pandas as pd
import os
import argparse
from scipy.integrate import solve_ivp
import plotly.express as px
import plotly.io as pio
import plotly
# plotly.io.orca.config.executable = "~/anaconda3/bin/orca"
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
DEFAULT_COLORS = px.colors.qualitative.T10
COLORS = [DEFAULT_COLORS[1], DEFAULT_COLORS[2], DEFAULT_COLORS[3], DEFAULT_COLORS[6]]
COLUMN_NAMES = ["Susceptible", "Infected", "Recovered", "Bacteria"]
# currently a global variable, because solve_ivp() doesn't accept custom parameters in its function signature
PARAMS = {}
# calculate the basic reproduction number R0
def calculate_R0(params):
Ri = params["beta1"] / (params["mu"] + params["gamma1"])
Rx = (params["alpha"] * params["beta2"]) / (
params["khat"] * params["delta"] * (params["mu"] + params["gamma1"])
)
return Ri + Rx
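# Ri is the direct (human-to-human, beta1) contribution to R0 and Rx the indirect
# (environment-to-human, beta2, with shedding rate alpha and decay rate delta) contribution.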
# calculate the rate of recovery, bounded by the min and max rates (gamma)
def recovery(params, I):
return params["gamma0"] + (params["gamma1"] - params["gamma0"]) * (
params["bhat"] / (I + params["bhat"])
)
# calculate the change in Susceptible persons wrt to time
def dS_dt(params, S, I, R, B):
return (
(params["mu"])
- (params["beta1"] * S * I)
- (params["beta2"] * S * B / (B + params["khat"]))
- (params["mu"] * S)
)
# calculate the change in Infected persons wrt to time
def dI_dt(params, S, I, R, B):
return (
(params["beta1"] * S * I)
+ (params["beta2"] * S * B / (B + params["khat"]))
- (recovery(params, I) * I)
- (params["mu"] * I)
)
# calculate the change in Recovered persons wrt to time
def dR_dt(params, I, R):
return recovery(params, I) * I - (params["mu"] * R)
# calculate the change in Bacteria wrt to time
def dB_dt(params, I, B):
return params["alpha"] * I - params["delta"] * B
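# The full system implemented above (system of equations #3 in the referenced paper):
#   dS/dt = mu - beta1*S*I - beta2*S*B/(B + khat) - mu*S
#   dI/dt = beta1*S*I + beta2*S*B/(B + khat) - gamma(I)*I - mu*I
#   dR/dt = gamma(I)*I - mu*R
#   dB/dt = alpha*I - delta*B
# where gamma(I) = gamma0 + (gamma1 - gamma0) * bhat / (I + bhat) is the saturating recovery rate.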
# the system of differential equations for all populations
def dP_dt(t, init_conditions):
# initial conditions
S, I, R, B = init_conditions
# temp fix: update the model parameters using the global variable
# (should be a function parameter)
global PARAMS
return [
dS_dt(PARAMS, S, I, R, B),
dI_dt(PARAMS, S, I, R, B),
dR_dt(PARAMS, I, R),
dB_dt(PARAMS, I, B),
]
# integrate across a single interval of integration
def integrate(parameters, interval, init_conditions, step_size=1, population_error=0):
# update the global parameters with what is passed into the function
global PARAMS
PARAMS = parameters
# get all time steps
ts = np.linspace(
interval[0], interval[1], 1 + int((interval[1] - interval[0]) / step_size)
)
# solve the system
print(interval, len(ts))
solution = solve_ivp(dP_dt, interval, init_conditions, t_eval=ts, method="RK45")
# check that the total population still adds up to 100%
if population_error:
for s, i, r, b in zip(
solution["y"][0],
solution["y"][1],
solution["y"][2],
solution["y"][3],
):
assert 1 - population_error <= (s + i + r) <= 1 + population_error
# put the solution into a DataFrame
df = pd.DataFrame(solution["y"].T, columns=COLUMN_NAMES)
df["time"] = ts
df = df.set_index("time")
# update the "initial" conditions with the current state variables
state = {
"s0": solution["y"][0][-1],
"i0": solution["y"][1][-1],
"r0": solution["y"][2][-1],
"b0": solution["y"][3][-1],
}
return df, state
def piecewise_integration(time_series_params):
# initialize
first = sorted(time_series_params.items(), key=lambda kv: kv[0])[0][1]
init_conditions = [first["s0"], first["i0"], first["r0"], first["b0"]]
    results = pd.DataFrame()
# *****************************************************************************
#
# Copyright (c) 2020, the pyEX authors.
#
# This file is part of the jupyterlab_templates library, distributed under the terms of
# the Apache License 2.0. The full license can be found in the LICENSE file.
#
import pandas as pd
import talib as t
def adx(
client,
symbol,
timeframe="6m",
highcol="high",
lowcol="low",
closecol="close",
period=14,
):
"""This will return a dataframe of average directional movement index for the given symbol across
the given timeframe
Args:
client (pyEX.Client): Client
symbol (string): Ticker
timeframe (string): timeframe to use, for pyEX.chart
highcol (string): column to use to calculate
lowcol (string): column to use to calculate
closecol (string): column to use to calculate
period (int): period to calculate adx across
Returns:
DataFrame: result
"""
df = client.chartDF(symbol, timeframe)
adx = t.ADX(df[highcol].values, df[lowcol].values, df[closecol].values, period)
return pd.DataFrame(
{
highcol: df[highcol].values,
lowcol: df[lowcol].values,
closecol: df[closecol].values,
"adx": adx,
}
)
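# Example usage (illustrative; assumes a valid IEX Cloud token for the pyEX client):
#   import pyEX
#   c = pyEX.Client(api_token="YOUR_TOKEN", version="sandbox")
#   adx(c, "AAPL", timeframe="6m").tail()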
def adxr(
client,
symbol,
timeframe="6m",
highcol="high",
lowcol="low",
closecol="close",
period=14,
):
"""This will return a dataframe of average directional movement index rating for the given symbol across
the given timeframe
Args:
client (pyEX.Client): Client
symbol (string): Ticker
timeframe (string): timeframe to use, for pyEX.chart
highcol (string): column to use to calculate
lowcol (string): column to use to calculate
closecol (string): column to use to calculate
period (int): period to calculate across
Returns:
DataFrame: result
"""
df = client.chartDF(symbol, timeframe)
adx = t.ADXR(df[highcol].values, df[lowcol].values, df[closecol].values, period)
return pd.DataFrame(
{
highcol: df[highcol].values,
lowcol: df[lowcol].values,
closecol: df[closecol].values,
"adx": adx,
}
)
def apo(
client, symbol, timeframe="6m", col="close", fastperiod=12, slowperiod=26, matype=0
):
"""This will return a dataframe of Absolute Price Oscillator for the given symbol across the given timeframe
Args:
client (pyEX.Client): Client
symbol (string): Ticker
timeframe (string): timeframe to use, for pyEX.chart
col (string): column to use to calculate
fastperiod (int): fast period to calculate across
slowperiod (int): slow period to calculate across
matype (int): moving average type (0-sma)
Returns:
DataFrame: result
"""
df = client.chartDF(symbol, timeframe)
apo = t.APO(df[col].values, fastperiod, slowperiod, matype)
return pd.DataFrame({col: df[col].values, "apo": apo})
def aroon(client, symbol, timeframe="6m", highcol="high", lowcol="low", period=14):
"""This will return a dataframe of
Aroon
for the given symbol across the given timeframe
Args:
client (pyEX.Client): Client
symbol (string): Ticker
timeframe (string): timeframe to use, for pyEX.chart
highcol (string): column to use to calculate
lowcol (string): column to use to calculate
period (int): period to calculate across
Returns:
DataFrame: result
"""
df = client.chartDF(symbol, timeframe)
aroondown, aroonup = t.AROON(df[highcol].values, df[lowcol].values, period)
return pd.DataFrame(
{
highcol: df[highcol].values,
lowcol: df[lowcol].values,
"aroonup": aroonup,
"aroondown": aroondown,
}
)
def aroonosc(client, symbol, timeframe="6m", highcol="high", lowcol="low", period=14):
"""This will return a dataframe of
Aroon Oscillator
for the given symbol across the given timeframe
Args:
client (pyEX.Client): Client
symbol (string): Ticker
timeframe (string): timeframe to use, for pyEX.chart
highcol (string): column to use to calculate
lowcol (string): column to use to calculate
period (int): period to calculate across
Returns:
DataFrame: result
"""
df = client.chartDF(symbol, timeframe)
x = t.AROONOSC(df[highcol].values, df[lowcol].values, period)
return pd.DataFrame(
{highcol: df[highcol].values, lowcol: df[lowcol].values, "aroonosc": x}
)
def bop(
client,
symbol,
timeframe="6m",
    opencol="open",
    highcol="high",
    lowcol="low",
    closecol="close",
):
"""This will return a dataframe of
Balance of power
for the given symbol across the given timeframe
Args:
client (pyEX.Client): Client
symbol (string): Ticker
timeframe (string): timeframe to use, for pyEX.chart
        opencol (string): column to use to calculate
        highcol (string): column to use to calculate
        lowcol (string): column to use to calculate
        closecol (string): column to use to calculate
Returns:
DataFrame: result
"""
df = client.chartDF(symbol, timeframe)
x = t.BOP(
        df[opencol].values, df[highcol].values, df[lowcol].values, df[closecol].values
)
return pd.DataFrame(
{
            opencol: df[opencol].values,
            highcol: df[highcol].values,
            lowcol: df[lowcol].values,
            closecol: df[closecol].values,
"bop": x,
}
)
def cci(
client,
symbol,
timeframe="6m",
highcol="high",
lowcol="low",
closecol="close",
period=14,
):
"""This will return a dataframe of
Commodity Channel Index
for the given symbol across the given timeframe
Args:
client (pyEX.Client): Client
symbol (string): Ticker
timeframe (string): timeframe to use, for pyEX.chart
highcol (string): column to use to calculate
lowcol (string): column to use to calculate
closecol (string): column to use to calculate
period (int): period to calculate across
Returns:
DataFrame: result
"""
df = client.chartDF(symbol, timeframe)
x = t.CCI(df[highcol].values, df[lowcol].values, df[closecol].values, period)
return pd.DataFrame(
{
highcol: df[highcol].values,
lowcol: df[lowcol].values,
closecol: df[closecol].values,
"cci": x,
}
)
def cmo(client, symbol, timeframe="6m", col="close", period=14):
"""This will return a dataframe of
Chande Momentum Oscillator
for the given symbol across the given timeframe
Args:
client (pyEX.Client): Client
symbol (string): Ticker
timeframe (string): timeframe to use, for pyEX.chart
col (string): column to use to calculate
period (int): period to calculate across
Returns:
DataFrame: result
"""
df = client.chartDF(symbol, timeframe)
return pd.DataFrame({col: df[col].values, "cmo": t.CMO(df[col].values, period)})
def dx(
client,
symbol,
timeframe="6m",
highcol="high",
lowcol="low",
closecol="close",
period=14,
):
"""This will return a dataframe of
Directional Movement Index
for the given symbol across the given timeframe
Args:
client (pyEX.Client): Client
symbol (string): Ticker
timeframe (string): timeframe to use, for pyEX.chart
highcol (string): column to use to calculate
lowcol (string): column to use to calculate
closecol (string): column to use to calculate
period (int): period to calculate across
Returns:
DataFrame: result
"""
df = client.chartDF(symbol, timeframe)
x = t.DX(df[highcol].values, df[lowcol].values, df[closecol].values, period)
return pd.DataFrame(
{
highcol: df[highcol].values,
lowcol: df[lowcol].values,
closecol: df[closecol].values,
"dx": x,
}
)
def macd(
client,
symbol,
timeframe="6m",
col="close",
fastperiod=12,
slowperiod=26,
signalperiod=9,
):
"""This will return a dataframe of Moving Average Convergence/Divergence for the given symbol across the given timeframe
Args:
client (pyEX.Client): Client
symbol (string): Ticker
timeframe (string): timeframe to use, for pyEX.chart
col (string): column to use to calculate
fastperiod (int): fast period to calculate across
slowperiod (int): slow period to calculate across
signalperiod (int): macd signal period
Returns:
DataFrame: result
"""
df = client.chartDF(symbol, timeframe)
macd, macdsignal, macdhist = t.MACD(
df[col].values, fastperiod, slowperiod, signalperiod
)
return pd.DataFrame(
{
col: df[col].values,
"macd": macd,
"macdsignal": macdsignal,
"macdhist": macdhist,
}
)
def macdext(
client,
symbol,
timeframe="6m",
col="close",
fastperiod=12,
fastmatype=0,
slowperiod=26,
slowmatype=0,
signalperiod=9,
signalmatype=0,
):
"""This will return a dataframe of Moving Average Convergence/Divergence for the given symbol across the given timeframe
Args:
client (pyEX.Client): Client
symbol (string): Ticker
timeframe (string): timeframe to use, for pyEX.chart
col (string): column to use to calculate
fastperiod (int): fast period to calculate across
fastmatype (int): moving average type (0-sma)
slowperiod (int): slow period to calculate across
slowmatype (int): moving average type (0-sma)
signalperiod (int): macd signal period
signalmatype (int): moving average type (0-sma)
Returns:
DataFrame: result
"""
df = client.chartDF(symbol, timeframe)
macd, macdsignal, macdhist = t.MACDEXT(
        df[col].values, fastperiod, fastmatype, slowperiod, slowmatype, signalperiod, signalmatype
)
return pd.DataFrame(
{
col: df[col].values,
"macd": macd,
"macdsignal": macdsignal,
"macdhist": macdhist,
}
)
def mfi(
client,
symbol,
timeframe="6m",
highcol="high",
lowcol="low",
closecol="close",
volumecol="volume",
period=14,
):
"""This will return a dataframe of
Money Flow Index
for the given symbol across the given timeframe
Args:
client (pyEX.Client): Client
symbol (string): Ticker
timeframe (string): timeframe to use, for pyEX.chart
highcol (string): column to use to calculate
lowcol (string): column to use to calculate
        closecol (string): column to use to calculate
        volumecol (string): column to use to calculate
        period (int): period to calculate across
Returns:
DataFrame: result
"""
df = client.chartDF(symbol, timeframe)
x = t.MFI(
df[highcol].values,
df[lowcol].values,
df[closecol].values,
df[volumecol].values,
period,
)
return pd.DataFrame(
{
highcol: df[highcol].values,
lowcol: df[lowcol].values,
closecol: df[closecol].values,
volumecol: df[volumecol].values,
"mfi": x,
}
)
def minus_di(
client,
symbol,
timeframe="6m",
highcol="high",
lowcol="low",
closecol="close",
period=14,
):
"""This will return a dataframe of
Minus Directional Indicator
for the given symbol across the given timeframe
Args:
client (pyEX.Client): Client
symbol (string): Ticker
timeframe (string): timeframe to use, for pyEX.chart
highcol (string): column to use to calculate
lowcol (string): column to use to calculate
closecol (string): column to use to calculate
period (int): period to calculate across
Returns:
DataFrame: result
"""
df = client.chartDF(symbol, timeframe)
x = t.MINUS_DI(df[highcol].values, df[lowcol].values, df[closecol].values, period)
return pd.DataFrame(
{
highcol: df[highcol].values,
lowcol: df[lowcol].values,
closecol: df[closecol].values,
"minus_di": x,
}
)
def minus_dm(client, symbol, timeframe="6m", highcol="high", lowcol="low", period=14):
"""This will return a dataframe of
Minus Directional Movement
for the given symbol across the given timeframe
Args:
client (pyEX.Client): Client
symbol (string): Ticker
timeframe (string): timeframe to use, for pyEX.chart
highcol (string): column to use to calculate
lowcol (string): column to use to calculate
period (int): period to calculate across
Returns:
DataFrame: result
"""
df = client.chartDF(symbol, timeframe)
x = t.MINUS_DM(df[highcol].values, df[lowcol].values, period)
return pd.DataFrame(
{highcol: df[highcol].values, lowcol: df[lowcol].values, "minus_dm": x}
)
def mom(client, symbol, timeframe="6m", col="close", period=14):
"""This will return a dataframe of
Momentum
for the given symbol across the given timeframe
Args:
client (pyEX.Client): Client
symbol (string): Ticker
timeframe (string): timeframe to use, for pyEX.chart
col (string): column to use to calculate
period (int): period to calculate across
Returns:
DataFrame: result
"""
df = client.chartDF(symbol, timeframe)
return pd.DataFrame({col: df[col].values, "mom": t.MOM(df[col].values, period)})
def plus_di(
client,
symbol,
timeframe="6m",
highcol="high",
lowcol="low",
closecol="close",
period=14,
):
"""This will return a dataframe of
    Plus Directional Indicator
for the given symbol across the given timeframe
Args:
client (pyEX.Client): Client
symbol (string): Ticker
timeframe (string): timeframe to use, for pyEX.chart
highcol (string): column to use to calculate
lowcol (string): column to use to calculate
closecol (string): column to use to calculate
period (int): period to calculate across
Returns:
DataFrame: result
"""
df = client.chartDF(symbol, timeframe)
x = t.PLUS_DI(df[highcol].values, df[lowcol].values, df[closecol].values, period)
return pd.DataFrame(
{
highcol: df[highcol].values,
lowcol: df[lowcol].values,
closecol: df[closecol].values,
"plus_di": x,
}
)
def plus_dm(client, symbol, timeframe="6m", highcol="high", lowcol="low", period=14):
"""This will return a dataframe of
Plus Directional Movement
for the given symbol across the given timeframe
Args:
client (pyEX.Client): Client
symbol (string): Ticker
timeframe (string): timeframe to use, for pyEX.chart
highcol (string): column to use to calculate
lowcol (string): column to use to calculate
period (int): period to calculate across
Returns:
DataFrame: result
"""
df = client.chartDF(symbol, timeframe)
x = t.PLUS_DM(df[highcol].values, df[lowcol].values, period)
return pd.DataFrame(
{highcol: df[highcol].values, lowcol: df[lowcol].values, "plus_dm": x}
)
def ppo(
client, symbol, timeframe="6m", col="close", fastperiod=12, slowperiod=26, matype=0
):
"""This will return a dataframe of Percentage Price Oscillator for the given symbol across the given timeframe
Args:
client (pyEX.Client): Client
symbol (string): Ticker
timeframe (string): timeframe to use, for pyEX.chart
col (string): column to use to calculate
fastperiod (int): fast period to calculate across
slowperiod (int): slow period to calculate across
matype (int): moving average type (0-sma)
Returns:
DataFrame: result
"""
df = client.chartDF(symbol, timeframe)
ppo = t.PPO(df[col].values, fastperiod, slowperiod, matype)
return pd.DataFrame({col: df[col].values, "ppo": ppo})
def roc(client, symbol, timeframe="6m", col="close", period=14):
"""This will return a dataframe of
Rate of change: ((price/prevPrice)-1)*100
for the given symbol across the given timeframe
Args:
client (pyEX.Client): Client
symbol (string): Ticker
timeframe (string): timeframe to use, for pyEX.chart
col (string): column to use to calculate
period (int): period to calculate across
Returns:
DataFrame: result
"""
df = client.chartDF(symbol, timeframe)
return pd.DataFrame({col: df[col].values, "roc": t.ROC(df[col].values, period)})
def rocp(client, symbol, timeframe="6m", col="close", period=14):
"""This will return a dataframe of
Rate of change Percentage: (price-prevPrice)/prevPrice
for the given symbol across the given timeframe
Args:
client (pyEX.Client): Client
symbol (string): Ticker
timeframe (string): timeframe to use, for pyEX.chart
col (string): column to use to calculate
period (int): period to calculate across
Returns:
DataFrame: result
"""
df = client.chartDF(symbol, timeframe)
return pd.DataFrame({col: df[col].values, "rocp": t.ROCP(df[col].values, period)})
def rocr(client, symbol, timeframe="6m", col="close", period=14):
"""This will return a dataframe of
Rate of change ratio: (price/prevPrice)
for the given symbol across the given timeframe
Args:
client (pyEX.Client): Client
symbol (string): Ticker
timeframe (string): timeframe to use, for pyEX.chart
col (string): column to use to calculate
period (int): period to calculate across
Returns:
DataFrame: result
"""
df = client.chartDF(symbol, timeframe)
return pd.DataFrame({col: df[col].values, "rocr": t.ROCR(df[col].values, period)})
def rocr100(client, symbol, timeframe="6m", col="close", period=14):
"""This will return a dataframe of
Rate of change ratio 100 scale: (price/prevPrice)*100
for the given symbol across the given timeframe
Args:
client (pyEX.Client): Client
symbol (string): Ticker
timeframe (string): timeframe to use, for pyEX.chart
col (string): column to use to calculate
period (int): period to calculate across
Returns:
DataFrame: result
"""
df = client.chartDF(symbol, timeframe)
return pd.DataFrame(
{col: df[col].values, "rocr100": t.ROCR100(df[col].values, period)}
)
def rsi(client, symbol, timeframe="6m", col="close", period=14):
"""This will return a dataframe of
Relative Strength Index
for the given symbol across the given timeframe
Args:
client (pyEX.Client): Client
symbol (string): Ticker
timeframe (string): timeframe to use, for pyEX.chart
col (string): column to use to calculate
period (int): period to calculate across
Returns:
DataFrame: result
"""
df = client.chartDF(symbol, timeframe)
return pd.DataFrame({col: df[col].values, "rsi": t.RSI(df[col].values, period)})
def stoch(
client,
symbol,
timeframe="6m",
highcol="high",
lowcol="low",
closecol="close",
fastk_period=5,
slowk_period=3,
slowk_matype=0,
slowd_period=3,
slowd_matype=0,
):
"""This will return a dataframe of
Stochastic
for the given symbol across the given timeframe
Args:
client (pyEX.Client): Client
symbol (string): Ticker
timeframe (string): timeframe to use, for pyEX.chart
highcol (string): column to use to calculate
lowcol (string): column to use to calculate
closecol (string): column to use to calculate
fastk_period (int): fastk_period
slowk_period (int): slowk_period
slowk_matype (int): slowk_matype
slowd_period (int): slowd_period
slowd_matype (int): slowd_matype
Returns:
DataFrame: result
"""
df = client.chartDF(symbol, timeframe)
slowk, slowd = t.STOCH(
df[highcol].values,
df[lowcol].values,
df[closecol].values,
fastk_period=fastk_period,
slowk_period=slowk_period,
slowk_matype=slowk_matype,
slowd_period=slowd_period,
slowd_matype=slowd_matype,
)
return pd.DataFrame(
{
highcol: df[highcol].values,
lowcol: df[lowcol].values,
closecol: df[closecol].values,
"slowk": slowk,
"slowd": slowd,
}
)
def stochf(
client,
symbol,
timeframe="6m",
highcol="high",
lowcol="low",
closecol="close",
fastk_period=5,
slowk_period=3,
slowk_matype=0,
slowd_period=3,
slowd_matype=0,
):
"""This will return a dataframe of
Stochastic Fast
for the given symbol across the given timeframe
Args:
client (pyEX.Client): Client
symbol (string): Ticker
timeframe (string): timeframe to use, for pyEX.chart
highcol (string): column to use to calculate
lowcol (string): column to use to calculate
closecol (string): column to use to calculate
fastk_period (int): fastk_period
slowk_period (int): slowk_period
slowk_matype (int): slowk_matype
slowd_period (int): slowd_period
slowd_matype (int): slowd_matype
Returns:
DataFrame: result
"""
df = client.chartDF(symbol, timeframe)
    # TA-Lib's STOCHF only exposes fast k/d smoothing, so the slow d settings
    # are mapped onto its fast d parameters here.
    fastk, fastd = t.STOCHF(
        df[highcol].values,
        df[lowcol].values,
        df[closecol].values,
        fastk_period=fastk_period,
        fastd_period=slowd_period,
        fastd_matype=slowd_matype,
    )
return pd.DataFrame(
{
highcol: df[highcol].values,
lowcol: df[lowcol].values,
closecol: df[closecol].values,
"fastk": fastk,
"fastd": fastd,
}
)
def stochrsi(
client,
symbol,
timeframe="6m",
closecol="close",
fastk_period=5,
slowk_period=3,
slowk_matype=0,
slowd_period=3,
slowd_matype=0,
):
"""This will return a dataframe of
    Stochastic Relative Strength Index
    for the given symbol across the given timeframe
    Args:
        client (pyEX.Client): Client
        symbol (string): Ticker
        timeframe (string): timeframe to use, for pyEX.chart
        closecol (string): column to use to calculate
        fastk_period (int): fastk_period
        slowk_period (int): slowk_period
        slowk_matype (int): slowk_matype
        slowd_period (int): slowd_period
        slowd_matype (int): slowd_matype
Returns:
DataFrame: result
"""
df = client.chartDF(symbol, timeframe)
    # TA-Lib's STOCHRSI (not STOCHF) takes a single price series and only
    # fast d smoothing, so the slow d settings are mapped onto it here.
    fastk, fastd = t.STOCHRSI(
        df[closecol].values,
        fastk_period=fastk_period,
        fastd_period=slowd_period,
        fastd_matype=slowd_matype,
    )
return | pd.DataFrame({closecol: df[closecol].values, "fastk": fastk, "fastd": fastd}) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# Author: Tishacy
# Date: 2021-03-26
import os
import logging
import pandas as pd
from .query import Query
from .parser import PostParser, CommentParser, TagPostParser
from .downloader import Downloader, Resource
from .common import POSTS_QUERY_HASH_PARAM, \
COMMENTS_QUERY_HASH_PARAM, TAG_POSTS_QUERY_HASH_PARAM
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s')
logger = logging.getLogger('instagram')
def task_fetch_posts_and_comments(
author_id,
count=28,
posts_out='data/posts_data.xlsx',
comments_out='data/comments_data.xlsx'):
"""[Task] Fetch a specific number of posts of the given author and the comments
of these posts, and save them to files.
:param author_id: author id
:param count: number of posts to fetch
:param posts_out: out file of the posts data
:param comments_out: out file of the comments data
:return None:
"""
# Create query instances for posts and comments
post_query = Query(PostParser)
comment_query = Query(CommentParser)
# Query posts data
post_data = post_query.query_all(POSTS_QUERY_HASH_PARAM, {
"id": author_id,
"first": 50,
}, count)
logger.info("Count of posts data: %d" % len(post_data))
# Save the posts data
post_data_df = pd.DataFrame(post_data)
post_data_df.to_excel(posts_out, encoding='utf-8', index=False)
logger.info("Save the posts data to %s." % posts_out)
# Query comments data of posts
comment_data = []
for i, post in enumerate(post_data):
logger.info("Get comment of %d %s" % (i, post['short_code']))
comment_data_of_one_post = comment_query.query_all(COMMENTS_QUERY_HASH_PARAM, {
"shortcode": post['short_code'],
"first": 50,
}, None)
for comment in comment_data_of_one_post:
comment['post_short_code'] = post['short_code']
comment_data.extend(comment_data_of_one_post)
logger.info("Count of comment_data: %d" % len(comment_data))
# Save the comments data
comment_data_df = pd.DataFrame(comment_data)
comment_data_df.to_excel(comments_out, encoding='utf-8', index=False)
logger.info("Save the comments data to %s." % comments_out)
def task_fetch_tag_posts_and_comments(
tag_name,
count=100,
posts_out='data/tag_posts_data.xlsx',
comments_out='data/tag_comments_data.xlsx'):
"""[Task] Fetch a specific number of posts of the given tag and the comments
of these posts, and save them to files.
:param tag_name: tag name
:param count: number of posts to fetch
:param posts_out: out file of the posts data
:param comments_out: out file of the comments data
:return None:
"""
# Create query instances for posts and comments
post_query = Query(TagPostParser)
comment_query = Query(CommentParser)
# Query posts data
post_data = post_query.query_all(TAG_POSTS_QUERY_HASH_PARAM, {
"tag_name": tag_name,
"first": 50,
}, count)
logger.info("Count of posts data: %d" % len(post_data))
# Save the posts data
post_data_df = pd.DataFrame(post_data)
post_data_df.to_excel(posts_out, encoding='utf-8', index=False)
logger.info("Save the posts data to %s." % posts_out)
# Query comments data of posts
comment_data = []
for i, post in enumerate(post_data):
logger.info("Get comment of %d %s" % (i, post['short_code']))
comment_data_of_one_post = comment_query.query_all(COMMENTS_QUERY_HASH_PARAM, {
"shortcode": post['short_code'],
"first": 50,
}, 100)
for comment in comment_data_of_one_post:
comment['post_short_code'] = post['short_code']
comment_data.extend(comment_data_of_one_post)
logger.info("Count of comment_data: %d" % len(comment_data))
# Save the comments data
comment_data_df = pd.DataFrame(comment_data)
comment_data_df.to_excel(comments_out, encoding='utf-8', index=False)
logger.info("Save the comments data to %s." % comments_out)
def task_fetch_posts(
author_id,
count=28,
posts_out='data/posts_data.xlsx'):
"""[Task] Fetch a specific number of posts of the given author and the comments
of these posts, and save them to files.
:param author_id: author id
:param count: number of posts to fetch
:param posts_out: out file of the posts data
:return None:
"""
# Create query instances for posts
post_query = Query(PostParser)
# Query posts data
post_data = post_query.query_all(POSTS_QUERY_HASH_PARAM, {
"id": author_id,
"first": 50,
}, count)
logger.info("Count of posts data: %d" % len(post_data))
# Save the posts data
post_data_df = pd.DataFrame(post_data)
post_data_df.to_excel(posts_out, encoding='utf-8', index=False)
logger.info("Save the posts data to %s." % posts_out)
def task_fetch_tag_posts(
tag_name,
count=100,
posts_out='data/tag_posts_data.xlsx'):
"""[Task] Fetch a specific number of posts of the given tag and the comments
of these posts, and save them to files.
:param tag_name: tag name
:param count: number of posts to fetch
:param posts_out: out file of the posts data
:return None:
"""
# Create query instances for posts
post_query = Query(TagPostParser)
# Query posts data
post_data = post_query.query_all(TAG_POSTS_QUERY_HASH_PARAM, {
"tag_name": tag_name,
"first": 50,
}, count)
logger.info("Count of posts data: %d" % len(post_data))
# Save the posts data
post_data_df = | pd.DataFrame(post_data) | pandas.DataFrame |
import pandas as pd
def fix_datasets():
dati = pd.read_csv("dati_regioni.csv")
regioni = | pd.read_csv("regioni.csv") | pandas.read_csv |
import copy
import re
from textwrap import dedent
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
MultiIndex,
)
import pandas._testing as tm
jinja2 = pytest.importorskip("jinja2")
from pandas.io.formats.style import ( # isort:skip
Styler,
)
from pandas.io.formats.style_render import (
_get_level_lengths,
_get_trimming_maximums,
maybe_convert_css_to_tuples,
non_reducing_slice,
)
@pytest.fixture
def mi_df():
return DataFrame(
[[1, 2], [3, 4]],
index=MultiIndex.from_product([["i0"], ["i1_a", "i1_b"]]),
columns=MultiIndex.from_product([["c0"], ["c1_a", "c1_b"]]),
dtype=int,
)
@pytest.fixture
def mi_styler(mi_df):
return Styler(mi_df, uuid_len=0)
@pytest.fixture
def mi_styler_comp(mi_styler):
# comprehensively add features to mi_styler
mi_styler = mi_styler._copy(deepcopy=True)
mi_styler.css = {**mi_styler.css, **{"row": "ROW", "col": "COL"}}
mi_styler.uuid_len = 5
mi_styler.uuid = "abcde"
mi_styler.set_caption("capt")
mi_styler.set_table_styles([{"selector": "a", "props": "a:v;"}])
mi_styler.hide(axis="columns")
mi_styler.hide([("c0", "c1_a")], axis="columns", names=True)
mi_styler.hide(axis="index")
mi_styler.hide([("i0", "i1_a")], axis="index", names=True)
mi_styler.set_table_attributes('class="box"')
mi_styler.format(na_rep="MISSING", precision=3)
mi_styler.format_index(precision=2, axis=0)
mi_styler.format_index(precision=4, axis=1)
mi_styler.highlight_max(axis=None)
mi_styler.applymap_index(lambda x: "color: white;", axis=0)
mi_styler.applymap_index(lambda x: "color: black;", axis=1)
mi_styler.set_td_classes(
DataFrame(
[["a", "b"], ["a", "c"]], index=mi_styler.index, columns=mi_styler.columns
)
)
mi_styler.set_tooltips(
DataFrame(
[["a2", "b2"], ["a2", "c2"]],
index=mi_styler.index,
columns=mi_styler.columns,
)
)
return mi_styler
@pytest.mark.parametrize(
"sparse_columns, exp_cols",
[
(
True,
[
{"is_visible": True, "attributes": 'colspan="2"', "value": "c0"},
{"is_visible": False, "attributes": "", "value": "c0"},
],
),
(
False,
[
{"is_visible": True, "attributes": "", "value": "c0"},
{"is_visible": True, "attributes": "", "value": "c0"},
],
),
],
)
def test_mi_styler_sparsify_columns(mi_styler, sparse_columns, exp_cols):
exp_l1_c0 = {"is_visible": True, "attributes": "", "display_value": "c1_a"}
exp_l1_c1 = {"is_visible": True, "attributes": "", "display_value": "c1_b"}
ctx = mi_styler._translate(True, sparse_columns)
assert exp_cols[0].items() <= ctx["head"][0][2].items()
assert exp_cols[1].items() <= ctx["head"][0][3].items()
assert exp_l1_c0.items() <= ctx["head"][1][2].items()
assert exp_l1_c1.items() <= ctx["head"][1][3].items()
@pytest.mark.parametrize(
"sparse_index, exp_rows",
[
(
True,
[
{"is_visible": True, "attributes": 'rowspan="2"', "value": "i0"},
{"is_visible": False, "attributes": "", "value": "i0"},
],
),
(
False,
[
{"is_visible": True, "attributes": "", "value": "i0"},
{"is_visible": True, "attributes": "", "value": "i0"},
],
),
],
)
def test_mi_styler_sparsify_index(mi_styler, sparse_index, exp_rows):
exp_l1_r0 = {"is_visible": True, "attributes": "", "display_value": "i1_a"}
exp_l1_r1 = {"is_visible": True, "attributes": "", "display_value": "i1_b"}
ctx = mi_styler._translate(sparse_index, True)
assert exp_rows[0].items() <= ctx["body"][0][0].items()
assert exp_rows[1].items() <= ctx["body"][1][0].items()
assert exp_l1_r0.items() <= ctx["body"][0][1].items()
assert exp_l1_r1.items() <= ctx["body"][1][1].items()
def test_mi_styler_sparsify_options(mi_styler):
with pd.option_context("styler.sparse.index", False):
html1 = mi_styler.to_html()
with pd.option_context("styler.sparse.index", True):
html2 = mi_styler.to_html()
assert html1 != html2
with pd.option_context("styler.sparse.columns", False):
html1 = mi_styler.to_html()
with pd.option_context("styler.sparse.columns", True):
html2 = mi_styler.to_html()
assert html1 != html2
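# Companion sketch to the option tests above: both sparsify options toggled
# together (assumes the combined settings behave like the individual ones).
def test_mi_styler_sparsify_options_combined(mi_styler):
    with pd.option_context(
        "styler.sparse.index", False, "styler.sparse.columns", False
    ):
        html_flat = mi_styler.to_html()
    with pd.option_context(
        "styler.sparse.index", True, "styler.sparse.columns", True
    ):
        html_sparse = mi_styler.to_html()
    assert html_flat != html_sparse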
@pytest.mark.parametrize(
"rn, cn, max_els, max_rows, max_cols, exp_rn, exp_cn",
[
(100, 100, 100, None, None, 12, 6), # reduce to (12, 6) < 100 elements
(1000, 3, 750, None, None, 250, 3), # dynamically reduce rows to 250, keep cols
(4, 1000, 500, None, None, 4, 125), # dynamically reduce cols to 125, keep rows
(1000, 3, 750, 10, None, 10, 3), # overwrite above dynamics with max_row
(4, 1000, 500, None, 5, 4, 5), # overwrite above dynamics with max_col
(100, 100, 700, 50, 50, 25, 25), # rows cols below given maxes so < 700 elmts
],
)
def test_trimming_maximum(rn, cn, max_els, max_rows, max_cols, exp_rn, exp_cn):
rn, cn = _get_trimming_maximums(
rn, cn, max_els, max_rows, max_cols, scaling_factor=0.5
)
assert (rn, cn) == (exp_rn, exp_cn)
@pytest.mark.parametrize(
"option, val",
[
("styler.render.max_elements", 6),
("styler.render.max_rows", 3),
],
)
def test_render_trimming_rows(option, val):
# test auto and specific trimming of rows
df = DataFrame(np.arange(120).reshape(60, 2))
with pd.option_context(option, val):
ctx = df.style._translate(True, True)
assert len(ctx["head"][0]) == 3 # index + 2 data cols
assert len(ctx["body"]) == 4 # 3 data rows + trimming row
assert len(ctx["body"][0]) == 3 # index + 2 data cols
@pytest.mark.parametrize(
"option, val",
[
("styler.render.max_elements", 6),
("styler.render.max_columns", 2),
],
)
def test_render_trimming_cols(option, val):
# test auto and specific trimming of cols
df = DataFrame(np.arange(30).reshape(3, 10))
with pd.option_context(option, val):
ctx = df.style._translate(True, True)
assert len(ctx["head"][0]) == 4 # index + 2 data cols + trimming col
assert len(ctx["body"]) == 3 # 3 data rows
assert len(ctx["body"][0]) == 4 # index + 2 data cols + trimming col
def test_render_trimming_mi():
midx = MultiIndex.from_product([[1, 2], [1, 2, 3]])
df = DataFrame(np.arange(36).reshape(6, 6), columns=midx, index=midx)
with pd.option_context("styler.render.max_elements", 4):
ctx = df.style._translate(True, True)
assert len(ctx["body"][0]) == 5 # 2 indexes + 2 data cols + trimming row
assert {"attributes": 'rowspan="2"'}.items() <= ctx["body"][0][0].items()
assert {"class": "data row0 col_trim"}.items() <= ctx["body"][0][4].items()
assert {"class": "data row_trim col_trim"}.items() <= ctx["body"][2][4].items()
assert len(ctx["body"]) == 3 # 2 data rows + trimming row
assert len(ctx["head"][0]) == 5 # 2 indexes + 2 column headers + trimming col
assert {"attributes": 'colspan="2"'}.items() <= ctx["head"][0][2].items()
def test_render_empty_mi():
# GH 43305
df = DataFrame(index=MultiIndex.from_product([["A"], [0, 1]], names=[None, "one"]))
expected = dedent(
"""\
>
<thead>
<tr>
<th class="index_name level0" > </th>
<th class="index_name level1" >one</th>
</tr>
</thead>
"""
)
assert expected in df.style.to_html()
@pytest.mark.parametrize("comprehensive", [True, False])
@pytest.mark.parametrize("render", [True, False])
@pytest.mark.parametrize("deepcopy", [True, False])
def test_copy(comprehensive, render, deepcopy, mi_styler, mi_styler_comp):
styler = mi_styler_comp if comprehensive else mi_styler
styler.uuid_len = 5
s2 = copy.deepcopy(styler) if deepcopy else copy.copy(styler) # make copy and check
assert s2 is not styler
if render:
styler.to_html()
excl = [
"na_rep", # deprecated
"precision", # deprecated
"cellstyle_map", # render time vars..
"cellstyle_map_columns",
"cellstyle_map_index",
"template_latex", # render templates are class level
"template_html",
"template_html_style",
"template_html_table",
]
if not deepcopy: # check memory locations are equal for all included attributes
for attr in [a for a in styler.__dict__ if (not callable(a) and a not in excl)]:
assert id(getattr(s2, attr)) == id(getattr(styler, attr))
else: # check memory locations are different for nested or mutable vars
shallow = [
"data",
"columns",
"index",
"uuid_len",
"uuid",
"caption",
"cell_ids",
"hide_index_",
"hide_columns_",
"hide_index_names",
"hide_column_names",
"table_attributes",
]
for attr in shallow:
assert id(getattr(s2, attr)) == id(getattr(styler, attr))
for attr in [
a
for a in styler.__dict__
if (not callable(a) and a not in excl and a not in shallow)
]:
if getattr(s2, attr) is None:
assert id(getattr(s2, attr)) == id(getattr(styler, attr))
else:
assert id(getattr(s2, attr)) != id(getattr(styler, attr))
def test_clear(mi_styler_comp):
# NOTE: if this test fails for new features then 'mi_styler_comp' should be updated
# to ensure proper testing of the 'copy', 'clear', 'export' methods with new feature
# GH 40675
styler = mi_styler_comp
styler._compute() # execute applied methods
clean_copy = Styler(styler.data, uuid=styler.uuid)
excl = [
"data",
"index",
"columns",
"uuid",
"uuid_len", # uuid is set to be the same on styler and clean_copy
"cell_ids",
"cellstyle_map", # execution time only
"cellstyle_map_columns", # execution time only
"cellstyle_map_index", # execution time only
"precision", # deprecated
"na_rep", # deprecated
"template_latex", # render templates are class level
"template_html",
"template_html_style",
"template_html_table",
]
# tests vars are not same vals on obj and clean copy before clear (except for excl)
for attr in [a for a in styler.__dict__ if not (callable(a) or a in excl)]:
res = getattr(styler, attr) == getattr(clean_copy, attr)
assert not (all(res) if (hasattr(res, "__iter__") and len(res) > 0) else res)
    # test vars have same values on obj and clean copy after clearing
styler.clear()
for attr in [a for a in styler.__dict__ if not (callable(a))]:
res = getattr(styler, attr) == getattr(clean_copy, attr)
assert all(res) if hasattr(res, "__iter__") else res
def test_export(mi_styler_comp, mi_styler):
exp_attrs = [
"_todo",
"hide_index_",
"hide_index_names",
"hide_columns_",
"hide_column_names",
"table_attributes",
"table_styles",
"css",
]
for attr in exp_attrs:
check = getattr(mi_styler, attr) == getattr(mi_styler_comp, attr)
assert not (
all(check) if (hasattr(check, "__iter__") and len(check) > 0) else check
)
export = mi_styler_comp.export()
used = mi_styler.use(export)
for attr in exp_attrs:
check = getattr(used, attr) == getattr(mi_styler_comp, attr)
assert all(check) if (hasattr(check, "__iter__") and len(check) > 0) else check
used.to_html()
def test_hide_raises(mi_styler):
msg = "`subset` and `level` cannot be passed simultaneously"
with pytest.raises(ValueError, match=msg):
mi_styler.hide(axis="index", subset="something", level="something else")
msg = "`level` must be of type `int`, `str` or list of such"
with pytest.raises(ValueError, match=msg):
mi_styler.hide(axis="index", level={"bad": 1, "type": 2})
@pytest.mark.parametrize("level", [1, "one", [1], ["one"]])
def test_hide_index_level(mi_styler, level):
mi_styler.index.names, mi_styler.columns.names = ["zero", "one"], ["zero", "one"]
ctx = mi_styler.hide(axis="index", level=level)._translate(False, True)
assert len(ctx["head"][0]) == 3
assert len(ctx["head"][1]) == 3
assert len(ctx["head"][2]) == 4
assert ctx["head"][2][0]["is_visible"]
assert not ctx["head"][2][1]["is_visible"]
assert ctx["body"][0][0]["is_visible"]
assert not ctx["body"][0][1]["is_visible"]
assert ctx["body"][1][0]["is_visible"]
assert not ctx["body"][1][1]["is_visible"]
@pytest.mark.parametrize("level", [1, "one", [1], ["one"]])
@pytest.mark.parametrize("names", [True, False])
def test_hide_columns_level(mi_styler, level, names):
mi_styler.columns.names = ["zero", "one"]
if names:
mi_styler.index.names = ["zero", "one"]
ctx = mi_styler.hide(axis="columns", level=level)._translate(True, False)
assert len(ctx["head"]) == (2 if names else 1)
@pytest.mark.parametrize("method", ["applymap", "apply"])
@pytest.mark.parametrize("axis", ["index", "columns"])
def test_apply_map_header(method, axis):
# GH 41893
df = DataFrame({"A": [0, 0], "B": [1, 1]}, index=["C", "D"])
func = {
"apply": lambda s: ["attr: val" if ("A" in v or "C" in v) else "" for v in s],
"applymap": lambda v: "attr: val" if ("A" in v or "C" in v) else "",
}
# test execution added to todo
result = getattr(df.style, f"{method}_index")(func[method], axis=axis)
assert len(result._todo) == 1
assert len(getattr(result, f"ctx_{axis}")) == 0
# test ctx object on compute
result._compute()
expected = {
(0, 0): [("attr", "val")],
}
assert getattr(result, f"ctx_{axis}") == expected
@pytest.mark.parametrize("method", ["apply", "applymap"])
@pytest.mark.parametrize("axis", ["index", "columns"])
def test_apply_map_header_mi(mi_styler, method, axis):
# GH 41893
func = {
"apply": lambda s: ["attr: val;" if "b" in v else "" for v in s],
"applymap": lambda v: "attr: val" if "b" in v else "",
}
result = getattr(mi_styler, f"{method}_index")(func[method], axis=axis)._compute()
expected = {(1, 1): [("attr", "val")]}
assert getattr(result, f"ctx_{axis}") == expected
def test_apply_map_header_raises(mi_styler):
# GH 41893
with pytest.raises(ValueError, match="No axis named bad for object type DataFrame"):
mi_styler.applymap_index(lambda v: "attr: val;", axis="bad")._compute()
class TestStyler:
def setup_method(self, method):
np.random.seed(24)
self.s = DataFrame({"A": np.random.permutation(range(6))})
self.df = DataFrame({"A": [0, 1], "B": np.random.randn(2)})
self.f = lambda x: x
self.g = lambda x: x
def h(x, foo="bar"):
return pd.Series(f"color: {foo}", index=x.index, name=x.name)
self.h = h
self.styler = Styler(self.df)
self.attrs = DataFrame({"A": ["color: red", "color: blue"]})
self.dataframes = [
self.df,
DataFrame(
{"f": [1.0, 2.0], "o": ["a", "b"], "c": pd.Categorical(["a", "b"])}
),
]
self.blank_value = " "
def test_init_non_pandas(self):
msg = "``data`` must be a Series or DataFrame"
with pytest.raises(TypeError, match=msg):
Styler([1, 2, 3])
def test_init_series(self):
result = Styler(pd.Series([1, 2]))
assert result.data.ndim == 2
def test_repr_html_ok(self):
self.styler._repr_html_()
def test_repr_html_mathjax(self):
# gh-19824 / 41395
assert "tex2jax_ignore" not in self.styler._repr_html_()
with pd.option_context("styler.html.mathjax", False):
assert "tex2jax_ignore" in self.styler._repr_html_()
def test_update_ctx(self):
self.styler._update_ctx(self.attrs)
expected = {(0, 0): [("color", "red")], (1, 0): [("color", "blue")]}
assert self.styler.ctx == expected
def test_update_ctx_flatten_multi_and_trailing_semi(self):
attrs = DataFrame({"A": ["color: red; foo: bar", "color:blue ; foo: baz;"]})
self.styler._update_ctx(attrs)
expected = {
(0, 0): [("color", "red"), ("foo", "bar")],
(1, 0): [("color", "blue"), ("foo", "baz")],
}
assert self.styler.ctx == expected
def test_render(self):
df = DataFrame({"A": [0, 1]})
style = lambda x: pd.Series(["color: red", "color: blue"], name=x.name)
s = | Styler(df, uuid="AB") | pandas.io.formats.style.Styler |
import time
import boto3
from botocore import UNSIGNED
from botocore.config import Config
import botocore
import logging
from multiprocessing import Pool, Manager
import pandas as pd
import os
import argparse
import sys
import functools
from urllib import request
s3 = boto3.client('s3', config=Config(signature_version=UNSIGNED))
def download(bucket, root, retry, counter, lock, path):
i = 0
src = path
dest = f"{root}/{path}"
while i < retry:
try:
if not os.path.exists(dest):
s3.download_file(bucket, src, dest)
else:
logging.info(f"{dest} already exists.")
with lock:
counter.value += 1
if counter.value % 100 == 0:
logging.warning(f"Downloaded {counter.value} images.")
return
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == "404":
logging.warning(f"The file s3://{bucket}/{src} does not exist.")
return
i += 1
logging.warning(f"Sleep {i} and try again.")
time.sleep(i)
logging.warning(f"Failed to download the file s3://{bucket}/{src}. Exception: {e}")
def batch_download(bucket, file_paths, root, num_workers=10, retry=10):
with Pool(num_workers) as p:
m = Manager()
counter = m.Value('i', 0)
lock = m.Lock()
download_ = functools.partial(download, bucket, root, retry, counter, lock)
p.map(download_, file_paths)
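# Example (sketch): calling batch_download directly. The image keys below are
# placeholders in the expected "<split>/<image_id>.jpg" form, not verified ids.
def _example_batch_download(root="images"):
    paths = ["train/0000000000000001.jpg", "train/0000000000000002.jpg"]
    os.makedirs(f"{root}/train", exist_ok=True)
    batch_download("open-images-dataset", paths, root, num_workers=2, retry=3)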
def http_download(url, path):
with request.urlopen(url) as f:
with open(path, "wb") as fout:
buf = f.read(1024)
while buf:
fout.write(buf)
buf = f.read(1024)
def log_counts(values):
for k, count in values.value_counts().iteritems():
logging.warning(f"{k}: {count}/{len(values)} = {count/len(values):.2f}.")
def parse_args():
parser = argparse.ArgumentParser(
description='Dowload open image dataset by class.')
parser.add_argument("--root", type=str,
help='The root directory that you want to store the open image data.')
parser.add_argument("include_depiction", action="store_true",
help="Do you want to include drawings or depictions?")
parser.add_argument("--class_names", type=str,
help="the classes you want to download.")
parser.add_argument("--num_workers", type=int, default=10,
help="the classes you want to download.")
parser.add_argument("--retry", type=int, default=10,
help="retry times when downloading.")
parser.add_argument("--filter_file", type=str, default="",
help="This file specifies the image ids you want to exclude.")
parser.add_argument('--remove_overlapped', action='store_true',
help="Remove single boxes covered by group boxes.")
return parser.parse_args()
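# Example invocation (sketch; the script name, path and worker count are
# illustrative, and the optional "Name:group:fraction" filters follow the
# parsing done in __main__ below):
#   python open_images_downloader.py --root ~/data/open_images \
#       --class_names "Aircraft,Person:~group:0.25" --num_workers 20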
if __name__ == '__main__':
logging.basicConfig(stream=sys.stdout, level=logging.WARNING,
format='%(asctime)s - %(name)s - %(message)s')
args = parse_args()
bucket = "open-images-dataset"
names = [e.strip() for e in args.class_names.split(",")]
class_names = []
group_filters = []
percentages = []
for name in names:
t = name.split(":")
class_names.append(t[0].strip())
if len(t) >= 2 and t[1].strip():
group_filters.append(t[1].strip())
else:
group_filters.append("")
if len(t) >= 3 and t[2].strip():
percentages.append(float(t[2].strip()))
else:
percentages.append(1.0)
if not os.path.exists(args.root):
os.makedirs(args.root)
excluded_images = set()
if args.filter_file:
for line in open(args.filter_file):
img_id = line.strip()
if not img_id:
continue
excluded_images.add(img_id)
class_description_file = os.path.join(args.root, "class-descriptions-boxable.csv")
if not os.path.exists(class_description_file):
url = "https://temporal_filters.googleapis.com/openimages/2018_04/class-descriptions-boxable.csv"
logging.warning(f"Download {url}.")
http_download(url, class_description_file)
class_descriptions = pd.read_csv(class_description_file,
names=["id", "ClassName"])
class_descriptions = class_descriptions[class_descriptions['ClassName'].isin(class_names)]
image_files = []
for dataset_type in ["train", "validation", "test"]:
image_dir = os.path.join(args.root, dataset_type)
os.makedirs(image_dir, exist_ok=True)
annotation_file = f"{args.root}/{dataset_type}-annotations-bbox.csv"
if not os.path.exists(annotation_file):
url = f"https://temporal_filters.googleapis.com/openimages/2018_04/{dataset_type}/{dataset_type}-annotations-bbox.csv"
logging.warning(f"Download {url}.")
http_download(url, annotation_file)
logging.warning(f"Read annotation file {annotation_file}")
annotations = pd.read_csv(annotation_file)
annotations = pd.merge(annotations, class_descriptions,
left_on="LabelName", right_on="id",
how="inner")
if not args.include_depiction:
annotations = annotations.loc[annotations['IsDepiction'] != 1, :]
filtered = []
for class_name, group_filter, percentage in zip(class_names, group_filters, percentages):
sub = annotations.loc[annotations['ClassName'] == class_name, :]
excluded_images |= set(sub['ImageID'].sample(frac=1 - percentage))
if group_filter == '~group':
excluded_images |= set(sub.loc[sub['IsGroupOf'] == 1, 'ImageID'])
elif group_filter == 'group':
excluded_images |= set(sub.loc[sub['IsGroupOf'] == 0, 'ImageID'])
filtered.append(sub)
annotations = | pd.concat(filtered) | pandas.concat |
import re
import numpy as np
import pytest
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
import pandas as pd
from pandas import IntervalIndex, MultiIndex, RangeIndex
import pandas.util.testing as tm
def test_labels_dtypes():
# GH 8456
i = MultiIndex.from_tuples([("A", 1), ("A", 2)])
assert i.codes[0].dtype == "int8"
assert i.codes[1].dtype == "int8"
i = MultiIndex.from_product([["a"], range(40)])
assert i.codes[1].dtype == "int8"
i = MultiIndex.from_product([["a"], range(400)])
assert i.codes[1].dtype == "int16"
i = MultiIndex.from_product([["a"], range(40000)])
assert i.codes[1].dtype == "int32"
i = pd.MultiIndex.from_product([["a"], range(1000)])
assert (i.codes[0] >= 0).all()
assert (i.codes[1] >= 0).all()
def test_values_boxed():
tuples = [
(1, pd.Timestamp("2000-01-01")),
(2, pd.NaT),
(3, pd.Timestamp("2000-01-03")),
(1, pd.Timestamp("2000-01-04")),
(2, pd.Timestamp("2000-01-02")),
(3, pd.Timestamp("2000-01-03")),
]
result = pd.MultiIndex.from_tuples(tuples)
expected = construct_1d_object_array_from_listlike(tuples)
tm.assert_numpy_array_equal(result.values, expected)
# Check that code branches for boxed values produce identical results
tm.assert_numpy_array_equal(result.values[:4], result[:4].values)
def test_values_multiindex_datetimeindex():
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(10 ** 18, 10 ** 18 + 5)
naive = pd.DatetimeIndex(ints)
# TODO(GH-24559): Remove the FutureWarning
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
aware = pd.DatetimeIndex(ints, tz="US/Central")
idx = pd.MultiIndex.from_arrays([naive, aware])
result = idx.values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive)
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware)
# n_lev > n_lab
result = idx[:2].values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive[:2])
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware[:2])
def test_values_multiindex_periodindex():
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(2007, 2012)
pidx = pd.PeriodIndex(ints, freq="D")
idx = pd.MultiIndex.from_arrays([ints, pidx])
result = idx.values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx)
# n_lev > n_lab
result = idx[:2].values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints[:2]))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx[:2])
def test_consistency():
# need to construct an overflow
major_axis = list(range(70000))
minor_axis = list(range(10))
major_codes = np.arange(70000)
minor_codes = np.repeat(range(10), 7000)
# the fact that is works means it's consistent
index = MultiIndex(
levels=[major_axis, minor_axis], codes=[major_codes, minor_codes]
)
# inconsistent
major_codes = np.array([0, 0, 1, 1, 1, 2, 2, 3, 3])
minor_codes = np.array([0, 1, 0, 1, 1, 0, 1, 0, 1])
index = MultiIndex(
levels=[major_axis, minor_axis], codes=[major_codes, minor_codes]
)
assert index.is_unique is False
def test_hash_collisions():
# non-smoke test that we don't get hash collisions
index = MultiIndex.from_product(
[np.arange(1000), np.arange(1000)], names=["one", "two"]
)
result = index.get_indexer(index.values)
tm.assert_numpy_array_equal(result, np.arange(len(index), dtype="intp"))
for i in [0, 1, len(index) - 2, len(index) - 1]:
result = index.get_loc(index[i])
assert result == i
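# Small companion sketch to the collision checks above: a key outside the
# levels should raise rather than silently collide.
def test_get_loc_missing_key_raises():
    index = MultiIndex.from_product([np.arange(10), np.arange(10)])
    with pytest.raises(KeyError):
        index.get_loc((10, 0))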
def test_dims():
pass
def take_invalid_kwargs():
vals = [["A", "B"], [pd.Timestamp("2011-01-01"), | pd.Timestamp("2011-01-02") | pandas.Timestamp |
# -*- coding: utf-8 -*-
#
# @author <NAME>
# @date 12 March 2019
import sys
import os
import numpy as np
import pandas as pd
import itertools
import argparse
def getCIVETSubjectValues(atlas_df, subject_dir, subject_id, smoothing='30'):
""" Parser for surfaces/sub-0050106_T1w_DKT_lobe_thickness_tlink_30mm_left.dat files from CIVET 2.1 output
Uses DKT atlas.
"""
civet_subject_file = subject_dir + '/surfaces/sub-{}_T1w_DKT_lobe_thickness_tlink_{}mm_{}.dat'
civet_subject_both_hemi = | pd.DataFrame() | pandas.DataFrame |
# Lint as: python3
# coding=utf-8
import numpy as np
import pandas as pd
import tensorflow.compat.v1 as tf
import model_bias_analysis as mba
class ModelBiasAnalysisTest(tf.test.TestCase):
def test_add_subgroup_columns_from_text(self):
df = pd.DataFrame({
u'toxicity': [u'nontoxic', u'nontoxic', u'nontoxic', u'nontoxic'],
u'phrase': [
u'You are a woman', u'I am gay', u'Je suis chrétien.',
u'je suis handicapé'
]
})
subgroups = [u'woman', u'gay', u'chrétien', u'handicapé']
mba.add_subgroup_columns_from_text(df, 'phrase', subgroups)
expected_df = pd.DataFrame({
u'toxicity': [u'nontoxic', u'nontoxic', u'nontoxic', u'nontoxic'],
u'phrase': [
u'You are a woman', u'I am gay', u'Je suis chrétien.',
u'je suis handicapé'
],
u'woman': [True, False, False, False],
u'gay': [False, True, False, False],
u'chrétien': [False, False, True, False],
u'handicapé': [False, False, False, True],
})
pd.util.testing.assert_frame_equal(
df.reset_index(drop=True).sort_index(axis='columns'),
expected_df.reset_index(drop=True).sort_index(axis='columns'))
def add_examples(self, data, model_scores, label, subgroup):
num_comments_added = len(model_scores)
data['model_score'].extend(model_scores)
data['label'].extend([label for a in range(num_comments_added)])
data['subgroup'].extend([subgroup for a in range(num_comments_added)])
def make_biased_dataset(self):
data = {'model_score': [], 'label': [], 'subgroup': []}
self.add_examples(data, [0.1, 0.2, 0.3], 0, False)
self.add_examples(data, [0.21, 0.31, 0.55], 0, True)
self.add_examples(data, [0.5, 0.8, 0.9], 1, False)
self.add_examples(data, [0.4, 0.6, 0.71], 1, True)
return pd.DataFrame(data)
def test_squared_diff_integral(self):
x = np.linspace(0.0, 1.0, num=100)
y = [1] * len(x)
result = mba.squared_diff_integral(y, x)
self.assertAlmostEqual(result, 0.333, places=2)
def test_average_squared_equality_gap_no_bias(self):
no_bias_data = {'model_score': [], 'label': [], 'subgroup': []}
low_model_scores = [0.1, 0.2, 0.22, 0.24, 0.26, 0.28, 0.3, 0.4]
high_model_scores = [0.7, 0.8, 0.82, 0.84, 0.86, 0.88, 0.9, 1.0]
self.add_examples(no_bias_data, low_model_scores, 0, False)
self.add_examples(no_bias_data, low_model_scores, 0, True)
self.add_examples(no_bias_data, high_model_scores, 1, False)
self.add_examples(no_bias_data, high_model_scores, 1, True)
no_bias_df = | pd.DataFrame(no_bias_data) | pandas.DataFrame |
import unittest
from setup.settings import *
from numpy.testing import *
from pandas.util.testing import *
import numpy as np
import dolphindb_numpy as dnp
import pandas as pd
import orca
class FunctionMinimumTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
# connect to a DolphinDB server
orca.connect(HOST, PORT, "admin", "123456")
def test_function_math_binary_minimum_scalar(self):
self.assertEqual(dnp.minimum(1.2 + 1j, 1.2 - 1j), np.minimum(1.2 + 1j, 1.2 - 1j))
self.assertEqual(dnp.minimum(0.5, 9), np.minimum(0.5, 9))
self.assertEqual(dnp.minimum(-1, 8.5), np.minimum(-1, 8.5))
self.assertEqual(dnp.minimum(1, 4), np.minimum(1, 4))
self.assertEqual(dnp.minimum(1, -5), np.minimum(1, -5))
self.assertEqual(dnp.minimum(0, 9), np.minimum(0, 9))
self.assertEqual(dnp.isnan(dnp.minimum(dnp.nan, -5)), True)
self.assertEqual(np.isnan(np.minimum(dnp.nan, -5)), True)
def test_function_math_binary_minimum_list(self):
lst1 = [1, 2, 3]
lst2 = [4, 6, 9]
assert_array_equal(dnp.minimum(lst1, lst2), np.minimum(lst1, lst2))
def test_function_math_binary_minimum_array_with_scalar(self):
npa = np.array([1, 2, 3])
dnpa = dnp.array([1, 2, 3])
assert_array_equal(dnp.minimum(dnpa, 1), np.minimum(npa, 1))
assert_array_equal(dnp.minimum(dnpa, dnp.nan), np.minimum(npa, np.nan))
assert_array_equal(dnp.minimum(1, dnpa), np.minimum(1, npa))
def test_function_math_binary_minimum_array_with_array(self):
npa1 = np.array([1, 2, 3])
npa2 = np.array([4, 6, 9])
dnpa1 = dnp.array([1, 2, 3])
dnpa2 = dnp.array([4, 6, 9])
assert_array_equal(dnp.minimum(dnpa1, dnpa2), np.minimum(npa1, npa2))
def test_function_math_binary_minimum_array_with_array_param_out(self):
npa1 = np.array([1, 2, 3])
npa2 = np.array([4, 6, 9])
npa = np.zeros(shape=(1, 3))
dnpa1 = dnp.array([1, 2, 3])
dnpa2 = dnp.array([4, 6, 9])
dnpa = dnp.zeros(shape=(1, 3))
np.minimum(npa1, npa2, out=npa)
dnp.minimum(dnpa1, dnpa2, out=dnpa)
assert_array_equal(dnpa, npa)
def test_function_math_binary_minimum_array_with_series(self):
npa = np.array([1, 2, 3])
dnpa = dnp.array([1, 2, 3])
ps = | pd.Series([4, 6, 9]) | pandas.Series |
from django.http.response import JsonResponse
from django.shortcuts import render,redirect
from django.views.decorators.csrf import csrf_exempt
from .models import Product,Sale,Search
import pandas as pd
import calendar
import json
# Create your views here.
@csrf_exempt
def point_1_1(request):
    # If the user is authenticated, render the page
if request.user.is_authenticated:
context = {}
df_sales, df_searches= process_report_1(1)
data_sales=df_to_json_list(df_sales)
data_searches=df_to_json_list(df_searches)
context = {
'segment': 'point_1_1',
'data_sales': data_sales,
'data_searches': data_searches,
}
return render(request,"core/point_1_1.html",context)
    # Otherwise redirect to the login page
return redirect('/login/')
@csrf_exempt
def point_1_2(request):
    # If the user is authenticated, render the page
if request.user.is_authenticated:
context = {}
category_list=process_report_1(2)
context = {
'segment': 'point_1_2',
'category_list': category_list,
}
return render(request,"core/point_1_2.html",context)
    # Otherwise redirect to the login page
return redirect('/login/')
@csrf_exempt
def point_2(request):
    # If the user is authenticated, render the page
if request.user.is_authenticated:
context = {}
df_score_max, df_score_min=process_report_2()
df_score_max_res=df_to_json_list(df_score_max)
df_score_min_res=df_to_json_list(df_score_min)
context = {
'segment': 'point_2',
'df_score_max_res': df_score_max_res,
'df_score_min_res': df_score_min_res,
}
return render(request,"core/point_2.html",context)
    # Otherwise redirect to the login page
return redirect('/login/')
@csrf_exempt
def point_3_1(request):
    # If the user is authenticated, render the page
if request.user.is_authenticated:
context = {}
dfs=generate_dfs() # df_searches, df_sales, df_products
df_month=show_report_total_monthly_income(dfs[1],dfs[2])
data_total=df_to_json_list(df_month)
context = {
'segment': 'point_3_1',
'data_total': data_total,
}
return render(request,"core/point_3_1.html",context)
    # Otherwise redirect to the login page
return redirect('/login/')
@csrf_exempt
def point_3_2(request):
    # If the user is authenticated, render the page
if request.user.is_authenticated:
context = {}
dfs=generate_dfs() # df_searches, df_sales, df_products
df_month=show_report_total_monthly_income(dfs[1],dfs[2])
total_anual='{:,.2f}'.format(df_month['total_revenue'].sum())
context = {
'segment': 'point_3_2',
'total_anual': total_anual,
}
return render(request,"core/point_3_2.html",context)
    # Otherwise redirect to the login page
return redirect('/login/')
@csrf_exempt
def point_3_3(request):
    # If the user is authenticated, render the page
if request.user.is_authenticated:
context = {}
dfs=generate_dfs() # df_searches, df_sales, df_products
df_month=show_report_total_monthly_income(dfs[1],dfs[2])
df_month=df_month[['month','total_sales']].sort_values(by=['total_sales'],ascending=False)[:5]
data_result=df_to_json_list(df_month)
context = {
'segment': 'point_3_3',
'data_result': data_result,
}
return render(request,"core/point_3_3.html",context)
    # Otherwise redirect to the login page
return redirect('/login/')
# getResultByCategory
def getResultByCat(request,category):
dfs=generate_dfs() # df_searches, df_sales, df_products
dict_categories=generate_dict_categories(dfs[2])
df_low_sales=reduce_df(dfs[1],'sales',300,True)
df_low_searches=reduce_df(dfs[0],'searches',300,True)
data_lower_sales=get_lowler_df_by_cat(dict_categories[category],df_low_sales,'sales',5)
data_lower_searches=get_lowler_df_by_cat(dict_categories[category],df_low_searches,'searches',10)
data_sales=df_to_json_list(data_lower_sales)
data_searches=df_to_json_list(data_lower_searches)
return JsonResponse({"data_sales":data_sales,"data_searches":data_searches})
# DataFrame helper functions
def process_report_1(option):
dfs=generate_dfs() # df_searches, df_sales, df_products
if option == 1:
        # Report of the products with the most sales and searches
df_sales=filter_products_searches(dfs[1],dfs[2],'sales',10)
df_searches=filter_products_searches(dfs[0],dfs[2],'searches',10)
return df_sales, df_searches
elif option == 2:
        # Build the per-category report of products with the fewest sales and searches
category_list=dfs[2]['category'].unique()
return category_list.tolist()
def process_report_2():
dfs=generate_dfs() # df_searches, df_sales, df_products
df_min_max=score_avg_min_max(dfs[1],dfs[2],10)
df_score_max= df_min_max[0].round(decimals = 2)
df_score_min= df_min_max[1].round(decimals = 2)
return df_score_max, df_score_min
def generate_dfs():
    # Build the DataFrames
df_searches = pd.DataFrame(list(Search.objects.all().values()))
df_sales = pd.DataFrame(list(Sale.objects.all().values()))
df_products = pd.DataFrame(list(Product.objects.all().values()))
return df_searches, df_sales, df_products
# Reduce a df by counting the frequency of the id_product column
def reduce_df(df, col_name,count,ascending=False):
df_res=df['id_product'].value_counts(ascending = ascending)[0:count]
return pd.DataFrame({'id_product':df_res.index, col_name:df_res.values})
# Merge the reduce_df output with df_products
def filter_products_searches(df,df_products,col_name,count):
df_result=reduce_df(df,col_name, count)
df_res=pd.merge(df_result, df_products, on="id_product")
return df_res[['id_product','name',col_name,'price','category','stock']]
# Build a dictionary mapping each category to its products df
def generate_dict_categories(df_products):
categories=df_products['category'].unique()
dict_categories={}
for category in categories:
dict_categories[category]=df_products.loc[df_products['category'] == category]
return dict_categories
# Report of products with the fewest sales and searches per category (8 categories in total)
def get_lowler_df_by_cat(df_category,df_reduce, col_name, count):
df_categories_res = pd.merge(df_category, df_reduce, on="id_product")
if df_categories_res.empty:
return 0
else:
df_cat_res=df_categories_res.sort_values(by=[col_name],ascending=True)[0:count]
return df_cat_res[['id_product','name',col_name,'price','category','stock']]
# Build a dataframe of products with their average review score
def score_avg_min_max(df_sales,df_products,count):
data=[]
for i in range(1,len(df_products)+1):
data.append([i, df_sales.loc[df_sales['id_product'] == i]['score'].mean()])
df_score=pd.DataFrame(data,columns=['id_product','score_avg'])
df_score = df_score[df_score['score_avg'].notna()]
df_res=pd.merge(df_score, df_products, on="id_product")
df_score_max=df_res.sort_values('score_avg',ascending=False)[0:count]
df_score_min=df_res.sort_values('score_avg',ascending=True )[0:count]
return df_score_max, df_score_min
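# Example (sketch): composing the helpers above outside a view; the counts are
# illustrative, not values used elsewhere in the app.
def _example_reports_snapshot():
    df_searches, df_sales, df_products = generate_dfs()
    top_sales = filter_products_searches(df_sales, df_products, 'sales', 10)
    top_searches = filter_products_searches(df_searches, df_products, 'searches', 10)
    best_scored, worst_scored = score_avg_min_max(df_sales, df_products, 10)
    return top_sales, top_searches, best_scored, worst_scored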
# Build a dataframe of total, average, monthly and annual sales
def show_report_total_monthly_income(df_sales,df_products):
    # Make sure the date column of df_sales has datetime dtype
df_sales['date'] = | pd.to_datetime(df_sales['date'],format='%d/%m/%Y') | pandas.to_datetime |
# Author: <NAME>
# Created: 8/26/20, 8:53 PM
import logging
import argparse
import pandas as pd
from typing import *
# noinspection All
from Bio import SeqIO
import pathmagic
# noinspection PyUnresolvedReferences
import mg_log # runs init in mg_log and configures logger
# Custom imports
from mg_container.genome_list import GenomeInfoList, GenomeInfo
from mg_general import Environment, add_env_args_to_parser
# ------------------------------ #
# Parse CMD #
# ------------------------------ #
from mg_general.general import os_join, get_value
from mg_general.genome_splitter import GenomeSplitter
from mg_io.general import mkdir_p, remove_p
from mg_io.labels import read_labels_from_file
from mg_io.shelf import read_sequences_for_gi, read_labels_for_gi
from mg_models.shelf import run_tool
from mg_options.parallelization import ParallelizationOptions
from mg_parallelization.generic_threading import run_n_per_thread
from mg_parallelization.pbs import PBS
from mg_pbs_data.mergers import merge_identity
from mg_pbs_data.splitters import split_gil
from mg_viz.shelf import mkstemp_closed
parser = argparse.ArgumentParser("DRIVER DESCRIPTION.")
parser.add_argument('--pf-gil', required=True)
parser.add_argument('--tool', required=True, choices=["mgm", "mgm2"])
parser.add_argument('--pf-output', required=True)
parser.add_argument('--dn-prefix', default="gct", help="Applies prefix to all run directories")
parser.add_argument('--force-split-in-intergenic', action='store_true')
parser.add_argument('--skip-if-exists', action='store_true')
parser.add_argument('--pf-parallelization-options')
parser.add_argument('--chunk-sizes-nt', nargs="+", default=[250, 500, 750, 1000, 1250, 1500, 1750, 2000, 2250, 2500, 2750, 3000, 5000 ], type=int)
add_env_args_to_parser(parser)
parsed_args = parser.parse_args()
# ------------------------------ #
# Main Code #
# ------------------------------ #
# Load environment variables
my_env = Environment.init_from_argparse(parsed_args)
# Setup logger
logging.basicConfig(level=parsed_args.loglevel)
logger = logging.getLogger("logger") # type: logging.Logger
def get_features_from_prediction(tool, pf_prediction, gcode_true, tag):
# type: (str, str, str, str) -> Dict[str, Dict[str, Any]]
entries = dict()
key_value_delimiters_gff = {
"mgm": "=",
"mgm2": " ",
"gms2": " ",
"mprodigal": "=",
"prodigal": "=",
"fgs": "=",
"mga": "="
}
attribute_delimiter_gff = {
"mgm": ","
}
# update labels file based on offset
labels = read_labels_from_file(pf_prediction, shift=0, key_value_delimiter=key_value_delimiters_gff.get(
tool.lower(), "="
), attribute_delimiter=attribute_delimiter_gff.get(tool.lower()), ignore_partial=False)
labels_per_seqname = dict()
for lab in labels:
if lab.seqname() not in labels_per_seqname:
labels_per_seqname[lab.seqname()] = list()
labels_per_seqname[lab.seqname()].append(lab)
counter = 0
for seqname in labels_per_seqname:
entries[seqname] = dict()
total_score = 0
avg_gene_length = 0
avg_gc = 0
num_genes = 0
for lab in labels_per_seqname[seqname]:
score = lab.get_attribute_value("score")
try:
score = float(score)
total_score += score
except (ValueError, TypeError):
pass
try:
avg_gc += float(lab.get_attribute_value("gc"))
except (ValueError, TypeError):
pass
num_genes += 1
avg_gene_length += abs(lab.right() - lab.left() + 1)
        avg_gene_length = avg_gene_length / num_genes if num_genes > 0 else 0
        avg_gc = avg_gc / num_genes if num_genes > 0 else 0
entries[seqname] = {
f"{tag}: Total Score": total_score,
f"{tag}: Average Gene Length": avg_gene_length,
f"{tag}: Average Gene GC": avg_gc,
f"{tag}: Number of Genes": num_genes
}
counter += 1
# if counter > 5:
# break
return entries
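# Sketch of the mapping returned above (sequence names and values are
# illustrative):
# {
#     "contig_1": {
#         "4: Total Score": 123.4,
#         "4: Average Gene Length": 850.0,
#         "4: Average Gene GC": 0.41,
#         "4: Number of Genes": 12,
#     },
#     ...
# }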
def build_gcode_features_for_sequence(env, tool, pf_sequences, pf_prediction, **kwargs):
# type: (Environment, str, str, str, Dict[str, Any]) -> pd.DataFrame
gcode_true = get_value(kwargs, "gcode_true", required=True, type=str)
skip_if_exists = get_value(kwargs, "skip_if_exists", False)
run_tool(env, pf_sequences, pf_prediction, tool, gcode=4, pf_mgm=None, fmt="ext", **kwargs)
dict_entries_4 = get_features_from_prediction(tool, pf_prediction, gcode_true, tag="4")
run_tool(env, pf_sequences, pf_prediction, tool, gcode=11, pf_mgm=None, fmt="ext", **kwargs)
dict_entries_11 = get_features_from_prediction(tool, pf_prediction, gcode_true, tag="11")
result = dict()
for seqname in set(dict_entries_4.keys()).union(dict_entries_11.keys()):
d4 = dict() if seqname not in dict_entries_4 else dict_entries_4[seqname]
d11 = dict() if seqname not in dict_entries_11 else dict_entries_11[seqname]
result[seqname] = d4
result[seqname].update(d11)
result[seqname]["True Gcode"] = gcode_true
# sequence stats
record_dict = SeqIO.to_dict(SeqIO.parse(pf_sequences, "fasta"))
for seqname, r in record_dict.items():
if seqname not in result:
continue
result[seqname]["Sequence Length"] = len(r)
result[seqname]["4: Gene Density"] = result[seqname]["4: Number of Genes"] / len(r.seq) if "4: Number of Genes" in result[seqname] else 0
result[seqname]["11: Gene Density"] = result[seqname]["11: Number of Genes"] / len(r.seq) if "11: Number of Genes" in result[seqname] else 0
return pd.DataFrame(result.values())
def build_gcode_features_for_gi_for_chunk(env, gi, tool, chunk, **kwargs):
# type: (Environment, GenomeInfo, str, int, Dict[str, Any]) -> pd.DataFrame
dn_tool = get_value(kwargs, "dn_tool", tool)
dn_prefix = get_value(kwargs, "dn_prefix", "")
dn = tool
gcode_true = gi.genetic_code
# split genome into chunks
gs = GenomeSplitter(
read_sequences_for_gi(env, gi), chunk,
labels=read_labels_for_gi(env, gi),
allow_splits_in_cds=kwargs.get("allow_splits_in_cds")
)
pf_chunks = mkstemp_closed(dir=env["pd-work"], suffix=".fasta")
gs.write_to_file(pf_chunks)
list_entries = list()
pd_run = os_join(env["pd-work"], gi.name, f"{dn_prefix}{dn}_{chunk}")
mkdir_p(pd_run)
pf_prediction = os_join(pd_run, "prediction.gff")
results = build_gcode_features_for_sequence(env, tool, pf_chunks, pf_prediction,
gcode_true=gcode_true, **kwargs)
results["Genome"] = gi.name
list_entries.append(results)
remove_p(pf_prediction)
remove_p(pf_chunks)
return pd.concat(list_entries, ignore_index=True, sort=False)
def build_gcode_features_for_gi(env, gi, tool, chunks, **kwargs):
# type: (Environment, GenomeInfo, str, List[int], Dict[str, Any]) -> pd.DataFrame
list_df = list()
num_processors = get_value(kwargs, "num_processors", 1, valid_type=int)
if num_processors > 1:
list_df = run_n_per_thread(
chunks, build_gcode_features_for_gi_for_chunk, "chunk",
{
"env": env, "gi": gi, "tool": tool, **kwargs
}
)
else:
list_df = list()
for chunk in chunks:
logger.debug(f"{gi.name};{chunk}")
curr = build_gcode_features_for_gi_for_chunk(env, gi, tool, chunk, **kwargs)
list_df.append(curr)
return | pd.concat(list_df, ignore_index=True, sort=False) | pandas.concat |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Chapters 27.2-3 from
http://www.kevinsheppard.com/images/0/09/Python_introduction.pdf
CAY risk factor:
http://faculty.haas.berkeley.edu/lettau/data/cay_q_13Q3.txt
Fama-French risk factors:
http://www.kevinsheppard.com/images/0/0b/FamaFrench.zip
GMM estimator is located here:
https://github.com/khrapovs/MyGMM
Fama-McBeth estimation library is here:
https://github.com/khrapovs/famamcbeth
"""
import pandas as pd
import numpy as np
import datetime as dt
import matplotlib.pylab as plt
import seaborn as sns
from famamcbeth import FamaMcBeth
def import_data():
parse = lambda x: dt.datetime.strptime(x, '%Y%m')
date_name = 'date'
factor_names = ['VWMe', 'SMB', 'HML']
rf_name = 'RF'
data = pd.read_csv('../data/FamaFrench.csv', index_col=date_name,
parse_dates=[date_name], date_parser=parse)
riskfree = data[[rf_name]].values
factors = data[factor_names].values
# Augment factors with the constant
factors = np.hstack((np.ones_like(riskfree), factors))
    portfolios = data[data.columns.difference(factor_names + [rf_name])].values
excess_ret = portfolios - riskfree
return factors, excess_ret
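# Note (editor addition): `factors` ends up with shape (T, 4) - a constant column followed by
# VWMe, SMB and HML - while `excess_ret` holds the test portfolios net of the risk-free rate.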
def import_cay():
    import calendar
date_name = 'date'
def parse(value):
year = int(value[:4])
month = 3*int(value[5:])
day = calendar.monthrange(year, month)[1]
return dt.datetime(year, month, day)
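    # Worked example (editor note): the CAY file labels quarters like '2002:1';
    # parse('2002:1') gives year=2002, month=3*1=3, day=monthrange(2002, 3)[1]=31,
    # i.e. dt.datetime(2002, 3, 31), which lines up with the quarterly resample below.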
cay = pd.read_csv('../data/cay_q_13Q3.csv', index_col=date_name,
parse_dates=[date_name], date_parser=parse)[['cay']]
parse = lambda x: dt.datetime.strptime(x, '%Y%m')
date_name = 'date'
rf_name = 'RF'
data = pd.read_csv('../data/FamaFrench.csv', index_col=date_name,
parse_dates=[date_name], date_parser=parse)
ff_factors = data.resample('Q').mean()
data = | pd.merge(cay, ff_factors, left_index=True, right_index=True) | pandas.merge |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from matplotlib.transforms import offset_copy
import cartopy.crs as ccrs
import cartopy.io.img_tiles as cimgt
import simplekml
import geopy
from geopy.distance import geodesic
#######################################################################################################################################
__all__ = ['MakeCoords']
class MakeCoords():
def __init__(self, x, y, z, theta, phi, latitude, longitude):
'''
x: array
2D meshgrid array containing all the x position data
y: array
2D meshgrid array containing all the y position data
z: array
2D meshgrid array containing all the z position data
theta: array
2D meshgrid array containing spherically parametrized location data
phi: array
2D meshgrid array containing spherically parametrized location data
latitude: float
the location where we want the dome to be centered IN DECIMAL DEGREES
longitude: float
the location where we want the dome to be centered IN DECIMAL DEGREES
THE USER SHOULD ONLY INTERACT WITH THE make_lat_lon METHODS
'''
self.latitude = latitude
self.longitude = longitude
self.ring_coord_list = self.extract_ring_coordinate_sets(x, y, z, theta, phi)
self.arch_coord_list = self.extract_arch_coordinate_sets(x, y, z, theta, phi)
self.df_ring = self.make_ring_df()
self.df_ring = self.distance_between_ring_points()
self.df_arch = self.make_arch_df()
self.df_arch = self.distance_between_arch_points()
################################################################################################################################
# MAKE THE RING PASSES
def xy_at_ring(self, level, x, y, z, theta, phi):
# extracts the location data from the meshgrid format to give it in ordered coordinate sets for every point at a given z
ring_coords = [[i, j, k, t, p] for i,j,k,t,p in zip(x[level], y[level], z[level], theta[level], phi[level])]
return ring_coords
def extract_ring_coordinate_sets(self, x, y, z, theta, phi):
# creates a list of all the coordinate points in ordered sets
temp_list = []
for p in range(len(phi)):
ring_coords = self.xy_at_ring(p, x, y, z, theta, phi)
temp_list.append(ring_coords)
ring_coord_list = np.concatenate(temp_list)
return ring_coord_list
################################################################################################################################
# MAKE THE ARCH PASSES
def xy_at_arch(self, arch, x, y, z, theta, phi):
arch_coords = [[i, j, k, t, p] for i,j,k,t,p in zip(x[:,arch], y[:,arch], z[:,arch], theta[:,arch], phi[:,arch])]
return arch_coords
def extract_arch_coordinate_sets(self, x, y, z, theta, phi):
# creates a list of all the coordinate points in ordered sets
temp_list = []
for t in range(len(theta[0])):
arch_coords = self.xy_at_arch(t, x, y, z, theta, phi)
if t %2 == 1:
temp_list.append(np.flip(arch_coords, axis=0).tolist())
else:
temp_list.append(arch_coords)
arch_coord_list = np.concatenate(temp_list)
return arch_coord_list
################################################################################################################################
# MAKE THE RING DATAFRAME
def make_ring_df(self):
# turning the list of coordinate points into a pandas dataframe for ease of use
df_ring = pd.DataFrame(self.ring_coord_list, columns=['x', 'y', 'z', 'theta', 'phi'])
return df_ring
def distance_between_ring_points(self):
dist_list = []
for i in range(len(self.df_ring)):
#dist = np.sqrt((self.df_ring.x[i] - self.df_ring.x[0])**2 + (self.df_ring.y[i] - self.df_ring.y[0])**2)
dist = np.sqrt((self.df_ring.x[i] - 0)**2 + (self.df_ring.y[i] - 0)**2)
dist_list.append(dist)
self.df_ring['dist_from_center_km'] = np.array(dist_list)/1000
df_ring = self.df_ring
return df_ring
def make_lat_lon_ring(self):
# transform dome coords from arbitrary xyz space into lat lon
lat_list = []
lon_list = []
for i in range(len(self.df_ring)):
origin = geopy.Point(self.latitude, self.longitude)
destination = geodesic(kilometers=self.df_ring.dist_from_center_km[i]).destination(origin, np.degrees(self.df_ring.theta[i]))
new_lat, new_lon = destination.latitude, destination.longitude
lat_list.append(new_lat)
lon_list.append(new_lon)
self.df_ring['latitude'] = lat_list
self.df_ring['longitude'] = lon_list
df_ring = self.df_ring
print('NUMBER OF WAYPOINTS = ' + str(len(df_ring)))
return df_ring
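    # Worked example (editor note): for a row with dist_from_center_km = 0.1 and theta = pi/2,
    # `geodesic(kilometers=0.1).destination(origin, 90)` places the waypoint roughly 100 m due
    # east of (self.latitude, self.longitude).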
################################################################################################################################
# MAKE THE ARCH DATAFRAME
def make_arch_df(self):
# turning the list of coordinate points into a pandas dataframe for ease of use
df_arch = | pd.DataFrame(self.arch_coord_list, columns=['x', 'y', 'z', 'theta', 'phi']) | pandas.DataFrame |
"""Thermal grid models module."""
import itertools
from multimethod import multimethod
import numpy as np
import pandas as pd
import scipy.constants
import scipy.sparse as sp
import scipy.sparse.linalg
import typing
import mesmo.config
import mesmo.data_interface
import mesmo.der_models
import mesmo.solutions
import mesmo.utils
logger = mesmo.config.get_logger(__name__)
class ThermalGridModel(mesmo.utils.ObjectBase):
"""Thermal grid model object."""
timesteps: pd.Index
node_names: pd.Index
line_names: pd.Index
der_names: pd.Index
der_types: pd.Index
nodes: pd.Index
branches: pd.Index
branch_loops: pd.Index
ders: pd.Index
branch_incidence_1_matrix: sp.spmatrix
branch_incidence_2_matrix: sp.spmatrix
branch_incidence_matrix: sp.spmatrix
branch_incidence_matrix_no_source_no_loop: sp.spmatrix
branch_incidence_matrix_no_source_loop: sp.spmatrix
branch_loop_incidence_matrix: sp.spmatrix
der_node_incidence_matrix: sp.spmatrix
der_thermal_power_vector_reference: np.ndarray
branch_flow_vector_reference: np.ndarray
node_head_vector_reference: np.ndarray
# TODO: Revise / reduce use of parameter attributes if possible.
line_parameters: pd.DataFrame
energy_transfer_station_head_loss: float
enthalpy_difference_distribution_water: float
distribution_pump_efficiency: float
source_der_model: mesmo.der_models.DERModel
plant_efficiency: float
def __init__(self, scenario_name: str):
# Obtain thermal grid data.
thermal_grid_data = mesmo.data_interface.ThermalGridData(scenario_name)
# Obtain index set for time steps.
# - This is needed for optimization problem definitions within linear thermal grid models.
self.timesteps = thermal_grid_data.scenario_data.timesteps
# Obtain node / line / DER names.
self.node_names = pd.Index(thermal_grid_data.thermal_grid_nodes["node_name"])
self.line_names = pd.Index(thermal_grid_data.thermal_grid_lines["line_name"])
self.der_names = pd.Index(thermal_grid_data.thermal_grid_ders["der_name"])
self.der_types = pd.Index(thermal_grid_data.thermal_grid_ders["der_type"]).unique()
# Obtain node / branch / DER index set.
nodes = pd.concat(
[
thermal_grid_data.thermal_grid_nodes.loc[:, "node_name"]
.apply(
# Obtain `node_type` column.
lambda value: "source"
if value == thermal_grid_data.thermal_grid.at["source_node_name"]
else "no_source"
)
.rename("node_type"),
thermal_grid_data.thermal_grid_nodes.loc[:, "node_name"],
],
axis="columns",
)
self.nodes = pd.MultiIndex.from_frame(nodes)
self.branches = pd.MultiIndex.from_product([self.line_names, ["no_loop"]], names=["branch_name", "loop_type"])
self.branch_loops = pd.MultiIndex.from_tuples([], names=["loop_id", "branch_name"]) # Values are filled below.
self.ders = pd.MultiIndex.from_frame(thermal_grid_data.thermal_grid_ders[["der_type", "der_name"]])
# Instantiate branch-to-node incidence matrices.
self.branch_incidence_1_matrix = sp.dok_matrix((len(self.branches), len(self.nodes)), dtype=int)
self.branch_incidence_2_matrix = sp.dok_matrix((len(self.branches), len(self.nodes)), dtype=int)
# Add lines to branch incidence matrices and identify any loops.
# - Uses temporary node tree variable to track construction of the network and identify any loops / cycles.
branches_loops = self.branches.to_frame()
node_trees = []
for line_index, line in thermal_grid_data.thermal_grid_lines.iterrows():
# Obtain indexes for positioning the line in the incidence matrices.
node_index_1 = mesmo.utils.get_index(self.nodes, node_name=line["node_1_name"])
node_index_2 = mesmo.utils.get_index(self.nodes, node_name=line["node_2_name"])
branch_index = mesmo.utils.get_index(self.branches, branch_name=line["line_name"])
# Insert connection indicators into incidence matrices.
self.branch_incidence_1_matrix[np.ix_(branch_index, node_index_1)] += 1
self.branch_incidence_2_matrix[np.ix_(branch_index, node_index_2)] += 1
# Check if node 1 or node 2 are in any node trees.
node_tree_index_1 = None
node_tree_index_2 = None
for node_tree_index, node_tree in enumerate(node_trees):
if line["node_1_name"] in node_tree:
node_tree_index_1 = node_tree_index
if line["node_2_name"] in node_tree:
node_tree_index_2 = node_tree_index
if (node_tree_index_1 is None) and (node_tree_index_2 is None):
# Create new tree, if neither node is on any tree.
node_trees.append([line["node_1_name"], line["node_2_name"]])
elif (node_tree_index_1 is not None) and (node_tree_index_2 is None):
# Add node to tree, if other node is on any tree.
node_trees[node_tree_index_1].append(line["node_2_name"])
elif (node_tree_index_1 is None) and (node_tree_index_2 is not None):
# Add node to tree, if other node is on any tree.
node_trees[node_tree_index_2].append(line["node_1_name"])
else:
if node_tree_index_1 == node_tree_index_2:
# Mark branch as loop, if both nodes are in the same tree.
branches_loops.at[self.branches[branch_index], "loop_type"] = "loop"
else:
# Merge trees, if the branch connects nodes on different trees.
node_trees[node_tree_index_1].extend(node_trees[node_tree_index_2])
node_trees[node_tree_index_2] = []
# Update branch / loop indexes.
self.branches = pd.MultiIndex.from_frame(branches_loops)
self.branch_loops = pd.MultiIndex.from_frame(
pd.concat(
[
pd.Series(range(sum(branches_loops.loc[:, "loop_type"] == "loop")), name="loop_id", dtype=int),
branches_loops.loc[branches_loops.loc[:, "loop_type"] == "loop", "branch_name"].reset_index(
drop=True
),
],
axis="columns",
)
)
# Raise errors on invalid network configurations.
node_trees = [node_tree for node_tree in node_trees if len(node_tree) > 0]
if len(node_trees) > 1:
raise ValueError(
f"The thermal grid contains disjoint sections of nodes:"
+ "".join(
[
f"\nSection {node_tree_index}: {node_tree}"
for node_tree_index, node_tree in enumerate(node_trees)
]
)
)
elif len(node_trees[0]) != len(self.node_names):
raise ValueError(
f"The thermal grid contains disconnected nodes:\n"
f"{[node_name for node_name in self.node_names if node_name not in node_trees[0]]}"
)
# Obtained combined branch incidence matrix.
self.branch_incidence_matrix = self.branch_incidence_1_matrix - self.branch_incidence_2_matrix
# Convert DOK matrices to CSR matrices.
self.branch_incidence_1_matrix = self.branch_incidence_1_matrix.tocsr()
self.branch_incidence_2_matrix = self.branch_incidence_2_matrix.tocsr()
self.branch_incidence_matrix = self.branch_incidence_matrix.tocsr()
# Obtain shorthand definitions.
self.branch_incidence_matrix_no_source_no_loop = self.branch_incidence_matrix[
np.ix_(
mesmo.utils.get_index(self.branches, loop_type="no_loop"),
mesmo.utils.get_index(self.nodes, node_type="no_source"),
)
]
self.branch_incidence_matrix_no_source_loop = self.branch_incidence_matrix[
np.ix_(
mesmo.utils.get_index(self.branches, loop_type="loop", raise_empty_index_error=False),
mesmo.utils.get_index(self.nodes, node_type="no_source"),
)
]
# Obtain branch-to-loop incidence matrix.
self.branch_loop_incidence_matrix = sp.vstack(
[
-1.0 * sp.linalg.inv(self.branch_incidence_matrix_no_source_no_loop.transpose())
# Using `sp.linalg.inv()` instead of `sp.linalg.spsolve()` to preserve dimensions in all cases.
@ self.branch_incidence_matrix_no_source_loop.transpose(),
sp.eye(len(self.branch_loops)),
]
).tocsr()
# Instantiate DER-to-node incidence matrix.
self.der_node_incidence_matrix = sp.dok_matrix((len(self.nodes), len(self.ders)), dtype=int)
# Add DERs into DER incidence matrix.
for der_name, der in thermal_grid_data.thermal_grid_ders.iterrows():
# Obtain indexes for positioning the DER in the incidence matrix.
node_index = mesmo.utils.get_index(self.nodes, node_name=der["node_name"])
der_index = mesmo.utils.get_index(self.ders, der_name=der["der_name"])
# Insert connection indicator into incidence matrices.
self.der_node_incidence_matrix[node_index, der_index] = 1
# Convert DOK matrices to CSR matrices.
self.der_node_incidence_matrix = self.der_node_incidence_matrix.tocsr()
# Obtain DER nominal thermal power vector.
self.der_thermal_power_vector_reference = thermal_grid_data.thermal_grid_ders.loc[
:, "thermal_power_nominal"
].values
# Obtain nominal branch flow vector.
self.branch_flow_vector_reference = (
np.pi
* (thermal_grid_data.thermal_grid_lines.loc[:, "diameter"].values / 2) ** 2
* thermal_grid_data.thermal_grid_lines.loc[:, "maximum_velocity"].values
)
# Obtain nominal branch flow vector.
# TODO: Define proper node head reference vector.
self.node_head_vector_reference = np.ones(len(self.nodes))
# Obtain line parameters.
self.line_parameters = thermal_grid_data.thermal_grid_lines.loc[:, ["length", "diameter", "absolute_roughness"]]
# Obtain other system parameters.
self.energy_transfer_station_head_loss = float(
thermal_grid_data.thermal_grid["energy_transfer_station_head_loss"]
)
self.enthalpy_difference_distribution_water = float(
thermal_grid_data.thermal_grid["enthalpy_difference_distribution_water"]
)
self.distribution_pump_efficiency = float(thermal_grid_data.thermal_grid["distribution_pump_efficiency"])
# Obtain DER model source node.
# TODO: Use state space model for simulation / optimization.
self.source_der_model = mesmo.der_models.make_der_model(
thermal_grid_data.thermal_grid.at["source_der_model_name"], thermal_grid_data.der_data, is_standalone=True
)
# TODO: Remove temporary workaround: Obtain efficiency factors.
if thermal_grid_data.thermal_grid.at["source_der_type"] == "cooling_plant":
self.plant_efficiency = self.source_der_model.cooling_plant_efficiency
elif thermal_grid_data.thermal_grid.at["source_der_type"] == "heating_plant":
self.plant_efficiency = self.source_der_model.thermal_efficiency
else:
raise ValueError(f"Incompatible der model type: {thermal_grid_data.thermal_grid.at['source_der_type']}")
# Define shorthands for no-source / source variables.
# TODO: Add in class documentation.
# TODO: Replace local variables in power flow / linear models.
node_incidence_matrix = sp.identity(len(self.nodes)).tocsr()
self.node_incidence_matrix_no_source = node_incidence_matrix[
np.ix_(range(len(self.nodes)), mesmo.utils.get_index(self.nodes, node_type="no_source"))
]
self.node_incidence_matrix_source = node_incidence_matrix[
np.ix_(range(len(self.nodes)), mesmo.utils.get_index(self.nodes, node_type="source"))
]
self.der_node_incidence_matrix_no_source = self.der_node_incidence_matrix[
np.ix_(mesmo.utils.get_index(self.nodes, node_type="no_source"), range(len(self.ders)))
]
self.branch_incidence_matrix_no_source = self.branch_incidence_matrix[
np.ix_(range(len(self.branches)), mesmo.utils.get_index(self.nodes, node_type="no_source"))
]
self.branch_incidence_matrix_source = self.branch_incidence_matrix[
np.ix_(range(len(self.branches)), mesmo.utils.get_index(self.nodes, node_type="source"))
]
self.node_head_vector_reference_no_source = self.node_head_vector_reference[
mesmo.utils.get_index(self.nodes, node_type="no_source")
]
self.node_head_vector_reference_source = self.node_head_vector_reference[
mesmo.utils.get_index(self.nodes, node_type="source")
]
def get_branch_loss_coefficient_vector(self, branch_flow_vector: np.ndarray):
# Obtain branch velocity vector.
branch_velocity_vector = (
4.0 * branch_flow_vector / (np.pi * self.line_parameters.loc[:, "diameter"].values ** 2)
)
# Obtain branch Reynolds coefficient vector.
branch_reynold_vector = (
np.abs(branch_velocity_vector)
* self.line_parameters.loc[:, "diameter"].values
/ mesmo.config.water_kinematic_viscosity
)
# Obtain branch friction factor vector.
@np.vectorize
def branch_friction_factor_vector(reynold, absolute_roughness, diameter):
# No flow.
if reynold == 0:
friction_factor = 0
# Laminar Flow, based on Hagen-Poiseuille velocity profile, analytical correlation.
elif 0 < reynold < 4000:
friction_factor = 64 / reynold
# Turbulent flow, Swamee-Jain formula, approximating correlation of Colebrook-White equation.
elif 4000 <= reynold:
if not (reynold <= 100000000 and 0.000001 <= ((absolute_roughness / 1000) / diameter) <= 0.01):
logger.warning(
"Exceeding validity range of Swamee-Jain formula for calculation of friction factor."
)
friction_factor = (
1.325 / (np.log((absolute_roughness / 1000) / (3.7 * diameter) + 5.74 / (reynold**0.9))) ** 2
)
else:
raise ValueError(f"Invalid Reynolds coefficient: {reynold}")
# Convert from 1/m to 1/km.
friction_factor *= 1.0e3
return friction_factor
# Obtain branch head loss coefficient vector.
branch_loss_coefficient_vector = (
branch_friction_factor_vector(
branch_reynold_vector,
self.line_parameters.loc[:, "absolute_roughness"].values,
self.line_parameters.loc[:, "diameter"].values,
)
* 8.0
* self.line_parameters.loc[:, "length"].values
/ (
mesmo.config.gravitational_acceleration
* self.line_parameters.loc[:, "diameter"].values ** 5
* np.pi**2
)
)
return branch_loss_coefficient_vector
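# Hedged illustration (editor addition): a standalone restatement of the turbulent-flow
# (Swamee-Jain) branch used in `ThermalGridModel.get_branch_loss_coefficient_vector` above,
# for a single pipe. The numeric defaults are made up; units mirror the model
# (absolute roughness in mm, diameter in m).
def _example_swamee_jain_friction_factor(reynold=1.0e5, absolute_roughness=0.05, diameter=0.1):
    friction_factor = (
        1.325 / (np.log((absolute_roughness / 1000) / (3.7 * diameter) + 5.74 / (reynold**0.9))) ** 2
    )
    # Same unit convention as the model: scale from 1/m to 1/km.
    return friction_factor * 1.0e3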
class ThermalGridDEROperationResults(mesmo.utils.ResultsBase):
der_thermal_power_vector: pd.DataFrame
der_thermal_power_vector_per_unit: pd.DataFrame
class ThermalGridOperationResults(ThermalGridDEROperationResults):
thermal_grid_model: ThermalGridModel
node_head_vector: pd.DataFrame
node_head_vector_per_unit: pd.DataFrame
branch_flow_vector: pd.DataFrame
branch_flow_vector_per_unit: pd.DataFrame
pump_power: pd.DataFrame
class ThermalGridDLMPResults(mesmo.utils.ResultsBase):
thermal_grid_energy_dlmp_node_thermal_power: pd.DataFrame
thermal_grid_head_dlmp_node_thermal_power: pd.DataFrame
thermal_grid_congestion_dlmp_node_thermal_power: pd.DataFrame
thermal_grid_pump_dlmp_node_thermal_power: pd.DataFrame
thermal_grid_total_dlmp_node_thermal_power: pd.DataFrame
thermal_grid_energy_dlmp_der_thermal_power: pd.DataFrame
thermal_grid_head_dlmp_der_thermal_power: pd.DataFrame
thermal_grid_congestion_dlmp_der_thermal_power: pd.DataFrame
thermal_grid_pump_dlmp_der_thermal_power: pd.DataFrame
thermal_grid_total_dlmp_der_thermal_power: pd.DataFrame
thermal_grid_total_dlmp_price_timeseries: pd.DataFrame
class ThermalPowerFlowSolutionBase(mesmo.utils.ObjectBase):
"""Thermal grid power flow solution object."""
der_thermal_power_vector: np.ndarray
node_head_vector: np.ndarray
branch_flow_vector: np.ndarray
pump_power: float
@multimethod
def __init__(self, scenario_name: str):
# Obtain thermal grid model.
thermal_grid_model = ThermalGridModel(scenario_name)
self.__init__(thermal_grid_model)
@multimethod
def __init__(self, thermal_grid_model: ThermalGridModel):
# Obtain DER thermal power vector.
der_thermal_power_vector = thermal_grid_model.der_thermal_power_vector_reference
self.__init__(thermal_grid_model, der_thermal_power_vector)
@multimethod
def __init__(self, thermal_grid_model: ThermalGridModel, der_thermal_power_vector: np.ndarray):
raise NotImplementedError
class ThermalPowerFlowSolutionExplicit(ThermalPowerFlowSolutionBase):
# Enable calls to `__init__` method definitions in parent class.
@multimethod
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@multimethod
def __init__(self, thermal_grid_model: ThermalGridModel, der_thermal_power_vector: np.ndarray):
# Obtain DER thermal power vector.
self.der_thermal_power_vector = der_thermal_power_vector.ravel()
# Define shorthand for DER volume flow vector.
der_flow_vector = (
self.der_thermal_power_vector
/ mesmo.config.water_density
/ thermal_grid_model.enthalpy_difference_distribution_water
)
# Obtain branch volume flow vector.
self.branch_flow_vector = (
scipy.sparse.linalg.spsolve(
thermal_grid_model.branch_incidence_matrix[
:, mesmo.utils.get_index(thermal_grid_model.nodes, node_type="no_source")
].transpose(),
thermal_grid_model.der_node_incidence_matrix[
mesmo.utils.get_index(thermal_grid_model.nodes, node_type="no_source"), :
]
@ np.transpose([der_flow_vector]),
)
).ravel()
# Obtain node head vector.
self.node_head_vector = np.zeros(len(thermal_grid_model.nodes), dtype=float)
self.node_head_vector[
mesmo.utils.get_index(thermal_grid_model.nodes, node_type="no_source")
] = scipy.sparse.linalg.spsolve(
thermal_grid_model.branch_incidence_matrix[
:, mesmo.utils.get_index(thermal_grid_model.nodes, node_type="no_source")
].tocsc(),
(
thermal_grid_model.get_branch_loss_coefficient_vector(self.branch_flow_vector)
* self.branch_flow_vector
* np.abs(self.branch_flow_vector) # TODO: Check if absolute value needed.
),
)
# Obtain pump power loss.
self.pump_power = (
(2.0 * np.max(np.abs(self.node_head_vector)) + thermal_grid_model.energy_transfer_station_head_loss)
* -1.0
* np.sum(der_flow_vector) # Source volume flow.
* mesmo.config.water_density
* mesmo.config.gravitational_acceleration
/ thermal_grid_model.distribution_pump_efficiency
)
class ThermalPowerFlowSolutionNewtonRaphson(ThermalPowerFlowSolutionBase):
# Enable calls to `__init__` method definitions in parent class.
@multimethod
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@multimethod
def __init__(
self,
thermal_grid_model: ThermalGridModel,
der_thermal_power_vector: np.ndarray,
head_iteration_limit=100,
head_tolerance=1e-2,
):
# Obtain DER thermal power vector.
self.der_thermal_power_vector = der_thermal_power_vector.ravel()
# Define shorthand for DER volume flow vector.
der_flow_vector = (
self.der_thermal_power_vector
/ mesmo.config.water_density
/ thermal_grid_model.enthalpy_difference_distribution_water
)
# Obtain nodal power vector.
node_flow_vector_no_source = (
thermal_grid_model.der_node_incidence_matrix_no_source @ np.transpose([der_thermal_power_vector])
).ravel()
# Obtain initial nodal power and voltage vectors, assuming no load and no injection.
# TODO: Enable passing previous solution for initialization.
node_flow_vector_initial_no_source = np.zeros(node_flow_vector_no_source.shape)
node_head_vector_initial_no_source = thermal_grid_model.node_head_vector_reference_no_source.copy()
branch_flow_vector_initial = thermal_grid_model.branch_flow_vector_reference.copy()
branch_loss_coefficient_vector_initial = thermal_grid_model.get_branch_loss_coefficient_vector(
branch_flow_vector_initial
)
# Define nodal power vector candidate to the desired nodal power vector.
node_flow_vector_candidate_no_source = node_flow_vector_initial_no_source.copy()
# Instantiate Newton-Raphson iteration variables.
head_iteration = 0
head_change = np.inf
# Run Newton-Raphson iterations.
while (head_iteration < head_iteration_limit) & (head_change > head_tolerance):
node_head_vector_estimate_no_source = scipy.sparse.linalg.spsolve(
(
np.transpose(thermal_grid_model.branch_incidence_matrix_no_source)
@ (
0.5
* sp.diags(branch_flow_vector_initial**-1)
@ sp.diags(branch_loss_coefficient_vector_initial**-1)
)
@ thermal_grid_model.branch_incidence_matrix_no_source
),
(
np.transpose(thermal_grid_model.branch_incidence_matrix_no_source)
@ (0.5 * (branch_flow_vector_initial**-1))
- np.transpose(thermal_grid_model.branch_incidence_matrix_no_source)
@ (
0.5
* sp.diags(branch_flow_vector_initial**-1)
@ sp.diags(branch_loss_coefficient_vector_initial**-1)
)
@ thermal_grid_model.branch_incidence_matrix_source
@ thermal_grid_model.node_head_vector_reference_source
+ node_flow_vector_candidate_no_source
),
)
node_head_vector_estimate = (
thermal_grid_model.node_incidence_matrix_no_source @ node_head_vector_estimate_no_source
+ thermal_grid_model.node_incidence_matrix_source @ thermal_grid_model.node_head_vector_reference_source
)
branch_flow_vector_estimate = (
0.5 * branch_flow_vector_initial
- (
0.5
* sp.diags(branch_flow_vector_initial**-1)
@ sp.diags(branch_loss_coefficient_vector_initial**-1)
)
@ thermal_grid_model.branch_incidence_matrix
@ node_head_vector_estimate
)
head_change = np.max(np.abs(node_head_vector_estimate_no_source - node_head_vector_initial_no_source))
node_head_vector_initial_no_source = node_head_vector_estimate_no_source.copy()
branch_flow_vector_initial = branch_flow_vector_estimate.copy()
branch_loss_coefficient_vector_initial = thermal_grid_model.get_branch_loss_coefficient_vector(
branch_flow_vector_initial
)
head_iteration += 1
        # Reaching the iteration limit without converging is considered undesired and triggers a warning.
if head_iteration >= head_iteration_limit:
logger.warning(
"Newton-Raphson solution algorithm reached " f"maximum limit of {head_iteration_limit} iterations."
)
# Obtain node head vector.
self.node_head_vector = node_head_vector_estimate
# Obtain branch volume flow vector.
self.branch_flow_vector = branch_flow_vector_estimate
# Obtain pump power loss.
self.pump_power = (
(2.0 * np.max(np.abs(self.node_head_vector)) + thermal_grid_model.energy_transfer_station_head_loss)
* -1.0
* np.sum(der_flow_vector) # Source volume flow.
* mesmo.config.water_density
* mesmo.config.gravitational_acceleration
/ thermal_grid_model.distribution_pump_efficiency
)
class ThermalPowerFlowSolution(ThermalPowerFlowSolutionBase):
"""Thermal grid power flow solution object."""
# Enable calls to `__init__` method definitions in parent class.
@multimethod
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@multimethod
def __init__(self, thermal_grid_model: ThermalGridModel, der_thermal_power_vector: np.ndarray):
# Select power flow solution method, depending on whether network is radial or meshed.
if len(thermal_grid_model.branch_loops) == 0:
# Use explicit thermal power flow solution method.
ThermalPowerFlowSolutionExplicit.__init__(self, thermal_grid_model, der_thermal_power_vector)
else:
raise NotImplementedError("Thermal power flow solution for meshed networks has not yet been implemented.")
class ThermalPowerFlowSolutionSet(mesmo.utils.ObjectBase):
power_flow_solutions: typing.Dict[pd.Timestamp, ThermalPowerFlowSolution]
thermal_grid_model: ThermalGridModel
der_thermal_power_vector: pd.DataFrame
timesteps: pd.Index
@multimethod
def __init__(
self, thermal_grid_model: ThermalGridModel, der_operation_results: ThermalGridDEROperationResults, **kwargs
):
der_thermal_power_vector = der_operation_results.der_thermal_power_vector
self.__init__(thermal_grid_model, der_thermal_power_vector, **kwargs)
@multimethod
def __init__(
self,
thermal_grid_model: ThermalGridModel,
der_thermal_power_vector: pd.DataFrame,
power_flow_solution_method=ThermalPowerFlowSolution,
):
# Store attributes.
self.thermal_grid_model = thermal_grid_model
self.der_thermal_power_vector = der_thermal_power_vector
self.timesteps = self.thermal_grid_model.timesteps
# Obtain power flow solutions.
power_flow_solutions = mesmo.utils.starmap(
power_flow_solution_method, zip(itertools.repeat(self.thermal_grid_model), der_thermal_power_vector.values)
)
self.power_flow_solutions = dict(zip(self.timesteps, power_flow_solutions))
def get_results(self) -> ThermalGridOperationResults:
raise NotImplementedError
class LinearThermalGridModel(mesmo.utils.ObjectBase):
"""Linear thermal grid model object."""
thermal_grid_model: ThermalGridModel
thermal_power_flow_solution: ThermalPowerFlowSolution
sensitivity_branch_flow_by_node_power: sp.spmatrix
sensitivity_branch_flow_by_der_power: sp.spmatrix
sensitivity_node_head_by_node_power: sp.spmatrix
sensitivity_node_head_by_der_power: sp.spmatrix
sensitivity_pump_power_by_node_power: np.array
sensitivity_pump_power_by_der_power: np.array
@multimethod
def __init__(
self,
scenario_name: str,
):
# Obtain thermal grid model.
thermal_grid_model = ThermalGridModel(scenario_name)
# Obtain DER power vector.
der_thermal_power_vector = thermal_grid_model.der_thermal_power_vector_reference
# Obtain thermal power flow solution.
thermal_power_flow_solution = ThermalPowerFlowSolution(thermal_grid_model, der_thermal_power_vector)
self.__init__(thermal_grid_model, thermal_power_flow_solution)
@multimethod
def __init__(
self,
thermal_grid_model: ThermalGridModel,
thermal_power_flow_solution: ThermalPowerFlowSolution,
):
# Store thermal grid model.
self.thermal_grid_model = thermal_grid_model
# Store thermal power flow solution.
self.thermal_power_flow_solution = thermal_power_flow_solution
# Obtain inverse / transpose incidence matrices.
node_index_no_source = mesmo.utils.get_index(
self.thermal_grid_model.nodes, node_type="no_source"
) # Define shorthand.
        branch_node_incidence_matrix_inverse = sp.dok_matrix(
            (len(self.thermal_grid_model.branches), len(self.thermal_grid_model.nodes)), dtype=float
        )
branch_node_incidence_matrix_inverse[
np.ix_(range(len(self.thermal_grid_model.branches)), node_index_no_source)
] = scipy.sparse.linalg.inv(
self.thermal_grid_model.branch_incidence_matrix[:, node_index_no_source].transpose()
)
branch_node_incidence_matrix_inverse = branch_node_incidence_matrix_inverse.tocsr()
branch_node_incidence_matrix_transpose_inverse = sp.dok_matrix(
(len(self.thermal_grid_model.nodes), len(self.thermal_grid_model.branches)), dtype=float
)
branch_node_incidence_matrix_transpose_inverse[
np.ix_(node_index_no_source, range(len(self.thermal_grid_model.branches)))
] = scipy.sparse.linalg.inv(self.thermal_grid_model.branch_incidence_matrix[:, node_index_no_source].tocsc())
branch_node_incidence_matrix_transpose_inverse = branch_node_incidence_matrix_transpose_inverse.tocsr()
der_node_incidence_matrix_transpose = np.transpose(self.thermal_grid_model.der_node_incidence_matrix)
# Obtain sensitivity matrices.
self.sensitivity_node_power_by_der_power = self.thermal_grid_model.der_node_incidence_matrix
self.sensitivity_branch_flow_by_node_power = (
branch_node_incidence_matrix_inverse
/ mesmo.config.water_density
/ self.thermal_grid_model.enthalpy_difference_distribution_water
)
self.sensitivity_branch_flow_by_der_power = (
self.sensitivity_branch_flow_by_node_power @ self.sensitivity_node_power_by_der_power
)
self.sensitivity_node_head_by_node_power = (
branch_node_incidence_matrix_transpose_inverse
@ sp.diags(
np.abs(thermal_power_flow_solution.branch_flow_vector)
* thermal_grid_model.get_branch_loss_coefficient_vector(thermal_power_flow_solution.branch_flow_vector)
)
@ self.sensitivity_branch_flow_by_node_power
)
self.sensitivity_node_head_by_der_power = (
self.sensitivity_node_head_by_node_power @ self.sensitivity_node_power_by_der_power
)
self.sensitivity_pump_power_by_node_power = (
(
-1.0
* thermal_power_flow_solution.der_thermal_power_vector
/ mesmo.config.water_density
/ thermal_grid_model.enthalpy_difference_distribution_water
) # DER volume flow vector.
@ (-2.0 * der_node_incidence_matrix_transpose)
@ self.sensitivity_node_head_by_node_power
* mesmo.config.water_density
* mesmo.config.gravitational_acceleration
/ self.thermal_grid_model.distribution_pump_efficiency
) + (
-1.0
* self.thermal_grid_model.energy_transfer_station_head_loss
* mesmo.config.gravitational_acceleration
/ self.thermal_grid_model.enthalpy_difference_distribution_water
/ self.thermal_grid_model.distribution_pump_efficiency
)
self.sensitivity_pump_power_by_der_power = np.array(
[self.sensitivity_pump_power_by_node_power @ self.sensitivity_node_power_by_der_power]
)
# TODO: Split global / local approximation methods.
LinearThermalGridModelGlobal = LinearThermalGridModel
class LinearThermalGridModelSet(mesmo.utils.ObjectBase):
linear_thermal_grid_models: typing.Dict[pd.Timestamp, LinearThermalGridModel]
thermal_grid_model: ThermalGridModel
timesteps: pd.Index
@multimethod
def __init__(
self,
thermal_grid_model: ThermalGridModel,
thermal_power_flow_solution_set: ThermalPowerFlowSolutionSet,
linear_thermal_grid_model_method: typing.Type[LinearThermalGridModel] = LinearThermalGridModelGlobal,
):
self.check_linear_thermal_grid_model_method(linear_thermal_grid_model_method)
# Obtain linear thermal grid models.
linear_thermal_grid_models = mesmo.utils.starmap(
linear_thermal_grid_model_method,
zip(itertools.repeat(thermal_grid_model), thermal_power_flow_solution_set.power_flow_solutions.values()),
)
linear_thermal_grid_models = dict(zip(thermal_grid_model.timesteps, linear_thermal_grid_models))
self.__init__(thermal_grid_model, linear_thermal_grid_models)
@multimethod
def __init__(
self,
thermal_grid_model: ThermalGridModel,
thermal_power_flow_solution: ThermalPowerFlowSolution,
linear_thermal_grid_model_method: typing.Type[LinearThermalGridModel] = LinearThermalGridModelGlobal,
):
self.check_linear_thermal_grid_model_method(linear_thermal_grid_model_method)
# Obtain linear thermal grid models.
linear_thermal_grid_model = LinearThermalGridModelGlobal(thermal_grid_model, thermal_power_flow_solution)
linear_thermal_grid_models = dict(
zip(thermal_grid_model.timesteps, itertools.repeat(linear_thermal_grid_model))
)
self.__init__(thermal_grid_model, linear_thermal_grid_models)
@multimethod
def __init__(
self,
thermal_grid_model: ThermalGridModel,
linear_thermal_grid_models: typing.Dict[pd.Timestamp, LinearThermalGridModel],
):
# Store attributes.
self.thermal_grid_model = thermal_grid_model
self.timesteps = self.thermal_grid_model.timesteps
self.linear_thermal_grid_models = linear_thermal_grid_models
@staticmethod
def check_linear_thermal_grid_model_method(linear_thermal_grid_model_method):
if not issubclass(linear_thermal_grid_model_method, LinearThermalGridModel):
raise ValueError(f"Invalid linear thermal grid model method: {linear_thermal_grid_model_method}")
def define_optimization_problem(
self,
optimization_problem: mesmo.solutions.OptimizationProblem,
price_data: mesmo.data_interface.PriceData,
scenarios: typing.Union[list, pd.Index] = None,
**kwargs,
):
# Defined optimization problem definitions through respective sub-methods.
self.define_optimization_variables(optimization_problem, scenarios=scenarios)
self.define_optimization_parameters(optimization_problem, price_data, scenarios=scenarios, **kwargs)
self.define_optimization_constraints(optimization_problem, scenarios=scenarios)
self.define_optimization_objective(optimization_problem, scenarios=scenarios)
def define_optimization_variables(
self, optimization_problem: mesmo.solutions.OptimizationProblem, scenarios: typing.Union[list, pd.Index] = None
):
# If no scenarios given, obtain default value.
if scenarios is None:
scenarios = [None]
# Define DER power vector variables.
optimization_problem.define_variable(
"der_thermal_power_vector", scenario=scenarios, timestep=self.timesteps, der=self.thermal_grid_model.ders
)
# Define node head, branch flow and pump power variables.
optimization_problem.define_variable(
"node_head_vector", scenario=scenarios, timestep=self.timesteps, node=self.thermal_grid_model.nodes
)
optimization_problem.define_variable(
"branch_flow_vector", scenario=scenarios, timestep=self.timesteps, branch=self.thermal_grid_model.branches
)
optimization_problem.define_variable("pump_power", scenario=scenarios, timestep=self.timesteps)
def define_optimization_parameters(
self,
optimization_problem: mesmo.solutions.OptimizationProblem,
price_data: mesmo.data_interface.PriceData,
node_head_vector_minimum: np.ndarray = None,
branch_flow_vector_maximum: np.ndarray = None,
scenarios: typing.Union[list, pd.Index] = None,
):
# If no scenarios given, obtain default value.
if scenarios is None:
scenarios = [None]
# Obtain timestep interval in hours, for conversion of power to energy.
timestep_interval_hours = (self.timesteps[1] - self.timesteps[0]) / | pd.Timedelta("1h") | pandas.Timedelta |
# -*- coding: utf-8 -*-
"""
Zerodha Kite Connect - candlestick pattern scanner
@author: <NAME> (http://rasuquant.com/wp/)
"""
from kiteconnect import KiteConnect
import pandas as pd
import datetime as dt
import os
import time
import numpy as np
from technicalta import *
#cwd = os.chdir("D:\\Udemy\\Zerodha KiteConnect API\\1_account_authorization")
apikey = '<KEY>'
#generate trading session
'''access_token = open("access_token.txt",'r').read()
key_secret = open("api_key.txt",'r').read().split()
kite = KiteConnect(api_key=key_secret[0])
kite.set_access_token(access_token)
#get dump of all NSE instruments
instrument_dump = kite.instruments("NSE")
instrument_df = pd.DataFrame(instrument_dump)
'''
def instrumentLookup(instrument_df,symbol):
"""Looks up instrument token for a given script from instrument dump"""
try:
return instrument_df[instrument_df.tradingsymbol==symbol].instrument_token.values[0]
except:
return -1
def fetchOHLC(ticker,interval,duration):
"""extracts historical data and outputs in the form of dataframe"""
instrument = instrumentLookup(instrument_df,ticker)
data = pd.DataFrame(kite.historical_data(instrument,dt.date.today()-dt.timedelta(duration), dt.date.today(),interval))
data.set_index("date",inplace=True)
return data
def doji(ohlc_df):
"""returns dataframe with doji candle column"""
df = ohlc_df.copy()
    avg_candle_size = abs(df["close"] - df["open"]).median()
df["doji"] = abs(df["close"] - df["open"]) <= (0.05 * avg_candle_size)
return df
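# Worked example (editor note): if the median |close - open| over the frame is 2.0 points,
# any candle whose body is at most 0.05 * 2.0 = 0.10 points is flagged as a doji.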
def maru_bozu(ohlc_df):
"""returns dataframe with maru bozu candle column"""
df = ohlc_df.copy()
avg_candle_size = abs(df["close"] - df["open"]).median()
df["h-c"] = df["high"]-df["close"]
df["l-o"] = df["low"]-df["open"]
df["h-o"] = df["high"]-df["open"]
df["l-c"] = df["low"]-df["close"]
df["maru_bozu"] = np.where((df["close"] - df["open"] > 2*avg_candle_size) & \
(df[["h-c","l-o"]].max(axis=1) < 0.005*avg_candle_size),"maru_bozu_green",
np.where((df["open"] - df["close"] > 2*avg_candle_size) & \
(abs(df[["h-o","l-c"]]).max(axis=1) < 0.005*avg_candle_size),"maru_bozu_red",False))
df.drop(["h-c","l-o","h-o","l-c"],axis=1,inplace=True)
return df
def hammer(ohlc_df):
"""returns dataframe with hammer candle column"""
df = ohlc_df.copy()
df["hammer"] = (((df["high"] - df["low"])>3*(df["open"] - df["close"])) & \
((df["close"] - df["low"])/(.001 + df["high"] - df["low"]) > 0.6) & \
((df["open"] - df["low"])/(.001 + df["high"] - df["low"]) > 0.6)) & \
(abs(df["close"] - df["open"]) > 0.1* (df["high"] - df["low"]))
return df
def shooting_star(ohlc_df):
"""returns dataframe with shooting star candle column"""
df = ohlc_df.copy()
df["sstar"] = (((df["high"] - df["low"])>3*(df["open"] - df["close"])) & \
((df["high"] - df["close"])/(.001 + df["high"] - df["low"]) > 0.6) & \
((df["high"] - df["open"])/(.001 + df["high"] - df["low"]) > 0.6)) & \
(abs(df["close"] - df["open"]) > 0.1* (df["high"] - df["low"]))
return df
def levels(ohlc_day):
"""returns pivot point and support/resistance levels"""
high = round(ohlc_day["high"][-1],2)
low = round(ohlc_day["low"][-1],2)
close = round(ohlc_day["close"][-1],2)
pivot = round((high + low + close)/3,2)
r1 = round((2*pivot - low),2)
r2 = round((pivot + (high - low)),2)
r3 = round((high + 2*(pivot - low)),2)
s1 = round((2*pivot - high),2)
s2 = round((pivot - (high - low)),2)
s3 = round((low - 2*(high - pivot)),2)
return (pivot,r1,r2,r3,s1,s2,s3)
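# Hedged usage sketch (editor addition): a worked example of the pivot arithmetic in `levels`
# on one made-up daily bar; the datetime index lets the [-1] lookups inside `levels` resolve
# positionally, as they do on the fetched data.
def _example_levels():
    demo_day = pd.DataFrame(
        {"high": [110.0], "low": [100.0], "close": [105.0]},
        index=pd.to_datetime(["2020-01-01"]),
    )
    # pivot = (110 + 100 + 105) / 3 = 105.0
    # r1 = 2*105 - 100 = 110.0, r2 = 105 + (110 - 100) = 115.0, r3 = 110 + 2*(105 - 100) = 120.0
    # s1 = 2*105 - 110 = 100.0, s2 = 105 - (110 - 100) = 95.0,  s3 = 100 - 2*(110 - 105) = 90.0
    return levels(demo_day)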
def trend(ohlc_df,n):
"function to assess the trend by analyzing each candle"
df = ohlc_df.copy()
df["up"] = np.where(df["low"]>=df["low"].shift(1),1,0)
df["dn"] = np.where(df["high"]<=df["high"].shift(1),1,0)
if df["close"][-1] > df["open"][-1]:
if df["up"][-1*n:].sum() >= 0.7*n:
return "uptrend"
elif df["open"][-1] > df["close"][-1]:
if df["dn"][-1*n:].sum() >= 0.7*n:
return "downtrend"
else:
return None
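# Worked example (editor note): with n = 7 the uptrend test needs df["up"] to sum to at least
# 0.7 * 7 = 4.9, i.e. higher lows on at least 5 of the last 7 candles (plus a green last
# candle); the downtrend test mirrors this with lower highs.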
def res_sup(ohlc_df,ohlc_day):
"""calculates closest resistance and support levels for a given candle"""
level = ((ohlc_df["close"][-1] + ohlc_df["open"][-1])/2 + (ohlc_df["high"][-1] + ohlc_df["low"][-1])/2)/2
p,r1,r2,r3,s1,s2,s3 = levels(ohlc_day)
l_r1=level-r1
l_r2=level-r2
l_r3=level-r3
l_p=level-p
l_s1=level-s1
l_s2=level-s2
l_s3=level-s3
lev_ser = pd.Series([l_p,l_r1,l_r2,l_r3,l_s1,l_s2,l_s3],index=["p","r1","r2","r3","s1","s2","s3"])
sup = lev_ser[lev_ser>0].idxmin()
res = lev_ser[lev_ser>0].idxmax()
return (eval('{}'.format(res)), eval('{}'.format(sup)))
def candle_type(ohlc_df):
"""returns the candle type of the last candle of an OHLC DF"""
'''ohlc_df['open']=int(ohlc_df['open'])
ohlc_df['close']=int(ohlc_df['close'])
ohlc_df['high']=int(ohlc_df['high'])
ohlc_df['low']=int(ohlc_df['low'])'''
candle = None
if doji(ohlc_df)["doji"][-1] == True:
candle = "doji"
if maru_bozu(ohlc_df)["maru_bozu"][-1] == "maru_bozu_green":
candle = "maru_bozu_green"
if maru_bozu(ohlc_df)["maru_bozu"][-1] == "maru_bozu_red":
candle = "maru_bozu_red"
if shooting_star(ohlc_df)["sstar"][-1] == True:
candle = "shooting_star"
if hammer(ohlc_df)["hammer"][-1] == True:
candle = "hammer"
return candle
def candle_pattern(ohlc_df,ohlc_day):
"""returns the candle pattern identified"""
pattern = None
signi = "low"
avg_candle_size = abs(ohlc_df["close"] - ohlc_df["open"]).median()
sup, res = res_sup(ohlc_df,ohlc_day)
if (sup - 1.5*avg_candle_size) < ohlc_df["close"][-1] < (sup + 1.5*avg_candle_size):
signi = "HIGH"
if (res - 1.5*avg_candle_size) < ohlc_df["close"][-1] < (res + 1.5*avg_candle_size):
signi = "HIGH"
if candle_type(ohlc_df) == 'doji' \
and ohlc_df["close"][-1] > ohlc_df["close"][-2] \
and ohlc_df["close"][-1] > ohlc_df["open"][-1]:
pattern = "doji_bullish"
if candle_type(ohlc_df) == 'doji' \
and ohlc_df["close"][-1] < ohlc_df["close"][-2] \
and ohlc_df["close"][-1] < ohlc_df["open"][-1]:
pattern = "doji_bearish"
if candle_type(ohlc_df) == "maru_bozu_green":
pattern = "maru_bozu_bullish"
if candle_type(ohlc_df) == "maru_bozu_red":
pattern = "maru_bozu_bearish"
if trend(ohlc_df.iloc[:-1,:],7) == "uptrend" and candle_type(ohlc_df) == "hammer":
pattern = "hanging_man_bearish"
if trend(ohlc_df.iloc[:-1,:],7) == "downtrend" and candle_type(ohlc_df) == "hammer":
pattern = "hammer_bullish"
if trend(ohlc_df.iloc[:-1,:],7) == "uptrend" and candle_type(ohlc_df) == "shooting_star":
pattern = "shooting_star_bearish"
if trend(ohlc_df.iloc[:-1,:],7) == "uptrend" \
and candle_type(ohlc_df) == "doji" \
and ohlc_df["high"][-1] < ohlc_df["close"][-2] \
and ohlc_df["low"][-1] > ohlc_df["open"][-2]:
pattern = "harami_cross_bearish"
if trend(ohlc_df.iloc[:-1,:],7) == "downtrend" \
and candle_type(ohlc_df) == "doji" \
and ohlc_df["high"][-1] < ohlc_df["open"][-2] \
and ohlc_df["low"][-1] > ohlc_df["close"][-2]:
pattern = "harami_cross_bullish"
if trend(ohlc_df.iloc[:-1,:],7) == "uptrend" \
and candle_type(ohlc_df) != "doji" \
and ohlc_df["open"][-1] > ohlc_df["high"][-2] \
and ohlc_df["close"][-1] < ohlc_df["low"][-2]:
pattern = "engulfing_bearish"
if trend(ohlc_df.iloc[:-1,:],7) == "downtrend" \
and candle_type(ohlc_df) != "doji" \
and ohlc_df["close"][-1] > ohlc_df["high"][-2] \
and ohlc_df["open"][-1] < ohlc_df["low"][-2]:
pattern = "engulfing_bullish"
return "Significance - {}, Pattern - {}".format(signi,pattern)
##############################################################################################
tickers = ["ZEEL","WIPRO","VEDL","ULTRACEMCO","UPL","TITAN","TECHM","TATASTEEL",
"TATAMOTORS","TCS","SUNPHARMA","SBIN","SHREECEM","RELIANCE","POWERGRID",
"ONGC","NESTLEIND","NTPC","MARUTI","M&M","LT","KOTAKBANK","JSWSTEEL","INFY",
"INDUSINDBK","IOC","ITC","ICICIBANK","HDFC","HINDUNILVR","HINDALCO",
"HEROMOTOCO","HDFCBANK","HCLTECH","GRASIM","GAIL","EICHERMOT","DRREDDY",
"COALINDIA","CIPLA","BRITANNIA","INFRATEL","BHARTIARTL","BPCL","BAJAJFINSV",
"BAJFINANCE","BAJAJ-AUTO","AXISBANK","ASIANPAINT","ADANIPORTS","IDEA",
"MCDOWELL-N","UBL","NIACL","SIEMENS","SRTRANSFIN","SBILIFE","PNB",
"PGHH","PFC","PEL","PIDILITIND","PETRONET","PAGEIND","OFSS","NMDC","NHPC",
"MOTHERSUMI","MARICO","LUPIN","L&TFH","INDIGO","IBULHSGFIN","ICICIPRULI",
"ICICIGI","HINDZINC","HINDPETRO","HAVELLS","HDFCLIFE","HDFCAMC","GODREJCP",
"GICRE","DIVISLAB","DABUR","DLF","CONCOR","COLPAL","CADILAHC","BOSCHLTD",
"BIOCON","BERGEPAINT","BANKBARODA","BANDHANBNK","BAJAJHLDNG","DMART",
"AUROPHARMA","ASHOKLEY","AMBUJACEM","ADANITRANS","ACC",
"WHIRLPOOL","WABCOINDIA","VOLTAS","VINATIORGA","VBL","VARROC","VGUARD",
"UNIONBANK","UCOBANK","TRENT","TORNTPOWER","TORNTPHARM","THERMAX","RAMCOCEM",
"TATAPOWER","TATACONSUM","TVSMOTOR","TTKPRESTIG","SYNGENE","SYMPHONY",
"SUPREMEIND","SUNDRMFAST","SUNDARMFIN","SUNTV","STRTECH","SAIL","SOLARINDS",
"SHRIRAMCIT","SCHAEFFLER","SANOFI","SRF","SKFINDIA","SJVN","RELAXO",
"RAJESHEXPO","RECLTD","RBLBANK","QUESS","PRESTIGE","POLYCAB","PHOENIXLTD",
"PFIZER","PNBHOUSING","PIIND","OIL","OBEROIRLTY","NAM-INDIA","NATIONALUM",
"NLCINDIA","NBCC","NATCOPHARM","MUTHOOTFIN","MPHASIS","MOTILALOFS","MINDTREE",
"MFSL","MRPL","MANAPPURAM","MAHINDCIE","M&MFIN","MGL","MRF","LTI","LICHSGFIN",
"LTTS","KANSAINER","KRBL","JUBILANT","JUBLFOOD","JINDALSTEL","JSWENERGY",
"IPCALAB","NAUKRI","IGL","IOB","INDHOTEL","INDIANB","IBVENTURES","IDFCFIRSTB",
"IDBI","ISEC","HUDCO","HONAUT","HAL","HEXAWARE","HATSUN","HEG","GSPL",
"GUJGASLTD","GRAPHITE","GODREJPROP","GODREJIND","GODREJAGRO","GLENMARK",
"GLAXO","GILLETTE","GMRINFRA","FRETAIL","FCONSUMER","FORTIS","FEDERALBNK",
"EXIDEIND","ESCORTS","ERIS","ENGINERSIN","ENDURANCE","EMAMILTD","EDELWEISS",
"EIHOTEL","LALPATHLAB","DALBHARAT","CUMMINSIND","CROMPTON","COROMANDEL","CUB",
"CHOLAFIN","CHOLAHLDNG","CENTRALBK","CASTROLIND","CANBK","CRISIL","CESC",
"BBTC","BLUEDART","BHEL","BHARATFORG","BEL","BAYERCROP","BATAINDIA",
"BANKINDIA","BALKRISIND","ATUL","ASTRAL","APOLLOTYRE","APOLLOHOSP",
"AMARAJABAT","ALKEM","APLLTD","AJANTPHARM","ABFRL","ABCAPITAL","ADANIPOWER",
"ADANIGREEN","ADANIGAS","ABBOTINDIA","AAVAS","AARTIIND","AUBANK","AIAENG","3MINDIA"]
def main():
for ticker in tickers:
try:
ohlc = fetchOHLC(ticker, '5minute',5)
ohlc_day = fetchOHLC(ticker, 'day',30)
ohlc_day = ohlc_day.iloc[:-1,:]
cp = candle_pattern(ohlc,ohlc_day)
print(ticker, ": ",cp)
except:
print("skipping for ",ticker)
'''
# Continuous execution
starttime=time.time()
timeout = time.time() + 60*60*1 # 60 seconds times 60 meaning the script will run for 1 hr
while time.time() <= timeout:
try:
print("passthrough at ",time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))
main()
time.sleep(300 - ((time.time() - starttime) % 300.0)) # 300 second interval between each new execution
except KeyboardInterrupt:
print('\n\nKeyboard exception received. Exiting.')
exit()'''
from pprint import pprint
def AlphaData_fxintraday(frombase,to,interval):
import requests
import json
from pprint import pprint
global apikey
url="https://www.alphavantage.co/query?function=FX_INTRADAY&from_symbol={}&to_symbol={}&interval={}min&apikey={}".format(frombase,to,interval,apikey)
data=requests.get(url).json()
#pprint(dict(data['Time Series FX ({}min)'.format(interval)]))#['2020-07-31 20:20:00'])#['4. close'])
import pandas as pd
try:
if data:
data=data['Time Series FX ({}min)'.format(interval)]
df=pd.DataFrame(data).T
df['open']=df['1. open']
df['high']=df['2. high']
df['low']=df['3. low']
df['close']=df['4. close']
            df=df.drop(['1. open','2. high','3. low', '4. close'], axis=1)
            # Cast OHLC columns to numeric so the candle helpers above can operate on these frames.
            df[['open','high','low','close']] = df[['open','high','low','close']].apply(pd.to_numeric)
            return df#data['Time Series FX ({}min)'.format(interval)]
except:
print("An exception occurred")
frombase=['EUR','USD','GBP','AUD','EUR']
to=['USD','JPY','CAD','CNY','CHF','HKD','GBP','KRW']
'''
for j in frombase:
for i in to:
pprint('{}/{} in process'.format(i,j))
data=AlphaData_intraday(i,j,60)
pprint('{}/{} Done'.format(i,j))
time.sleep(30)
'''
def AlphaData_fxdaily(frombase,to):
import requests
import json
from pprint import pprint
global apikey
url="https://www.alphavantage.co/query?function=FX_DAILY&from_symbol={}&to_symbol={}&apikey={}".format(frombase,to,apikey)
#url="https://www.alphavantage.co/query?function=FX_INTRADAY&from_symbol={}&to_symbol={}&interval={}min&apikey={}".format(frombase,to,interval,apikey)
data=requests.get(url).json()
#pprint(dict(data['Time Series FX ({}min)'.format(interval)]))#['2020-07-31 20:20:00'])#['4. close'])
import pandas as pd
try:
if data:
data=data['Time Series FX (Daily)']
df=pd.DataFrame(data).T
df['open']=df['1. open']
df['high']=df['2. high']
df['low']=df['3. low']
df['close']=df['4. close']
            df=df.drop(['1. open','2. high','3. low', '4. close'], axis=1)
            # Cast OHLC columns to numeric so the candle helpers above can operate on these frames.
            df[['open','high','low','close']] = df[['open','high','low','close']].apply(pd.to_numeric)
            return df#data['Time Series FX ({}min)'.format(interval)]
except:
print("An exception occurred")
'''
for j in frombase:
for i in to:
pprint('{}/{} in process'.format(i,j))
dataintra=AlphaData_intraday(i,j,5)
datadaily=AlphaData_daily(i,j)
pprint(dataintra)
if len(dataintra) > 0:
if len(datadaily) > 0 :
pprint(candle_type(dataintra))
#cp = candle_pattern(dataintra,datadaily)
pprint('{}/{} Done'.format(i,j))
time.sleep(5)'''
'''
for j in frombase:
for i in to:
pprint('{}/{} in process'.format(i,j))
data=AlphaData_daily(i,j)
pprint('{}/{} Done'.format(i,j))
time.sleep(5)
'''
def AlphaData_intraday(symbol,interval):
import requests
import json
from pprint import pprint
global apikey
url="https://www.alphavantage.co/query?function=TIME_SERIES_INTRADAY&symbol={}&interval={}min&apikey={}".format(symbol,interval,apikey)
data=requests.get(url).json()
#pprint(dict(data['Time Series FX ({}min)'.format(interval)]))#['2020-07-31 20:20:00'])#['4. close'])
import pandas as pd
try:
if data:
data=data['Time Series ({}min)'.format(interval)]
df=pd.DataFrame(data).T
df['open']=df['1. open']
df['high']=df['2. high']
df['low']=df['3. low']
df['close']=df['4. close']
            df['volume']=df['5. volume']
df=df.drop(['1. open','2. high','3. low', '4. close','5. volume'], axis=1)
df['open']=pd.to_numeric(df['open'])
df['high']=pd.to_numeric(df['high'])
df['low']=pd.to_numeric(df['low'])
df['close']=pd.to_numeric(df['close'])
df['volume']=pd.to_numeric(df['volume'])
return df#data['Time Series FX ({}min)'.format(interval)]
except:
print("An exception occurred")
def AlphaData_daily(symbol):
import requests
import json
from pprint import pprint
global apikey
url="https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol={}&apikey={}".format(symbol,apikey)
#url="https://www.alphavantage.co/query?function=FX_INTRADAY&from_symbol={}&to_symbol={}&interval={}min&apikey={}".format(frombase,to,interval,apikey)
data=requests.get(url).json()
#pprint(dict(data['Time Series FX ({}min)'.format(interval)]))#['2020-07-31 20:20:00'])#['4. close'])
import pandas as pd
try:
if data:
data=data['Time Series (Daily)']
df=pd.DataFrame(data).T
df['open']=df['1. open']
df['high']=df['2. high']
df['low']=df['3. low']
df['close']=df['4. close']
df['volume']=df['5. volume']
df=df.drop(['1. open','2. high','3. low', '4. close','5. volume'], axis=1)
df['open']=pd.to_numeric(df['open'])
df['high']=pd.to_numeric(df['high'])
df['low']=pd.to_numeric(df['low'])
df['close']=pd.to_numeric(df['close'])
df['volume']=pd.to_numeric(df['volume'])
return df#data['Time Series FX ({}min)'.format(interval)]
except:
print("An exception occurred")
''''
for i in to:
pprint('{}/{} in process'.format(i,j))
dataintra=AlphaData_intraday(i,5)
datadaily=AlphaData_daily(i)
pprint(dataintra)
if len(dataintra) > 0:
if len(datadaily) > 0 :
pprint(candle_type(dataintra))
#cp = candle_pattern(dataintra,datadaily)
pprint('{}/{} Done'.format(i,j))
time.sleep(5)'''
ticks=['atvi','adbe','amd','alxn','algn','goog','googl','amzn','amgn','adi','anss','aapl','amat','asml','adsk','adp','bidu','biib','bmrn','bkng','avgo','cdns','cdw','cern','chtr','chkp','ctas','csco','ctxs','ctsh','cmcsa','cprt','cost','csx','dxcm','docu','dltr','ebay','ea','exc','expe','fb','fast','fisv','fox','foxa','gild','idxx','ilmn','incy','intc','intu','isrg','jd','klac','lrcx','lbtya','lbtyk','lulu','mar','mxim','meli','mchp','mu','msft','mrna','mdlz','mnst','ntap','ntes','nflx','nvda','nxpi','orly','pcar','payx','pypl','pep','qcom','regn','rost','sgen','siri','swks','splk','sbux','snps','tmus','ttwo','tsla','txn','khc','tcom','ulta','vrsn','vrsk','vrtx','wba','wdc','wday','xel','xlnx','zm']
patterns=['Two Crows',
'Three Black Crows',
'Three Inside Up/Down',
'Three-Line Strike',
'Three Outside Up/Down',
'Three Stars In The South',
'Three Advancing White Soldiers',
'Abandoned Baby',
'Advance Block',
'Belt-hold',
'Breakaway',
'Closing Marubozu',
'Concealing Baby Swallow',
'Counterattack',
'Dark Cloud Cover',
'Doji',
'Doji Star',
'Dragonfly Doji',
'Engulfing Pattern',
'Evening Doji Star',
'Evening Star',
'Up/Down-gap side-by-side white lines',
'Gravestone Doji',
'Hammer',
'Hanging Man',
'Harami Pattern',
'Harami Cross Pattern',
'High-Wave Candle',
'Hikkake Pattern',
'Modified Hikkake Pattern',
'Homing Pigeon',
'Identical Three Crows',
'In-Neck Pattern',
'Inverted Hammer',
'Kicking',
'Kicking - bull/bear',
'Ladder Bottom',
'Long Legged Doji',
'Long Line Candle',
'Marubozu',
'Matching Low',
'Mat Hold',
'Morning Doji Star',
'Morning Star',
'On-Neck Pattern',
'Piercing Pattern',
'Rickshaw Man',
'Rising/Falling Three Methods',
'Separating Lines',
'Shooting Star',
'Short Line Candle',
'Spinning Top',
'Stalled Pattern',
'Stick Sandwich',
'Takuri',
'Tasuki Gap',
'Thrusting Pattern',
'Tristar Pattern',
'Unique 3 River',
'Upside Gap Two Crows',
'Upside/Downside Gap Three Methods']
def texterconversion(text):
tex=text.replace('/','').replace('-','_').replace(' ','_').replace('(','').replace(')','')
return tex
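# Worked example (editor note): texterconversion('Three Inside Up/Down') returns
# 'Three_Inside_UpDown', which is the identifier checked in `technical_lib` below; applying it
# to every entry of `patterns` yields the full set of branch names in the same way.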
def technical_lib(technical,df):
open=df['open']
high=df['high']
low=df['low']
close=df['close']
if technical == 'Two_Crows':
tech=Two_Crows(open,high,low,close)
tech=pd.DataFrame(tech)
tech[0]=(np.where(tech[0] > 0,'{}_bullish'.format(technical),np.where(tech[0] < 0,'{}_bearish'.format(technical),0)))
elif technical == 'Three_Black_Crows':
tech=Three_Black_Crows(open,high,low,close)
tech=pd.DataFrame(tech)
tech[0]=(np.where(tech[0] > 0,'{}_bullish'.format(technical),np.where(tech[0] < 0,'{}_bearish'.format(technical),0)))
elif technical == 'Three_Inside_UpDown':
tech=Three_Inside_UpDown(open,high,low,close)
tech=pd.DataFrame(tech)
tech[0]=(np.where(tech[0] > 0,'{}_bullish'.format(technical),np.where(tech[0] < 0,'{}_bearish'.format(technical),0)))
elif technical == 'Three_Line_Strike':
tech=Three_Line_Strike(open,high,low,close)
tech=pd.DataFrame(tech)
tech[0]=(np.where(tech[0] > 0,'{}_bullish'.format(technical),np.where(tech[0] < 0,'{}_bearish'.format(technical),0)))
elif technical == 'Three_Outside_UpDown':
tech=Three_Outside_UpDown(open,high,low,close)
tech=pd.DataFrame(tech)
tech[0]=(np.where(tech[0] > 0,'{}_bullish'.format(technical),np.where(tech[0] < 0,'{}_bearish'.format(technical),0)))
elif technical == 'Three_Stars_In_The_South':
tech=Three_Stars_In_The_South(open,high,low,close)
tech=pd.DataFrame(tech)
tech[0]=(np.where(tech[0] > 0,'{}_bullish'.format(technical),np.where(tech[0] < 0,'{}_bearish'.format(technical),0)))
elif technical == 'Three_Advancing_White_Soldiers':
tech=Three_Advancing_White_Soldiers(open,high,low,close)
tech=pd.DataFrame(tech)
tech[0]=(np.where(tech[0] > 0,'{}_bullish'.format(technical),np.where(tech[0] < 0,'{}_bearish'.format(technical),0)))
elif technical == 'Abandoned_Baby':
tech=Abandoned_Baby(open,high,low,close)
tech=pd.DataFrame(tech)
tech[0]=(np.where(tech[0] > 0,'{}_bullish'.format(technical),np.where(tech[0] < 0,'{}_bearish'.format(technical),0)))
elif technical == 'Advance_Block':
tech=Advance_Block(open,high,low,close)
tech=pd.DataFrame(tech)
tech[0]=(np.where(tech[0] > 0,'{}_bullish'.format(technical),np.where(tech[0] < 0,'{}_bearish'.format(technical),0)))
elif technical == 'Belt_hold':
tech=Belt_hold(open,high,low,close)
tech=pd.DataFrame(tech)
tech[0]=(np.where(tech[0] > 0,'{}_bullish'.format(technical),np.where(tech[0] < 0,'{}_bearish'.format(technical),0)))
elif technical == 'Breakaway':
tech=Breakaway(open,high,low,close)
tech=pd.DataFrame(tech)
tech[0]=(np.where(tech[0] > 0,'{}_bullish'.format(technical),np.where(tech[0] < 0,'{}_bearish'.format(technical),0)))
elif technical == 'Closing_Marubozu':
tech=Closing_Marubozu(open,high,low,close)
tech=pd.DataFrame(tech)
tech[0]=(np.where(tech[0] > 0,'{}_bullish'.format(technical),np.where(tech[0] < 0,'{}_bearish'.format(technical),0)))
elif technical == 'Concealing_Baby_Swallow':
tech=Concealing_Baby_Swallow(open,high,low,close)
tech=pd.DataFrame(tech)
tech[0]=(np.where(tech[0] > 0,'{}_bullish'.format(technical),np.where(tech[0] < 0,'{}_bearish'.format(technical),0)))
elif technical == 'Counterattack':
tech=Counterattack(open,high,low,close)
tech=pd.DataFrame(tech)
tech[0]=(np.where(tech[0] > 0,'{}_bullish'.format(technical),np.where(tech[0] < 0,'{}_bearish'.format(technical),0)))
elif technical == 'Dark_Cloud_Cover':
tech=Dark_Cloud_Cover(open,high,low,close)
tech=pd.DataFrame(tech)
tech[0]=(np.where(tech[0] > 0,'{}_bullish'.format(technical),np.where(tech[0] < 0,'{}_bearish'.format(technical),0)))
elif technical == 'Doji':
tech=Doji(open,high,low,close)
tech=pd.DataFrame(tech)
tech[0]=(np.where(tech[0] > 0,'{}_bullish'.format(technical),np.where(tech[0] < 0,'{}_bearish'.format(technical),0)))
elif technical == 'Doji_Star':
tech=Doji_Star(open,high,low,close)
tech=pd.DataFrame(tech)
tech[0]=(np.where(tech[0] > 0,'{}_bullish'.format(technical),np.where(tech[0] < 0,'{}_bearish'.format(technical),0)))
elif technical == 'Dragonfly_Doji':
tech=Dragonfly_Doji(open,high,low,close)
tech=pd.DataFrame(tech)
tech[0]=(np.where(tech[0] > 0,'{}_bullish'.format(technical),np.where(tech[0] < 0,'{}_bearish'.format(technical),0)))
elif technical == 'Engulfing_Pattern':
tech=Engulfing_Pattern(open,high,low,close)
tech=pd.DataFrame(tech)
tech[0]=(np.where(tech[0] > 0,'{}_bullish'.format(technical),np.where(tech[0] < 0,'{}_bearish'.format(technical),0)))
elif technical == 'Evening_Doji_Star':
tech=Evening_Doji_Star(open,high,low,close)
tech=pd.DataFrame(tech)
tech[0]=(np.where(tech[0] > 0,'{}_bullish'.format(technical),np.where(tech[0] < 0,'{}_bearish'.format(technical),0)))
elif technical == 'Evening_Star':
tech=Evening_Star(open,high,low,close)
tech=pd.DataFrame(tech)
tech[0]=(np.where(tech[0] > 0,'{}_bullish'.format(technical),np.where(tech[0] < 0,'{}_bearish'.format(technical),0)))
elif technical == 'UpDown_gap_side_by_side_white_lines':
tech=UpDown_gap_side_by_side_white_lines(open,high,low,close)
tech=pd.DataFrame(tech)
tech[0]=(np.where(tech[0] > 0,'{}_bullish'.format(technical),np.where(tech[0] < 0,'{}_bearish'.format(technical),0)))
elif technical == 'Gravestone_Doji':
tech=Gravestone_Doji(open,high,low,close)
tech=pd.DataFrame(tech)
tech[0]=(np.where(tech[0] > 0,'{}_bullish'.format(technical),np.where(tech[0] < 0,'{}_bearish'.format(technical),0)))
elif technical == 'Hammer':
tech=Hammer(open,high,low,close)
tech=pd.DataFrame(tech)
tech[0]=(np.where(tech[0] > 0,'{}_bullish'.format(technical),np.where(tech[0] < 0,'{}_bearish'.format(technical),0)))
elif technical == 'Hanging_Man':
tech=Hanging_Man(open,high,low,close)
tech=pd.DataFrame(tech)
tech[0]=(np.where(tech[0] > 0,'{}_bullish'.format(technical),np.where(tech[0] < 0,'{}_bearish'.format(technical),0)))
elif technical == 'Harami_Pattern':
tech=Harami_Pattern(open,high,low,close)
tech= | pd.DataFrame(tech) | pandas.DataFrame |
import numpy as np
from numpy.testing import assert_equal
import pandas as pd
from pandas.testing import assert_frame_equal, assert_series_equal
import pytest
from linearmodels.iv.data import IVData
try:
import xarray as xr
MISSING_XARRAY = False
except ImportError:
MISSING_XARRAY = True
def test_numpy_2d() -> None:
x = np.empty((10, 2))
xdh = IVData(x)
assert xdh.ndim == x.ndim
assert xdh.cols == ["x.0", "x.1"]
assert xdh.rows == list(np.arange(10))
assert_equal(xdh.ndarray, x)
df = pd.DataFrame(x, columns=xdh.cols, index=xdh.rows)
assert_frame_equal(xdh.pandas, df)
assert xdh.shape == (10, 2)
assert xdh.labels == {0: xdh.rows, 1: xdh.cols}
def test_numpy_1d() -> None:
x = np.empty(10)
xdh = IVData(x)
assert xdh.ndim == 2
assert xdh.cols == ["x"]
assert xdh.rows == list(np.arange(10))
assert_equal(xdh.ndarray, x[:, None])
df = pd.DataFrame(x[:, None], columns=xdh.cols, index=xdh.rows)
assert_frame_equal(xdh.pandas, df)
assert xdh.shape == (10, 1)
def test_pandas_df_numeric() -> None:
x = np.empty((10, 2))
index = pd.date_range("2017-01-01", periods=10)
xdf = pd.DataFrame(x, columns=["a", "b"], index=index)
xdh = IVData(xdf)
assert xdh.ndim == 2
assert xdh.cols == list(xdf.columns)
assert xdh.rows == list(xdf.index)
assert_equal(xdh.ndarray, x)
df = pd.DataFrame(x, columns=xdh.cols, index=xdh.rows).asfreq("D")
assert_frame_equal(xdh.pandas, df)
assert xdh.shape == (10, 2)
def test_pandas_series_numeric() -> None:
x = np.empty(10)
index = pd.date_range("2017-01-01", periods=10)
xs = pd.Series(x, name="charlie", index=index)
xdh = IVData(xs)
assert xdh.ndim == 2
assert xdh.cols == [xs.name]
assert xdh.rows == list(xs.index)
assert_equal(xdh.ndarray, x[:, None])
df = pd.DataFrame(x[:, None], columns=xdh.cols, index=xdh.rows).asfreq("D")
assert_frame_equal(xdh.pandas, df)
assert xdh.shape == (10, 1)
@pytest.mark.skipif(MISSING_XARRAY, reason="xarray not installed")
def test_xarray_1d() -> None:
x_np = np.random.randn(10)
x = xr.DataArray(x_np)
dh = IVData(x, "some_variable")
assert_equal(dh.ndarray, x_np[:, None])
assert dh.rows == list(np.arange(10))
assert dh.cols == ["some_variable.0"]
expected = pd.DataFrame(x_np, columns=dh.cols, index=dh.rows)
assert_frame_equal(expected, dh.pandas)
index = pd.date_range("2017-01-01", periods=10)
x = xr.DataArray(x_np, [("time", index)])
dh = IVData(x, "some_variable")
assert_equal(dh.ndarray, x_np[:, None])
assert_series_equal(pd.Series(dh.rows), pd.Series(list(index)))
assert dh.cols == ["some_variable.0"]
expected = pd.DataFrame(x_np[:, None], columns=dh.cols, index=dh.rows)
assert_frame_equal(expected, dh.pandas)
@pytest.mark.skipif(MISSING_XARRAY, reason="xarray not installed")
def test_xarray_2d() -> None:
x_np = np.random.randn(10, 2)
x = xr.DataArray(x_np)
dh = IVData(x)
assert_equal(dh.ndarray, x_np)
assert dh.rows == list(np.arange(10))
assert dh.cols == ["x.0", "x.1"]
expected = pd.DataFrame(x_np, columns=dh.cols, index=dh.rows)
assert_frame_equal(expected, dh.pandas)
index = pd.date_range("2017-01-01", periods=10)
x = xr.DataArray(x_np, [("time", index), ("variables", ["apple", "banana"])])
dh = IVData(x)
assert_equal(dh.ndarray, x_np)
assert_series_equal(pd.Series(dh.rows), pd.Series(list(index)))
assert dh.cols == ["apple", "banana"]
expected = pd.DataFrame(x_np, columns=dh.cols, index=dh.rows)
assert_frame_equal(expected, dh.pandas)
def test_invalid_types() -> None:
with pytest.raises(ValueError):
IVData(np.empty((1, 1, 1)))
with pytest.raises(ValueError):
IVData(np.empty((10, 2, 2)))
with pytest.raises(TypeError):
class AnotherClass(object):
_ndim = 2
@property
def ndim(self) -> int:
return self._ndim
IVData(AnotherClass())
def test_string_cat_equiv() -> None:
s1 = pd.Series(["a", "b", "a", "b", "c", "d", "a", "b"])
s2 = pd.Series(np.arange(8.0))
s3 = pd.Series(
["apple", "banana", "apple", "banana", "cherry", "date", "apple", "banana"]
)
df = pd.DataFrame({"string": s1, "number": s2, "other_string": s3})
dh = IVData(df)
df_cat = df.copy()
df_cat["string"] = df_cat["string"].astype("category")
dh_cat = IVData(df_cat)
| assert_frame_equal(dh.pandas, dh_cat.pandas) | pandas.testing.assert_frame_equal |
from datetime import timedelta
from functools import partial
import itertools
from parameterized import parameterized
import numpy as np
from numpy.testing import assert_array_equal, assert_almost_equal
import pandas as pd
from toolz import merge
from zipline.pipeline import SimplePipelineEngine, Pipeline, CustomFactor
from zipline.pipeline.common import (
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
SID_FIELD_NAME,
TS_FIELD_NAME,
)
from zipline.pipeline.data import DataSet
from zipline.pipeline.data import Column
from zipline.pipeline.domain import EquitySessionDomain
from zipline.pipeline.loaders.earnings_estimates import (
NextEarningsEstimatesLoader,
NextSplitAdjustedEarningsEstimatesLoader,
normalize_quarters,
PreviousEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
split_normalized_quarters,
)
from zipline.testing.fixtures import (
WithAdjustmentReader,
WithTradingSessions,
ZiplineTestCase,
)
from zipline.testing.predicates import assert_equal
from zipline.testing.predicates import assert_frame_equal
from zipline.utils.numpy_utils import datetime64ns_dtype
from zipline.utils.numpy_utils import float64_dtype
import pytest
class Estimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate = Column(dtype=float64_dtype)
class MultipleColumnsEstimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate1 = Column(dtype=float64_dtype)
estimate2 = Column(dtype=float64_dtype)
def QuartersEstimates(announcements_out):
class QtrEstimates(Estimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def MultipleColumnsQuartersEstimates(announcements_out):
class QtrEstimates(MultipleColumnsEstimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def QuartersEstimatesNoNumQuartersAttr(num_qtr):
class QtrEstimates(Estimates):
name = Estimates
return QtrEstimates
def create_expected_df_for_factor_compute(start_date, sids, tuples, end_date):
"""
Given a list of tuples of new data we get for each sid on each critical
date (when information changes), create a DataFrame that fills that
data through a date range ending at `end_date`.
"""
df = pd.DataFrame(tuples, columns=[SID_FIELD_NAME, "estimate", "knowledge_date"])
df = df.pivot_table(
columns=SID_FIELD_NAME, values="estimate", index="knowledge_date", dropna=False
)
df = df.reindex(pd.date_range(start_date, end_date))
# Index name is lost during reindex.
df.index = df.index.rename("knowledge_date")
df["at_date"] = end_date.tz_localize("utc")
df = df.set_index(["at_date", df.index.tz_localize("utc")]).ffill()
new_sids = set(sids) - set(df.columns)
df = df.reindex(columns=df.columns.union(new_sids))
return df
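# A small worked sketch of the helper above (illustrative values only):
#
#   create_expected_df_for_factor_compute(
#       pd.Timestamp('2015-01-05'), [0, 1],
#       [(0, 100.0, pd.Timestamp('2015-01-06'))],
#       pd.Timestamp('2015-01-07'))
#
# produces a frame indexed by (at_date=2015-01-07 UTC, knowledge_date 01-05..01-07 UTC)
# whose sid-0 column is [NaN, 100.0, 100.0] (forward-filled from the knowledge date)
# and whose sid-1 column is all NaN, since sid 1 never appears in the input tuples.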
class WithEstimates(WithTradingSessions, WithAdjustmentReader):
"""
ZiplineTestCase mixin providing cls.loader and cls.events as class
level fixtures.
Methods
-------
make_loader(events, columns) -> PipelineLoader
Method which returns the loader to be used throughout tests.
events : pd.DataFrame
The raw events to be used as input to the pipeline loader.
columns : dict[str -> str]
The dictionary mapping the names of BoundColumns to the
associated column name in the events DataFrame.
make_columns() -> dict[BoundColumn -> str]
Method which returns a dictionary of BoundColumns mapped to the
associated column names in the raw data.
"""
# Short window defined in order for test to run faster.
START_DATE = pd.Timestamp("2014-12-28", tz="utc")
END_DATE = pd.Timestamp("2015-02-04", tz="utc")
@classmethod
def make_loader(cls, events, columns):
raise NotImplementedError("make_loader")
@classmethod
def make_events(cls):
raise NotImplementedError("make_events")
@classmethod
def get_sids(cls):
return cls.events[SID_FIELD_NAME].unique()
@classmethod
def make_columns(cls):
return {
Estimates.event_date: "event_date",
Estimates.fiscal_quarter: "fiscal_quarter",
Estimates.fiscal_year: "fiscal_year",
Estimates.estimate: "estimate",
}
def make_engine(self, loader=None):
if loader is None:
loader = self.loader
return SimplePipelineEngine(
lambda x: loader,
self.asset_finder,
default_domain=EquitySessionDomain(
self.trading_days,
self.ASSET_FINDER_COUNTRY_CODE,
),
)
@classmethod
def init_class_fixtures(cls):
cls.events = cls.make_events()
cls.ASSET_FINDER_EQUITY_SIDS = cls.get_sids()
cls.ASSET_FINDER_EQUITY_SYMBOLS = [
"s" + str(n) for n in cls.ASSET_FINDER_EQUITY_SIDS
]
# We need to instantiate certain constants needed by supers of
# `WithEstimates` before we call their `init_class_fixtures`.
super(WithEstimates, cls).init_class_fixtures()
cls.columns = cls.make_columns()
# Some tests require `WithAdjustmentReader` to be set up by the time we
# make the loader.
cls.loader = cls.make_loader(
cls.events, {column.name: val for column, val in cls.columns.items()}
)
class WithOneDayPipeline(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
    test_load_one_day()
        Tests that a one-day Pipeline over a multi-column estimates dataset
        returns the expected values for every requested column.
"""
@classmethod
def make_columns(cls):
return {
MultipleColumnsEstimates.event_date: "event_date",
MultipleColumnsEstimates.fiscal_quarter: "fiscal_quarter",
MultipleColumnsEstimates.fiscal_year: "fiscal_year",
MultipleColumnsEstimates.estimate1: "estimate1",
MultipleColumnsEstimates.estimate2: "estimate2",
}
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp("2015-01-01"), pd.Timestamp("2015-01-06")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-10"),
pd.Timestamp("2015-01-20"),
],
"estimate1": [1.0, 2.0],
"estimate2": [3.0, 4.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015],
}
)
@classmethod
def make_expected_out(cls):
raise NotImplementedError("make_expected_out")
@classmethod
def init_class_fixtures(cls):
super(WithOneDayPipeline, cls).init_class_fixtures()
cls.sid0 = cls.asset_finder.retrieve_asset(0)
cls.expected_out = cls.make_expected_out()
def test_load_one_day(self):
# We want to test multiple columns
dataset = MultipleColumnsQuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=pd.Timestamp("2015-01-15", tz="utc"),
end_date=pd.Timestamp("2015-01-15", tz="utc"),
)
assert_frame_equal(results.sort_index(1), self.expected_out.sort_index(1))
class PreviousWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
    Tests loading a single day of estimates data through the previous-quarter
    loader when multiple estimate columns are requested.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp("2015-01-10"),
"estimate1": 1.0,
"estimate2": 3.0,
FISCAL_QUARTER_FIELD_NAME: 1.0,
FISCAL_YEAR_FIELD_NAME: 2015.0,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp("2015-01-15", tz="utc"), cls.sid0),)
),
)
class NextWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
    Tests loading a single day of estimates data through the next-quarter
    loader when multiple estimate columns are requested.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp("2015-01-20"),
"estimate1": 2.0,
"estimate2": 4.0,
FISCAL_QUARTER_FIELD_NAME: 2.0,
FISCAL_YEAR_FIELD_NAME: 2015.0,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp("2015-01-15", tz="utc"), cls.sid0),)
),
)
dummy_df = pd.DataFrame(
{SID_FIELD_NAME: 0},
columns=[
SID_FIELD_NAME,
TS_FIELD_NAME,
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
"estimate",
],
index=[0],
)
class WithWrongLoaderDefinition(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
Tests that the loader throws an AssertionError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_events(cls):
return dummy_df
def test_wrong_num_announcements_passed(self):
bad_dataset1 = QuartersEstimates(-1)
bad_dataset2 = QuartersEstimates(-2)
good_dataset = QuartersEstimates(1)
engine = self.make_engine()
columns = {
c.name + str(dataset.num_announcements): c.latest
for dataset in (bad_dataset1, bad_dataset2, good_dataset)
for c in dataset.columns
}
p = Pipeline(columns)
err_msg = (
r"Passed invalid number of quarters -[0-9],-[0-9]; "
r"must pass a number of quarters >= 0"
)
with pytest.raises(ValueError, match=err_msg):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
def test_no_num_announcements_attr(self):
dataset = QuartersEstimatesNoNumQuartersAttr(1)
engine = self.make_engine()
p = Pipeline({c.name: c.latest for c in dataset.columns})
with pytest.raises(AttributeError):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
class PreviousWithWrongNumQuarters(WithWrongLoaderDefinition, ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
class NextWithWrongNumQuarters(WithWrongLoaderDefinition, ZiplineTestCase):
"""
Tests that next quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
options = [
"split_adjustments_loader",
"split_adjusted_column_names",
"split_adjusted_asof",
]
class WrongSplitsLoaderDefinition(WithEstimates, ZiplineTestCase):
"""
Test class that tests that loaders break correctly when incorrectly
instantiated.
Tests
-----
test_extra_splits_columns_passed(SplitAdjustedEstimatesLoader)
A test that checks that the loader correctly breaks when an
unexpected column is passed in the list of split-adjusted columns.
"""
@classmethod
def init_class_fixtures(cls):
super(WithEstimates, cls).init_class_fixtures()
@parameterized.expand(
itertools.product(
(
NextSplitAdjustedEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
),
)
)
def test_extra_splits_columns_passed(self, loader):
columns = {
Estimates.event_date: "event_date",
Estimates.fiscal_quarter: "fiscal_quarter",
Estimates.fiscal_year: "fiscal_year",
Estimates.estimate: "estimate",
}
with pytest.raises(ValueError):
loader(
dummy_df,
{column.name: val for column, val in columns.items()},
split_adjustments_loader=self.adjustment_reader,
split_adjusted_column_names=["estimate", "extra_col"],
split_adjusted_asof=pd.Timestamp("2015-01-01"),
)
class WithEstimatesTimeZero(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
cls.events : pd.DataFrame
Generated dynamically in order to test inter-leavings of estimates and
event dates for multiple quarters to make sure that we select the
right immediate 'next' or 'previous' quarter relative to each date -
i.e., the right 'time zero' on the timeline. We care about selecting
the right 'time zero' because we use that to calculate which quarter's
data needs to be returned for each day.
Methods
-------
get_expected_estimate(q1_knowledge,
q2_knowledge,
comparable_date) -> pd.DataFrame
Retrieves the expected estimate given the latest knowledge about each
quarter and the date on which the estimate is being requested. If
there is no expected estimate, returns an empty DataFrame.
Tests
------
test_estimates()
Tests that we get the right 'time zero' value on each day for each
sid and for each column.
"""
# Shorter date range for performance
END_DATE = pd.Timestamp("2015-01-28", tz="utc")
q1_knowledge_dates = [
pd.Timestamp("2015-01-01"),
pd.Timestamp("2015-01-04"),
pd.Timestamp("2015-01-07"),
pd.Timestamp("2015-01-11"),
]
q2_knowledge_dates = [
pd.Timestamp("2015-01-14"),
pd.Timestamp("2015-01-17"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-23"),
]
# We want to model the possibility of an estimate predicting a release date
# that doesn't match the actual release. This could be done by dynamically
# generating more combinations with different release dates, but that
# significantly increases the amount of time it takes to run the tests.
# These hard-coded cases are sufficient to know that we can update our
# beliefs when we get new information.
q1_release_dates = [
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-14"),
] # One day late
q2_release_dates = [
pd.Timestamp("2015-01-25"), # One day early
pd.Timestamp("2015-01-26"),
]
@classmethod
def make_events(cls):
"""
In order to determine which estimate we care about for a particular
sid, we need to look at all estimates that we have for that sid and
their associated event dates.
We define q1 < q2, and thus event1 < event2 since event1 occurs
during q1 and event2 occurs during q2 and we assume that there can
only be 1 event per quarter. We assume that there can be multiple
estimates per quarter leading up to the event. We assume that estimates
will not surpass the relevant event date. We will look at 2 estimates
for an event before the event occurs, since that is the simplest
scenario that covers the interesting edge cases:
- estimate values changing
- a release date changing
- estimates for different quarters interleaving
Thus, we generate all possible inter-leavings of 2 estimates per
quarter-event where estimate1 < estimate2 and all estimates are < the
relevant event and assign each of these inter-leavings to a
different sid.
"""
sid_estimates = []
sid_releases = []
# We want all permutations of 2 knowledge dates per quarter.
it = enumerate(
itertools.permutations(cls.q1_knowledge_dates + cls.q2_knowledge_dates, 4)
)
for sid, (q1e1, q1e2, q2e1, q2e2) in it:
# We're assuming that estimates must come before the relevant
# release.
if (
q1e1 < q1e2
and q2e1 < q2e2
# All estimates are < Q2's event, so just constrain Q1
# estimates.
and q1e1 < cls.q1_release_dates[0]
and q1e2 < cls.q1_release_dates[0]
):
sid_estimates.append(
cls.create_estimates_df(q1e1, q1e2, q2e1, q2e2, sid)
)
sid_releases.append(cls.create_releases_df(sid))
return pd.concat(sid_estimates + sid_releases).reset_index(drop=True)
@classmethod
def get_sids(cls):
sids = cls.events[SID_FIELD_NAME].unique()
# Tack on an extra sid to make sure that sids with no data are
# included but have all-null columns.
return list(sids) + [max(sids) + 1]
@classmethod
def create_releases_df(cls, sid):
# Final release dates never change. The quarters have very tight date
# ranges in order to reduce the number of dates we need to iterate
# through when testing.
return pd.DataFrame(
{
TS_FIELD_NAME: [pd.Timestamp("2015-01-13"), pd.Timestamp("2015-01-26")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-26"),
],
"estimate": [0.5, 0.8],
FISCAL_QUARTER_FIELD_NAME: [1.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0],
SID_FIELD_NAME: sid,
}
)
@classmethod
def create_estimates_df(cls, q1e1, q1e2, q2e1, q2e2, sid):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: cls.q1_release_dates + cls.q2_release_dates,
"estimate": [0.1, 0.2, 0.3, 0.4],
FISCAL_QUARTER_FIELD_NAME: [1.0, 1.0, 2.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0, 2015.0, 2015.0],
TS_FIELD_NAME: [q1e1, q1e2, q2e1, q2e2],
SID_FIELD_NAME: sid,
}
)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
return pd.DataFrame()
def test_estimates(self):
dataset = QuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=self.trading_days[1],
end_date=self.trading_days[-2],
)
for sid in self.ASSET_FINDER_EQUITY_SIDS:
sid_estimates = results.xs(sid, level=1)
# Separate assertion for all-null DataFrame to avoid setting
# column dtypes on `all_expected`.
if sid == max(self.ASSET_FINDER_EQUITY_SIDS):
assert sid_estimates.isnull().all().all()
else:
ts_sorted_estimates = self.events[
self.events[SID_FIELD_NAME] == sid
].sort_values(TS_FIELD_NAME)
q1_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 1
]
q2_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 2
]
all_expected = pd.concat(
[
self.get_expected_estimate(
q1_knowledge[
q1_knowledge[TS_FIELD_NAME] <= date.tz_localize(None)
],
q2_knowledge[
q2_knowledge[TS_FIELD_NAME] <= date.tz_localize(None)
],
date.tz_localize(None),
).set_index([[date]])
for date in sid_estimates.index
],
axis=0,
)
sid_estimates.index = all_expected.index.copy()
assert_equal(all_expected[sid_estimates.columns], sid_estimates)
class NextEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
# If our latest knowledge of q1 is that the release is
# happening on this simulation date or later, then that's
# the estimate we want to use.
if (
not q1_knowledge.empty
and q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >= comparable_date
):
return q1_knowledge.iloc[-1:]
# If q1 has already happened or we don't know about it
# yet and our latest knowledge indicates that q2 hasn't
# happened yet, then that's the estimate we want to use.
elif (
not q2_knowledge.empty
and q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >= comparable_date
):
return q2_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns, index=[comparable_date])
class PreviousEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
# The expected estimate will be for q2 if the last thing
# we've seen is that the release date already happened.
# Otherwise, it'll be for q1, as long as the release date
# for q1 has already happened.
if (
not q2_knowledge.empty
and q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <= comparable_date
):
return q2_knowledge.iloc[-1:]
elif (
not q1_knowledge.empty
and q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <= comparable_date
):
return q1_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns, index=[comparable_date])
class WithEstimateMultipleQuarters(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events, cls.make_expected_out as
class-level fixtures and self.test_multiple_qtrs_requested as a test.
Attributes
----------
events : pd.DataFrame
Simple DataFrame with estimates for 2 quarters for a single sid.
Methods
-------
make_expected_out() --> pd.DataFrame
Returns the DataFrame that is expected as a result of running a
Pipeline where estimates are requested for multiple quarters out.
fill_expected_out(expected)
Fills the expected DataFrame with data.
Tests
------
test_multiple_qtrs_requested()
        Runs a Pipeline that requests estimates for multiple quarters out and
        checks that the returned columns contain data for the correct number
        of quarters out.
"""
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp("2015-01-01"), pd.Timestamp("2015-01-06")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-10"),
pd.Timestamp("2015-01-20"),
],
"estimate": [1.0, 2.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015],
}
)
@classmethod
def init_class_fixtures(cls):
super(WithEstimateMultipleQuarters, cls).init_class_fixtures()
cls.expected_out = cls.make_expected_out()
@classmethod
def make_expected_out(cls):
expected = pd.DataFrame(
columns=[cls.columns[col] + "1" for col in cls.columns]
+ [cls.columns[col] + "2" for col in cls.columns],
index=cls.trading_days,
)
for (col, raw_name), suffix in itertools.product(
cls.columns.items(), ("1", "2")
):
expected_name = raw_name + suffix
if col.dtype == datetime64ns_dtype:
expected[expected_name] = pd.to_datetime(expected[expected_name])
else:
expected[expected_name] = expected[expected_name].astype(col.dtype)
cls.fill_expected_out(expected)
return expected.reindex(cls.trading_days)
def test_multiple_qtrs_requested(self):
dataset1 = QuartersEstimates(1)
dataset2 = QuartersEstimates(2)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline(
merge(
[
{c.name + "1": c.latest for c in dataset1.columns},
{c.name + "2": c.latest for c in dataset2.columns},
]
)
),
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
q1_columns = [col.name + "1" for col in self.columns]
q2_columns = [col.name + "2" for col in self.columns]
# We now expect a column for 1 quarter out and a column for 2
# quarters out for each of the dataset columns.
assert_equal(
sorted(np.array(q1_columns + q2_columns)), sorted(results.columns.values)
)
assert_equal(
self.expected_out.sort_index(axis=1),
results.xs(0, level=1).sort_index(axis=1),
)
class NextEstimateMultipleQuarters(WithEstimateMultipleQuarters, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp(
"2015-01-11", tz="UTC"
),
raw_name + "1",
] = cls.events[raw_name].iloc[0]
expected.loc[
pd.Timestamp("2015-01-11", tz="UTC") : pd.Timestamp(
"2015-01-20", tz="UTC"
),
raw_name + "1",
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
# We only have an estimate and event date for 2 quarters out before
# Q1's event happens; after Q1's event, we know 1 Q out but not 2 Qs
# out.
for col_name in ["estimate", "event_date"]:
expected.loc[
pd.Timestamp("2015-01-06", tz="UTC") : pd.Timestamp(
"2015-01-10", tz="UTC"
),
col_name + "2",
] = cls.events[col_name].iloc[1]
# But we know what FQ and FY we'd need in both Q1 and Q2
# because we know which FQ is next and can calculate from there
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp("2015-01-09", tz="UTC"),
FISCAL_QUARTER_FIELD_NAME + "2",
] = 2
expected.loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC"),
FISCAL_QUARTER_FIELD_NAME + "2",
] = 3
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC"),
FISCAL_YEAR_FIELD_NAME + "2",
] = 2015
return expected
class PreviousEstimateMultipleQuarters(WithEstimateMultipleQuarters, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected[raw_name + "1"].loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp(
"2015-01-19", tz="UTC"
)
] = cls.events[raw_name].iloc[0]
expected[raw_name + "1"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
for col_name in ["estimate", "event_date"]:
expected[col_name + "2"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = cls.events[col_name].iloc[0]
expected[FISCAL_QUARTER_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC")
] = 4
expected[FISCAL_YEAR_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC")
] = 2014
expected[FISCAL_QUARTER_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = 1
expected[FISCAL_YEAR_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = 2015
return expected
class WithVaryingNumEstimates(WithEstimates):
"""
ZiplineTestCase mixin providing fixtures and a test to ensure that we
have the correct overwrites when the event date changes. We want to make
sure that if we have a quarter with an event date that gets pushed back,
we don't start overwriting for the next quarter early. Likewise,
if we have a quarter with an event date that gets pushed forward, we want
to make sure that we start applying adjustments at the appropriate, earlier
date, rather than the later date.
Methods
-------
assert_compute()
Defines how to determine that results computed for the `SomeFactor`
factor are correct.
Tests
-----
test_windows_with_varying_num_estimates()
Tests that we create the correct overwrites from 2015-01-13 to
2015-01-14 regardless of how event dates were updated for each
quarter for each sid.
"""
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 3 + [1] * 3,
TS_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-13"),
]
* 2,
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-20"),
],
"estimate": [11.0, 12.0, 21.0] * 2,
FISCAL_QUARTER_FIELD_NAME: [1, 1, 2] * 2,
FISCAL_YEAR_FIELD_NAME: [2015] * 6,
}
)
@classmethod
def assert_compute(cls, estimate, today):
raise NotImplementedError("assert_compute")
def test_windows_with_varying_num_estimates(self):
dataset = QuartersEstimates(1)
assert_compute = self.assert_compute
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = 3
def compute(self, today, assets, out, estimate):
assert_compute(estimate, today)
engine = self.make_engine()
engine.run_pipeline(
Pipeline({"est": SomeFactor()}),
start_date=pd.Timestamp("2015-01-13", tz="utc"),
# last event date we have
end_date=pd.Timestamp("2015-01-14", tz="utc"),
)
class PreviousVaryingNumEstimates(WithVaryingNumEstimates, ZiplineTestCase):
def assert_compute(self, estimate, today):
if today == pd.Timestamp("2015-01-13", tz="utc"):
assert_array_equal(estimate[:, 0], np.array([np.NaN, np.NaN, 12]))
assert_array_equal(estimate[:, 1], np.array([np.NaN, 12, 12]))
else:
assert_array_equal(estimate[:, 0], np.array([np.NaN, 12, 12]))
assert_array_equal(estimate[:, 1], np.array([12, 12, 12]))
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
class NextVaryingNumEstimates(WithVaryingNumEstimates, ZiplineTestCase):
def assert_compute(self, estimate, today):
if today == pd.Timestamp("2015-01-13", tz="utc"):
assert_array_equal(estimate[:, 0], np.array([11, 12, 12]))
assert_array_equal(estimate[:, 1], np.array([np.NaN, np.NaN, 21]))
else:
assert_array_equal(estimate[:, 0], np.array([np.NaN, 21, 21]))
assert_array_equal(estimate[:, 1], np.array([np.NaN, 21, 21]))
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
class WithEstimateWindows(WithEstimates):
"""
    ZiplineTestCase mixin providing fixtures and a test for running a
Pipeline with an estimates loader over differently-sized windows.
Attributes
----------
events : pd.DataFrame
DataFrame with estimates for 2 quarters for 2 sids.
window_test_start_date : pd.Timestamp
The date from which the window should start.
timelines : dict[int -> pd.DataFrame]
A dictionary mapping to the number of quarters out to
snapshots of how the data should look on each date in the date range.
Methods
-------
make_expected_timelines() -> dict[int -> pd.DataFrame]
Creates a dictionary of expected data. See `timelines`, above.
Tests
-----
test_estimate_windows_at_quarter_boundaries()
Tests that we overwrite values with the correct quarter's estimate at
the correct dates when we have a factor that asks for a window of data.
"""
END_DATE = pd.Timestamp("2015-02-10", tz="utc")
window_test_start_date = pd.Timestamp("2015-01-05")
critical_dates = [
pd.Timestamp("2015-01-09", tz="utc"),
pd.Timestamp("2015-01-15", tz="utc"),
pd.Timestamp("2015-01-20", tz="utc"),
pd.Timestamp("2015-01-26", tz="utc"),
pd.Timestamp("2015-02-05", tz="utc"),
pd.Timestamp("2015-02-10", tz="utc"),
]
# Starting date, number of announcements out.
window_test_cases = list(itertools.product(critical_dates, (1, 2)))
@classmethod
def make_events(cls):
# Typical case: 2 consecutive quarters.
sid_0_timeline = pd.DataFrame(
{
TS_FIELD_NAME: [
cls.window_test_start_date,
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-02-10"),
# We want a case where we get info for a later
# quarter before the current quarter is over but
# after the split_asof_date to make sure that
# we choose the correct date to overwrite until.
pd.Timestamp("2015-01-18"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-02-10"),
pd.Timestamp("2015-02-10"),
pd.Timestamp("2015-04-01"),
],
"estimate": [100.0, 101.0] + [200.0, 201.0] + [400],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2 + [4],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 0,
}
)
# We want a case where we skip a quarter. We never find out about Q2.
sid_10_timeline = pd.DataFrame(
{
TS_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-15"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-22"),
pd.Timestamp("2015-01-22"),
pd.Timestamp("2015-02-05"),
pd.Timestamp("2015-02-05"),
],
"estimate": [110.0, 111.0] + [310.0, 311.0],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [3] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 10,
}
)
# We want to make sure we have correct overwrites when sid quarter
# boundaries collide. This sid's quarter boundaries collide with sid 0.
sid_20_timeline = pd.DataFrame(
{
TS_FIELD_NAME: [
cls.window_test_start_date,
pd.Timestamp("2015-01-07"),
cls.window_test_start_date,
pd.Timestamp("2015-01-17"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-02-10"),
pd.Timestamp("2015-02-10"),
],
"estimate": [120.0, 121.0] + [220.0, 221.0],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 20,
}
)
concatted = pd.concat(
[sid_0_timeline, sid_10_timeline, sid_20_timeline]
).reset_index()
np.random.seed(0)
return concatted.reindex(np.random.permutation(concatted.index))
@classmethod
def get_sids(cls):
sids = sorted(cls.events[SID_FIELD_NAME].unique())
# Add extra sids between sids in our data. We want to test that we
# apply adjustments to the correct sids.
return [
sid for i in range(len(sids) - 1) for sid in range(sids[i], sids[i + 1])
] + [sids[-1]]
@classmethod
def make_expected_timelines(cls):
return {}
@classmethod
def init_class_fixtures(cls):
super(WithEstimateWindows, cls).init_class_fixtures()
cls.create_expected_df_for_factor_compute = partial(
create_expected_df_for_factor_compute,
cls.window_test_start_date,
cls.get_sids(),
)
cls.timelines = cls.make_expected_timelines()
@parameterized.expand(window_test_cases)
def test_estimate_windows_at_quarter_boundaries(
self, start_date, num_announcements_out
):
dataset = QuartersEstimates(num_announcements_out)
trading_days = self.trading_days
timelines = self.timelines
# The window length should be from the starting index back to the first
# date on which we got data. The goal is to ensure that as we
# progress through the timeline, all data we got, starting from that
# first date, is correctly overwritten.
window_len = (
self.trading_days.get_loc(start_date)
- self.trading_days.get_loc(self.window_test_start_date)
+ 1
)
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = window_len
def compute(self, today, assets, out, estimate):
today_idx = trading_days.get_loc(today)
today_timeline = (
timelines[num_announcements_out]
.loc[today]
.reindex(trading_days[: today_idx + 1])
.values
)
timeline_start_idx = len(today_timeline) - window_len
assert_almost_equal(estimate, today_timeline[timeline_start_idx:])
engine = self.make_engine()
engine.run_pipeline(
Pipeline({"est": SomeFactor()}),
start_date=start_date,
# last event date we have
end_date=pd.Timestamp("2015-02-10", tz="utc"),
)
class PreviousEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_previous = pd.concat(
[
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-01-19")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-20")),
],
pd.Timestamp("2015-01-20"),
),
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-20")),
],
pd.Timestamp("2015-01-21"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, 111, pd.Timestamp("2015-01-22")),
(20, 121, pd.Timestamp("2015-01-20")),
],
end_date,
)
for end_date in pd.date_range("2015-01-22", "2015-02-04")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, 311, pd.Timestamp("2015-02-05")),
(20, 121, pd.Timestamp("2015-01-20")),
],
end_date,
)
for end_date in pd.date_range("2015-02-05", "2015-02-09")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 201, pd.Timestamp("2015-02-10")),
(10, 311, pd.Timestamp("2015-02-05")),
(20, 221, pd.Timestamp("2015-02-10")),
],
pd.Timestamp("2015-02-10"),
),
]
)
twoq_previous = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-02-09")
]
# We never get estimates for S1 for 2Q ago because once Q3
# becomes our previous quarter, 2Q ago would be Q2, and we have
# no data on it.
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-02-10")),
(10, np.NaN, pd.Timestamp("2015-02-05")),
(20, 121, pd.Timestamp("2015-02-10")),
],
pd.Timestamp("2015-02-10"),
)
]
)
return {1: oneq_previous, 2: twoq_previous}
class NextEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_next = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-07")),
],
pd.Timestamp("2015-01-09"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-07")),
],
end_date,
)
for end_date in pd.date_range("2015-01-12", "2015-01-19")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 100, cls.window_test_start_date),
(0, 101, pd.Timestamp("2015-01-20")),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-07")),
],
pd.Timestamp("2015-01-20"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
end_date,
)
for end_date in pd.date_range("2015-01-21", "2015-01-22")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, 310, pd.Timestamp("2015-01-09")),
(10, 311, pd.Timestamp("2015-01-15")),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
end_date,
)
for end_date in pd.date_range("2015-01-23", "2015-02-05")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
end_date,
)
for end_date in pd.date_range("2015-02-06", "2015-02-09")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(0, 201, pd.Timestamp("2015-02-10")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
pd.Timestamp("2015-02-10"),
),
]
)
twoq_next = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-01-11")
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-12", "2015-01-16")
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
pd.Timestamp("2015-01-20"),
)
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-21", "2015-02-10")
]
)
return {1: oneq_next, 2: twoq_next}
class WithSplitAdjustedWindows(WithEstimateWindows):
"""
    ZiplineTestCase mixin providing fixtures and a test for running a
Pipeline with an estimates loader over differently-sized windows and with
split adjustments.
"""
split_adjusted_asof_date = pd.Timestamp("2015-01-14")
@classmethod
def make_events(cls):
# Add an extra sid that has a release before the split-asof-date in
# order to test that we're reversing splits correctly in the previous
# case (without an overwrite) and in the next case (with an overwrite).
sid_30 = pd.DataFrame(
{
TS_FIELD_NAME: [
cls.window_test_start_date,
pd.Timestamp("2015-01-09"),
# For Q2, we want it to start early enough
# that we can have several adjustments before
# the end of the first quarter so that we
# can test un-adjusting & readjusting with an
# overwrite.
cls.window_test_start_date,
# We want the Q2 event date to be enough past
# the split-asof-date that we can have
# several splits and can make sure that they
# are applied correctly.
pd.Timestamp("2015-01-20"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-20"),
],
"estimate": [130.0, 131.0, 230.0, 231.0],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 30,
}
)
# An extra sid to test no splits before the split-adjusted-asof-date.
# We want an event before and after the split-adjusted-asof-date &
# timestamps for data points also before and after
        # split-adjusted-asof-date (but also before the split dates, so that
# we can test that splits actually get applied at the correct times).
sid_40 = pd.DataFrame(
{
TS_FIELD_NAME: [pd.Timestamp("2015-01-09"), pd.Timestamp("2015-01-15")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-02-10"),
],
"estimate": [140.0, 240.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 40,
}
)
# An extra sid to test all splits before the
# split-adjusted-asof-date. All timestamps should be before that date
# so that we have cases where we un-apply and re-apply splits.
sid_50 = pd.DataFrame(
{
TS_FIELD_NAME: [pd.Timestamp("2015-01-09"), pd.Timestamp("2015-01-12")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-02-10"),
],
"estimate": [150.0, 250.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 50,
}
)
return pd.concat(
[
# Slightly hacky, but want to make sure we're using the same
# events as WithEstimateWindows.
cls.__base__.make_events(),
sid_30,
sid_40,
sid_50,
]
)
@classmethod
def make_splits_data(cls):
# For sid 0, we want to apply a series of splits before and after the
        # split-adjusted-asof-date as well as between quarters (for the
# previous case, where we won't see any values until after the event
# happens).
sid_0_splits = pd.DataFrame(
{
SID_FIELD_NAME: 0,
"ratio": (-1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 100),
"effective_date": (
pd.Timestamp("2014-01-01"), # Filter out
# Split before Q1 event & after first estimate
pd.Timestamp("2015-01-07"),
# Split before Q1 event
pd.Timestamp("2015-01-09"),
# Split before Q1 event
pd.Timestamp("2015-01-13"),
# Split before Q1 event
pd.Timestamp("2015-01-15"),
# Split before Q1 event
pd.Timestamp("2015-01-18"),
# Split after Q1 event and before Q2 event
pd.Timestamp("2015-01-30"),
# Filter out - this is after our date index
pd.Timestamp("2016-01-01"),
),
}
)
sid_10_splits = pd.DataFrame(
{
SID_FIELD_NAME: 10,
"ratio": (0.2, 0.3),
"effective_date": (
# We want a split before the first estimate and before the
# split-adjusted-asof-date but within our calendar index so
# that we can test that the split is NEVER applied.
pd.Timestamp("2015-01-07"),
# Apply a single split before Q1 event.
pd.Timestamp("2015-01-20"),
),
}
)
# We want a sid with split dates that collide with another sid (0) to
# make sure splits are correctly applied for both sids.
sid_20_splits = pd.DataFrame(
{
SID_FIELD_NAME: 20,
"ratio": (
0.4,
0.5,
0.6,
0.7,
0.8,
0.9,
),
"effective_date": (
pd.Timestamp("2015-01-07"),
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-15"),
pd.Timestamp("2015-01-18"),
pd.Timestamp("2015-01-30"),
),
}
)
# This sid has event dates that are shifted back so that we can test
# cases where an event occurs before the split-asof-date.
sid_30_splits = pd.DataFrame(
{
SID_FIELD_NAME: 30,
"ratio": (8, 9, 10, 11, 12),
"effective_date": (
# Split before the event and before the
# split-asof-date.
pd.Timestamp("2015-01-07"),
# Split on date of event but before the
# split-asof-date.
pd.Timestamp("2015-01-09"),
# Split after the event, but before the
# split-asof-date.
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-15"),
pd.Timestamp("2015-01-18"),
),
}
)
# No splits for a sid before the split-adjusted-asof-date.
sid_40_splits = pd.DataFrame(
{
SID_FIELD_NAME: 40,
"ratio": (13, 14),
"effective_date": (
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-22"),
),
}
)
# No splits for a sid after the split-adjusted-asof-date.
sid_50_splits = pd.DataFrame(
{
SID_FIELD_NAME: 50,
"ratio": (15, 16),
"effective_date": (
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-14"),
),
}
)
return pd.concat(
[
sid_0_splits,
sid_10_splits,
sid_20_splits,
sid_30_splits,
sid_40_splits,
sid_50_splits,
]
)
class PreviousWithSplitAdjustedWindows(WithSplitAdjustedWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousSplitAdjustedEarningsEstimatesLoader(
events,
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=["estimate"],
split_adjusted_asof=cls.split_adjusted_asof_date,
)
@classmethod
def make_expected_timelines(cls):
oneq_previous = pd.concat(
[
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
# Undo all adjustments that haven't happened yet.
(30, 131 * 1 / 10, pd.Timestamp("2015-01-09")),
(40, 140.0, pd.Timestamp("2015-01-09")),
(50, 150 * 1 / 15 * 1 / 16, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-01-12")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131, pd.Timestamp("2015-01-09")),
(40, 140.0, pd.Timestamp("2015-01-09")),
(50, 150.0 * 1 / 16, pd.Timestamp("2015-01-09")),
],
pd.Timestamp("2015-01-13"),
),
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131, pd.Timestamp("2015-01-09")),
(40, 140.0, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
pd.Timestamp("2015-01-14"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131 * 11, pd.Timestamp("2015-01-09")),
(40, 140.0, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-01-15", "2015-01-16")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, np.NaN, cls.window_test_start_date),
(20, 121 * 0.7 * 0.8, pd.Timestamp("2015-01-20")),
(30, 231, pd.Timestamp("2015-01-20")),
(40, 140.0 * 13, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-01-20", "2015-01-21")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, 111 * 0.3, pd.Timestamp("2015-01-22")),
(20, 121 * 0.7 * 0.8, pd.Timestamp("2015-01-20")),
(30, 231, pd.Timestamp("2015-01-20")),
(40, 140.0 * 13 * 14, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-01-22", "2015-01-29")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101 * 7, pd.Timestamp("2015-01-20")),
(10, 111 * 0.3, pd.Timestamp("2015-01-22")),
(20, 121 * 0.7 * 0.8 * 0.9, pd.Timestamp("2015-01-20")),
(30, 231, pd.Timestamp("2015-01-20")),
(40, 140.0 * 13 * 14, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-01-30", "2015-02-04")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101 * 7, pd.Timestamp("2015-01-20")),
(10, 311 * 0.3, pd.Timestamp("2015-02-05")),
(20, 121 * 0.7 * 0.8 * 0.9, pd.Timestamp("2015-01-20")),
(30, 231, pd.Timestamp("2015-01-20")),
(40, 140.0 * 13 * 14, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-02-05", "2015-02-09")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 201, pd.Timestamp("2015-02-10")),
(10, 311 * 0.3, pd.Timestamp("2015-02-05")),
(20, 221 * 0.8 * 0.9, pd.Timestamp("2015-02-10")),
(30, 231, pd.Timestamp("2015-01-20")),
(40, 240.0 * 13 * 14, pd.Timestamp("2015-02-10")),
(50, 250.0, pd.Timestamp("2015-02-10")),
],
pd.Timestamp("2015-02-10"),
),
]
)
twoq_previous = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-01-19")
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131 * 11 * 12, pd.Timestamp("2015-01-20")),
],
end_date,
)
for end_date in pd.date_range("2015-01-20", "2015-02-09")
]
# We never get estimates for S1 for 2Q ago because once Q3
# becomes our previous quarter, 2Q ago would be Q2, and we have
# no data on it.
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 101 * 7, pd.Timestamp("2015-02-10")),
(10, np.NaN, pd.Timestamp("2015-02-05")),
(20, 121 * 0.7 * 0.8 * 0.9, pd.Timestamp("2015-02-10")),
(30, 131 * 11 * 12, pd.Timestamp("2015-01-20")),
(40, 140.0 * 13 * 14, pd.Timestamp("2015-02-10")),
(50, 150.0, pd.Timestamp("2015-02-10")),
],
pd.Timestamp("2015-02-10"),
)
]
)
return {1: oneq_previous, 2: twoq_previous}
class NextWithSplitAdjustedWindows(WithSplitAdjustedWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextSplitAdjustedEarningsEstimatesLoader(
events,
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=["estimate"],
split_adjusted_asof=cls.split_adjusted_asof_date,
)
@classmethod
def make_expected_timelines(cls):
oneq_next = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 100 * 1 / 4, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(20, 120 * 5 / 3, cls.window_test_start_date),
(20, 121 * 5 / 3, pd.Timestamp("2015-01-07")),
(30, 130 * 1 / 10, cls.window_test_start_date),
(30, 131 * 1 / 10, pd.Timestamp("2015-01-09")),
(40, 140, pd.Timestamp("2015-01-09")),
(50, 150.0 * 1 / 15 * 1 / 16, pd.Timestamp("2015-01-09")),
],
pd.Timestamp("2015-01-09"),
),
cls.create_expected_df_for_factor_compute(
[
(0, 100 * 1 / 4, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 120 * 5 / 3, cls.window_test_start_date),
(20, 121 * 5 / 3, pd.Timestamp("2015-01-07")),
(30, 230 * 1 / 10, cls.window_test_start_date),
(40, np.NaN, pd.Timestamp("2015-01-10")),
(50, 250.0 * 1 / 15 * 1 / 16, pd.Timestamp("2015-01-12")),
],
| pd.Timestamp("2015-01-12") | pandas.Timestamp |
# coding: utf-8
# In[1]:
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.optim as optim
import torch.nn.functional as F
import nltk
import random
import numpy as np
from collections import Counter, OrderedDict
import nltk
import math
import string
import re
import os
import time
from collections import OrderedDict
import pandas as pd
| pd.set_option('max_row', 1500) | pandas.set_option |
import pandas as pd
import numpy as np
#from sklearn.feature_selection import VarianceThreshold
from sklearn.feature_selection import mutual_info_classif,chi2
from sklearn.feature_selection import SelectKBest, SelectPercentile
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.metrics import roc_auc_score, mean_squared_error
# 2018.11.17 Created by Eamon.Zhang
def constant_feature_detect(data,threshold=0.98):
""" detect features that show the same value for the
majority/all of the observations (constant/quasi-constant features)
Parameters
----------
data : pd.Dataframe
threshold : threshold to identify the variable as constant
Returns
-------
list of variables names
"""
data_copy = data.copy(deep=True)
quasi_constant_feature = []
for feature in data_copy.columns:
        predominant = (data_copy[feature].value_counts() / float(
len(data_copy))).sort_values(ascending=False).values[0]
if predominant >= threshold:
quasi_constant_feature.append(feature)
print(len(quasi_constant_feature),' variables are found to be almost constant')
return quasi_constant_feature
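# Usage sketch (illustrative only): `X_train` below is a placeholder for a feature
# DataFrame supplied by the caller; it is not defined in this module.
#   quasi_constant = constant_feature_detect(data=X_train, threshold=0.9)
#   X_train = X_train.drop(columns=quasi_constant)  # drop the detected features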
def corr_feature_detect(data,threshold=0.8):
""" detect highly-correlated features of a Dataframe
Parameters
----------
data : pd.Dataframe
threshold : threshold to identify the variable correlated
Returns
-------
pairs of correlated variables
"""
corrmat = data.corr()
corrmat = corrmat.abs().unstack() # absolute value of corr coef
corrmat = corrmat.sort_values(ascending=False)
corrmat = corrmat[corrmat >= threshold]
    corrmat = corrmat[corrmat < 1]  # remove the diagonal
corrmat = | pd.DataFrame(corrmat) | pandas.DataFrame |
import sys
sys.path.append("../")
import argparse
from augur.utils import json_to_tree
import Bio
import Bio.Phylo
import json
import pandas as pd
import sys
from Helpers import get_y_positions
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("tree", help="auspice tree JSON")
parser.add_argument("output", help="tab-delimited file of attributes per node of the given tree")
parser.add_argument("--include-internal-nodes", action="store_true", help="include data from internal nodes in output")
parser.add_argument("--attributes", nargs="+", help="names of attributes to export from the given tree")
args = parser.parse_args()
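    # Example invocation (the script name and file paths are hypothetical):
    #   python export_node_data.py auspice_tree.json node_data.tsv \
    #       --attributes region clade_membership --include-internal-nodes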
# Load tree from JSON.
with open(args.tree, "r") as fh:
tree_json = json.load(fh)
tree = json_to_tree(tree_json)
# Collect attributes per node from the tree to export.
records = []
if args.attributes:
attributes = args.attributes
else:
        attributes = sorted(list(tree.root.node_attrs.keys()) + list(tree.root.branch_attrs.keys()))
    for node in tree.find_clades():
if node.is_terminal() or args.include_internal_nodes:
record = {
"name": node.name
}
for attribute in attributes:
if attribute in node.node_attrs:
record[attribute] = node.node_attrs[attribute]["value"]
elif attribute in node.branch_attrs:
record[attribute] = node.branch_attrs[attribute]["value"]
else:
print(f"Attribute '{attribute}' missing from node '{node.name}'", file=sys.stderr)
records.append(record)
tree_records = []
heights = get_y_positions(tree)
for node in tree.find_clades(terminal=True):
tree_records.append(dict(strain=node.name, y=heights[node]))
tree_records_df = | pd.DataFrame(tree_records) | pandas.DataFrame |
import time as t
import io
import json
import requests as r
import pandas as pd
from QualtricsAPI.Setup import Credentials
from QualtricsAPI.JSON import Parser
from QualtricsAPI.Exceptions import Qualtrics500Error, Qualtrics503Error, Qualtrics504Error, Qualtrics400Error, Qualtrics401Error, Qualtrics403Error
class XMDirectory(Credentials):
''' This class contains methods that give users the ability to work with their contact data within the
XMDirectory.'''
def __init__(self, token=None, directory_id=None, data_center=None):
self.token = token
self.data_center = data_center
self.directory_id = directory_id
def create_contact_in_XM(self, **kwargs):
        '''This function gives you the ability to create a contact in your XM Directory. This method does not re-list each
        element that you just created. It returns the XMDirectory "Contact ID" associated with the newly created XM Directory
contact.
:param dynamic_payload: A dictionary containing the correct key-value pairs.
:type dynamic_payload: dict
:param first_name: The contacts first name.
:type first_name: str
:param last_name: The contacts last name.
:type last_name: str
:param email: the contacts email.
:type email: str
:param phone: the contacts phone number.
:type phone: str
:param language: the native language of the contact. (Default: English)
:type language: str
:param metadata: any relevant contact metadata.
:type metadata: dict
:return: The newly created contact id (CID) in XMDirectory.
:type return: str
'''
dynamic_payload={}
verbose = False
for key in list(kwargs.keys()):
assert key in ['first_name', 'last_name', 'email', 'unsubscribed', 'language', 'external_ref', 'metadata', 'phone', 'verbose', 'dynamic_payload'], "Hey there! You can only pass in parameters with names in the list, ['first_name', 'last_name', 'email', 'unsubscribed', 'language', 'external_ref', 'metadata']"
if key == 'first_name':
dynamic_payload.update({'firstName': kwargs[str(key)]})
elif key == 'last_name':
dynamic_payload.update({'lastName': kwargs[str(key)]})
elif key == 'email':
dynamic_payload.update({'email': kwargs[str(key)]})
elif key == 'phone':
dynamic_payload.update({'phone': kwargs[str(key)]})
elif key == 'language':
dynamic_payload.update({'language': kwargs[str(key)]})
elif key == 'external_ref':
dynamic_payload.update({'extRef': kwargs[str(key)]})
elif key == 'unsubscribed':
dynamic_payload.update({'unsubscribed': kwargs[str(key)]})
elif key == 'phone':
dynamic_payload.update({'phone': kwargs[str(key)]})
elif key == 'metadata':
assert isinstance(kwargs['metadata'], dict), 'Hey there, your metadata parameter needs to be of type "dict"!'
dynamic_payload.update({'embeddedData': kwargs[str(key)]})
elif key == 'dynamic_payload':
dynamic_payload = dict(kwargs[str(key)])
elif key == 'verbose':
verbose = True
headers, base_url = self.header_setup(content_type=True, xm=True)
url = f"{base_url}/contacts"
request = r.post(url, json=dynamic_payload, headers=headers)
response = request.json()
try:
if response['meta']['httpStatus'] == '500 - Internal Server Error':
raise Qualtrics500Error('500 - Internal Server Error')
elif response['meta']['httpStatus'] == '503 - Temporary Internal Server Error':
raise Qualtrics503Error('503 - Temporary Internal Server Error')
elif response['meta']['httpStatus'] == '504 - Gateway Timeout':
raise Qualtrics504Error('504 - Gateway Timeout')
elif response['meta']['httpStatus'] == '400 - Bad Request':
raise Qualtrics400Error('Qualtrics Error\n(Http Error: 400 - Bad Request): There was something invalid about the request.')
elif response['meta']['httpStatus'] == '401 - Unauthorized':
raise Qualtrics401Error('Qualtrics Error\n(Http Error: 401 - Unauthorized): The Qualtrics API user could not be authenticated or does not have authorization to access the requested resource.')
elif response['meta']['httpStatus'] == '403 - Forbidden':
raise Qualtrics403Error('Qualtrics Error\n(Http Error: 403 - Forbidden): The Qualtrics API user was authenticated and made a valid request, but is not authorized to access this requested resource.')
except (Qualtrics500Error, Qualtrics503Error, Qualtrics504Error) as e:
# Recursive call to handle Internal Server Errors
return self.create_contact_in_XM(dynamic_payload=dynamic_payload)
except (Qualtrics400Error, Qualtrics401Error, Qualtrics403Error) as e:
# Handle Authorization/Bad Request Errors
return print(e)
else:
if verbose == True:
return response['meta']['httpStatus'], response['result']['id']
else:
return response['result']['id']
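    # Usage sketch (illustrative; the token, data center, directory id, and contact
    # details below are placeholders, not real credentials):
    #   xm = XMDirectory(token='<api-token>', data_center='<dc>', directory_id='POOL_...')
    #   cid = xm.create_contact_in_XM(first_name='Jane', last_name='Doe',
    #                                 email='jane@example.com', metadata={'cohort': 'A'})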
def delete_contact(self, contact_id=None):
'''This method will delete a contact from your XMDirectory. (Caution this cannot be reversed once deleted!)
:param contact_id: The unique id associated with each contact in the XM Directory.
:type contact_id: str
:return: A string indicating the success or failure of the method call.
'''
assert contact_id != None, 'Hey, the contact_id parameter cannot be None. You need to pass in a XM Directory Contact ID as a string into the contact_id parameter.'
assert isinstance(contact_id, str) == True, 'Hey there, the contact_id parameter must be of type string.'
assert len(contact_id) == 19, 'Hey, the parameter for "contact_id" that was passed is the wrong length. It should have 19 characters.'
assert contact_id[:4] == 'CID_', 'Hey there! It looks like the Contact ID that was entered is incorrect. It should begin with "CID_". Please try again.'
headers, base_url = self.header_setup(xm=True)
url = f"{base_url}/contacts/{contact_id}"
request = r.delete(url, headers=headers)
response = request.json()
try:
if response['meta']['httpStatus'] == '200 - OK':
return f'Your XM Contact"{contact_id}" has been deleted from the XM Directory.'
except:
print(f"ServerError:\nError Code: {response['meta']['error']['errorCode']}\nError Message: {response['meta']['error']['errorMessage']}")
def update_contact(self, contact_id=None, **kwargs):
'''This method will update a contact from your XMDirectory.
:param contact_id: The unique id associated with each contact in the XM Directory.
:type contact_id: str
:param first_name: The new contact's first name.
:type first_name: str
:param last_name: The new contact's last name.
:type last_name: str
:param email: The new contact's email.
:type email: str
:param phone: The new contact's phone number.
        :type phone: str
:param external_ref: The new contact's external reference.
:type external_ref: str
:param unsubscribed: This parameter denotes whether the new contact is unsubscribed from surveys (Default: False).
        :type unsubscribed: str
:param language: The language prefered by the new contact (Default: English)
:type language: str
:param metadata: Any relevant contact metadata.
:type metadata: dict
:return: A string indicating the success or failure of the method call.
'''
assert contact_id != None, 'Hey, the contact_id parameter cannot be None. You need to pass in a XM Directory Contact ID as a string into the contact_id parameter.'
assert isinstance(contact_id, str) == True, 'Hey there, the contact_id parameter must be of type string.'
assert len(contact_id) == 19, 'Hey, the parameter for "contact_id" that was passed is the wrong length. It should have 19 characters.'
assert contact_id[:4] == 'CID_', 'Hey there! It looks like the Contact ID that was entered is incorrect. It should begin with "CID_". Please try again.'
dynamic_payload = {}
for key in list(kwargs.keys()):
assert key in ['first_name', 'last_name', 'email', 'unsubscribed', 'language', 'external_ref', 'metadata', 'phone'], "Hey there! You can only pass in parameters with names in the list, ['first_name', 'last_name', 'email', 'unsubscribed', 'language', 'external_ref', 'metadata']"
if key == 'first_name':
dynamic_payload.update({'firstName': kwargs[str(key)]})
elif key == 'last_name':
dynamic_payload.update({'lastName': kwargs[str(key)]})
elif key == 'email':
dynamic_payload.update({'email': kwargs[str(key)]})
elif key == 'phone':
dynamic_payload.update({'phone': kwargs[str(key)]})
elif key == 'language':
dynamic_payload.update({'language': kwargs[str(key)]})
elif key == 'external_ref':
dynamic_payload.update({'extRef': kwargs[str(key)]})
elif key == 'unsubscribed':
dynamic_payload.update({'unsubscribed': kwargs[str(key)]})
elif key == 'phone':
dynamic_payload.update({'phone': kwargs[str(key)]})
elif key == 'metadata':
assert isinstance(kwargs['metadata'], dict), 'Hey there, your metadata parameter needs to be of type "dict"!'
dynamic_payload.update({'embeddedData': kwargs[str(key)]})
headers, base_url = self.header_setup(xm=True)
url = f"{base_url}/contacts/{contact_id}"
request = r.put(url, json=dynamic_payload, headers=headers)
response = request.json()
try:
if response['meta']['httpStatus'] == '500 - Internal Server Error':
raise Qualtrics500Error('500 - Internal Server Error')
except Qualtrics500Error:
attempt = 0
while attempt < 20:
request = r.put(url, json=dynamic_payload, headers=headers)
response = request.json()
if response['meta']['httpStatus'] == '500 - Internal Server Error':
attempt+=1
t.sleep(0.25)
continue
elif response['meta']['httpStatus'] == '200 - OK':
return f'The contact ({contact_id}) was updated in the XM Directory.'
return print(f"ServerError: {response['meta']['httpStatus']}\nError Code: {response['meta']['error']['errorCode']}\nError Message: {response['meta']['error']['errorMessage']}")
except Exception:
print(f"ServerError: {response['meta']['httpStatus']}\nError Code: {response['meta']['error']['errorCode']}\nError Message: {response['meta']['error']['errorMessage']}")
else:
return f'The contact ({contact_id}) was updated in the XM Directory.'
def list_contacts_in_directory(self):
        '''This method will list the top-level information about the contacts in your XM Directory. As a word of caution,
        this method may take a while to complete depending on the size of your XM Directory, and there may be some
        latency between successive paginated requests.
:return: A Pandas DataFrame
'''
page_size=1000
i=1000
master = pd.DataFrame(columns=['contactId','firstName', 'lastName', 'email', 'phone','unsubscribed', 'language', 'extRef'])
headers, base_url = self.header_setup(xm=True)
url = base_url + f"/contacts?pageSize={page_size}&useNewPaginationScheme=true"
def extract_page(url=url, master=master, page_size=page_size):
''' This is a method that extracts a single page of contacts in a mailing list.'''
try:
request = r.get(url, headers=headers)
response = request.json()
if response['meta']['httpStatus'] == '500 - Internal Server Error':
raise Qualtrics500Error('500 - Internal Server Error')
elif response['meta']['httpStatus'] == '503 - Temporary Internal Server Error':
raise Qualtrics503Error('503 - Temporary Internal Server Error')
elif response['meta']['httpStatus'] == '504 - Gateway Timeout':
raise Qualtrics504Error('504 - Gateway Timeout')
except (Qualtrics500Error, Qualtrics503Error):
t.sleep(0.25)
extract_page(url=url, master=master)
except Qualtrics504Error:
t.sleep(5)
extract_page(url=url, master=master)
except:
t.sleep(10)
extract_page(url=url, master=master)
else:
keys = ['contactId','firstName', 'lastName', 'email', 'phone','unsubscribed', 'language', 'extRef']
contact_lists = Parser().json_parser(response=response, keys=keys, arr=False)
next_page = response['result']['nextPage']
single_contact_list = pd.DataFrame(contact_lists).transpose()
single_contact_list.columns = keys
master = pd.concat([master, single_contact_list]).reset_index(drop=True)
return master, next_page
master, next_page = extract_page()
print(i)
if next_page is None:
return master
else:
while next_page is not None:
master, next_page = extract_page(url=next_page, master=master)
i+=1000
print(i)
return master
def get_contact(self, contact_id=None):
''' This method is similar to the 'list_contacts_in_directory' method. Except it will just return a single contact's
information.
:param contact_id: The unique id associated with each contact in the XM Directory.
:type contact_id: str
:return: A Pandas DataFrame
'''
assert contact_id != None, 'Hey, the contact_id parameter cannot be None. You need to pass in a XM Directory Contact ID as a string into the contact_id parameter.'
assert isinstance(contact_id, str) == True, 'Hey there, the contact_id parameter must be of type string.'
assert len(contact_id) == 19, 'Hey, the parameter for "contact_id" that was passed is the wrong length. It should have 19 characters.'
assert contact_id[:4] == 'CID_', 'Hey there! It looks like the Contact ID that was entered is incorrect. It should begin with "CID_". Please try again.'
headers, base_url = self.header_setup(xm=True)
url = base_url + f'/contacts/{str(contact_id)}'
request = r.get(url, headers=headers)
response = request.json()
try:
primary = pd.DataFrame.from_dict(response['result'], orient='index').transpose()
primary['creationDate'] = pd.to_datetime(primary['creationDate'],unit='ms')
primary['lastModified'] = | pd.to_datetime(primary['lastModified'],unit='ms') | pandas.to_datetime |
# -*-coding=utf-8-*-
__author__ = 'Rocky'
'''
http://30daydo.com
Contact: <EMAIL>
Delivery order processing: save delivery orders to the database
'''
import os
import datetime
import pandas as pd
import numpy as np
import re
from configure.settings import DBSelector
import fire
pd.set_option('display.max_rows', None)
class DeliveryOrder():
def __init__(self):
self.gj_table = 'tb_delivery_gj_django'
self.hb_table = 'tb_delivery_hb_django'
self.db_init()
def db_init(self):
DB = DBSelector()
self.engine = DB.get_engine('db_stock', 'qq')
self.conn = DB.get_mysql_conn('db_stock', 'qq')
def setpath(self, path):
path = os.path.join(os.getcwd(), path)
if os.path.exists(path) == False:
os.mkdir(path)
os.chdir(path)
    # Handle Huabao Securities data separately
def merge_data_HuaBao(self, filename):
try:
            # Choose a different parsing function depending on the file format
df = pd.read_csv(filename, encoding='gbk')
except Exception as e:
print(e)
raise OSError("打开文件失败")
df = df.reset_index(drop='True')
df = df.dropna(subset=['成交时间'])
        df['成交日期'] = df['成交日期'].astype(str) + df['成交时间']
        # TODO: duplicated conversion, remove
df['成交日期'] = df['成交日期'].map(lambda x: datetime.datetime.strptime(
x, "%Y%m%d%H:%M:%S").strftime('%Y-%m-%d %H:%M:%S'))
try:
df['成交日期'] = pd.to_datetime(df['成交日期'])
except Exception as e:
print(e)
del df['股东代码']
del df['成交时间']
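        # keep only buy ('买入') and sell ('卖出') orders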
df = df[(df['委托类别'] == '买入') | (df['委托类别'] == '卖出')]
df = df.fillna(0)
df = df.sort_values(by='成交日期', ascending=False)
cursor = self.conn.cursor()
insert_cmd = f'''
insert into {self.hb_table} (成交日期,证券代码,证券名称,委托类别,成交数量,成交价格,成交金额,发生金额,佣金,印花税,过户费,其他费) values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)'''
check_dup = f'''
select * from {self.hb_table} where 成交日期=%s and 证券代码=%s and 委托类别=%s and 成交数量=%s and 发生金额=%s
'''
for index, row in df.iterrows():
date = row['成交日期']
date = date.to_pydatetime()
cursor.execute(check_dup, (date, row['证券代码'], row['委托类别'], row['成交数量'], row['发生金额']))
if cursor.fetchall():
                print('Duplicate record found, skipping')
continue
else:
cursor.execute(insert_cmd, (
date, row['证券代码'], row['证券名称'], row['委托类别'], row['成交数量'], row['成交价格'], row['成交金额'], row['发生金额'],
row['佣金'], row['印花税'], row['过户费'], row['其他费']))
self.conn.commit()
self.conn.close()
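    # Usage sketch (illustrative; assumes the database settings in configure.settings
    # are in place, and the CSV file name is a placeholder):
    #   DeliveryOrder().merge_data_HuaBao('HB_2021-06_delivery.csv')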
    # Merge one year of delivery orders
def years_ht(self):
df_list = []
for i in range(1, 2):
            # Use a fixed file
filename = 'HT_2018-05_week4-5.xls'
try:
t = pd.read_table(filename, encoding='gbk',
                                  dtype={'证券代码': str})
except Exception as e:
print(e)
continue
df_list.append(t)
df = | pd.concat(df_list) | pandas.concat |
import asyncio
import queue
import uuid
from datetime import datetime
import pandas as pd
from storey import build_flow, Source, Map, Filter, FlatMap, Reduce, FlowError, MapWithState, ReadCSV, Complete, AsyncSource, Choice, \
Event, Batch, Table, NoopDriver, WriteToCSV, DataframeSource, MapClass, JoinWithTable, ReduceToDataFrame, ToDataFrame, WriteToParquet, \
WriteToTSDB, Extend
class ATestException(Exception):
pass
class RaiseEx:
_counter = 0
def __init__(self, raise_after):
self._raise_after = raise_after
def raise_ex(self, element):
if self._counter == self._raise_after:
raise ATestException("test")
self._counter += 1
return element
def test_functional_flow():
controller = build_flow([
Source(),
Map(lambda x: x + 1),
Filter(lambda x: x < 3),
FlatMap(lambda x: [x, x * 10]),
Reduce(0, lambda acc, x: acc + x),
]).run()
for _ in range(100):
for i in range(10):
controller.emit(i)
controller.terminate()
termination_result = controller.await_termination()
assert termination_result == 3300
def test_csv_reader():
controller = build_flow([
ReadCSV('tests/test.csv', header=True),
FlatMap(lambda x: x),
Map(lambda x: int(x)),
Reduce(0, lambda acc, x: acc + x),
]).run()
termination_result = controller.await_termination()
assert termination_result == 21
def test_csv_reader_error_on_file_not_found():
controller = build_flow([
ReadCSV('tests/idontexist.csv', header=True),
]).run()
try:
controller.await_termination()
assert False
except FlowError as ex:
assert isinstance(ex.__cause__, FileNotFoundError)
def test_csv_reader_as_dict():
controller = build_flow([
ReadCSV('tests/test.csv', header=True, build_dict=True),
FlatMap(lambda x: [x['n1'], x['n2'], x['n3']]),
Map(lambda x: int(x)),
Reduce(0, lambda acc, x: acc + x),
]).run()
termination_result = controller.await_termination()
assert termination_result == 21
def append_and_return(lst, x):
lst.append(x)
return lst
def test_csv_reader_as_dict_with_key_and_timestamp():
controller = build_flow([
ReadCSV('tests/test-with-timestamp.csv', header=True, build_dict=True, key_field='k',
timestamp_field='t', timestamp_format='%d/%m/%Y %H:%M:%S'),
Reduce([], append_and_return, full_event=True),
]).run()
termination_result = controller.await_termination()
assert len(termination_result) == 2
assert termination_result[0].key == 'm1'
assert termination_result[0].time == datetime(2020, 2, 15, 2, 0)
assert termination_result[0].body == {'k': 'm1', 't': datetime(2020, 2, 15, 2, 0), 'v': 8, 'b': True}
assert termination_result[1].key == 'm2'
assert termination_result[1].time == datetime(2020, 2, 16, 2, 0)
assert termination_result[1].body == {'k': 'm2', 't': datetime(2020, 2, 16, 2, 0), 'v': 14, 'b': False}
def test_csv_reader_with_key_and_timestamp():
controller = build_flow([
ReadCSV('tests/test-with-timestamp.csv', header=True, key_field='k',
timestamp_field='t', timestamp_format='%d/%m/%Y %H:%M:%S'),
Reduce([], append_and_return, full_event=True),
]).run()
termination_result = controller.await_termination()
assert len(termination_result) == 2
assert termination_result[0].key == 'm1'
assert termination_result[0].time == datetime(2020, 2, 15, 2, 0)
assert termination_result[0].body == ['m1', datetime(2020, 2, 15, 2, 0), 8, True]
assert termination_result[1].key == 'm2'
assert termination_result[1].time == datetime(2020, 2, 16, 2, 0)
assert termination_result[1].body == ['m2', datetime(2020, 2, 16, 2, 0), 14, False]
def test_csv_reader_as_dict_no_header():
controller = build_flow([
ReadCSV('tests/test-no-header.csv', header=False, build_dict=True),
FlatMap(lambda x: [x[0], x[1], x[2]]),
Map(lambda x: int(x)),
Reduce(0, lambda acc, x: acc + x),
]).run()
termination_result = controller.await_termination()
assert termination_result == 21
def test_dataframe_source():
df = pd.DataFrame([['hello', 1, 1.5], ['world', 2, 2.5]], columns=['string', 'int', 'float'])
controller = build_flow([
DataframeSource(df),
Reduce([], append_and_return),
]).run()
termination_result = controller.await_termination()
expected = [{'string': 'hello', 'int': 1, 'float': 1.5}, {'string': 'world', 'int': 2, 'float': 2.5}]
assert termination_result == expected
def test_indexed_dataframe_source():
df = | pd.DataFrame([['hello', 1, 1.5], ['world', 2, 2.5]], columns=['string', 'int', 'float']) | pandas.DataFrame |
'''
Display the importance scores of the weights
'''
import os
import matplotlib.pyplot as plt
import pandas as pd
from pyecharts import options as opts
from pyecharts.charts import Timeline, Bar, HeatMap, Line, Page
from pyecharts.faker import Faker
from pyecharts.globals import ThemeType
import numpy as np
PROJECT_PATH = os.path.abspath(os.path.dirname(__file__))
DATA_PATH = os.path.join(PROJECT_PATH, '../data/fishBehavior')
plt.rc('font', family='Times New Roman')
fontsize = 12.5
ANGLE_NAME = ['Angle_0.0', 'Angle_20.0', 'Angle_40.0', 'Angle_60.0', 'Angle_80.0', 'Angle_100.0', 'Angle_120.0',
'Angle_140.0', 'Angle_160.0']
ACC_NAME = ['AccSpeed_0.0','AccSpeed_2.0','AccSpeed_4.0','AccSpeed_6.0','AccSpeed_8.0']
def format_data(data: pd.DataFrame, time_list: list, name_list: list) -> dict:
data = data.T.to_dict()
fdata = {}
for t_id, vdata in data.items():
fdata[time_list[t_id]] = [v for region, v in vdata.items()]
for min_t in time_list:
temp = fdata[min_t]
for i in range(len(temp)):
fdata[min_t][i] = {"name": name_list[i], "value": temp[i]}
return fdata
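# Illustrative shape of the dict returned by format_data (values are made up):
#   {0: [{'name': '1_1', 'value': 0.3}, {'name': '2_CK', 'value': 0.5}, ...],
#    1: [{'name': '1_1', 'value': 0.4}, {'name': '2_CK', 'value': 0.6}, ...]}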
#####################################################################################
# Data for 2002 - 2011
def get_year_overlap_chart(total_data, time_mim: int) -> Bar:
bar = (
Bar()
.add_xaxis(xaxis_data=name_list)
)
bar.add_yaxis(
series_name="velocity",
y_axis=total_data["velocity"][time_mim],
is_selected=True,
label_opts=opts.LabelOpts(is_show=False),
stack=f'stack1'
)
bar.add_yaxis(
series_name="distance",
y_axis=total_data["distance"][time_mim],
is_selected=True,
label_opts=opts.LabelOpts(is_show=False),
stack=f'stack1'
)
bar.add_yaxis(
series_name="velocity",
y_axis=total_data["velocity"][time_mim],
is_selected=True,
label_opts=opts.LabelOpts(is_show=False),
stack=f'stack2'
)
bar.add_yaxis(
series_name="distance",
y_axis=total_data["distance"][time_mim],
is_selected=True,
label_opts=opts.LabelOpts(is_show=False),
stack=f'stack2'
)
# print(total_data["bottom_time"][time_mim])
# print(Faker.values())
# exit(33)
# bar.add_yaxis("moving time", [31, 58, 80, 26], stack="stack1", category_gap="50%")
# bar.add_yaxis("static time", [31, 58, 80, 26], stack="stack1", category_gap="50%")
bar.set_global_opts(
title_opts=opts.TitleOpts(
title="{}分钟后,斑马鱼运动指标".format(time_mim)
),
datazoom_opts=opts.DataZoomOpts(),
tooltip_opts=opts.TooltipOpts(
is_show=True, trigger="axis", axis_pointer_type="shadow"
),
)
return bar
def getLine(v_data, name):
l = (
Line()
.add_xaxis(xaxis_data=[str(_) for _ in time_list])
.add_yaxis(
series_name="1_1",
y_axis=v_data['1_1'],
label_opts=opts.LabelOpts(is_show=False),
)
.add_yaxis(
series_name="2_CK",
y_axis=v_data['2_CK'],
label_opts=opts.LabelOpts(is_show=False),
)
.add_yaxis(
series_name="3_1",
y_axis=v_data['3_1'],
label_opts=opts.LabelOpts(is_show=False),
)
.add_yaxis(
series_name="4_1",
y_axis=v_data['4_1'],
label_opts=opts.LabelOpts(is_show=False),
)
.set_series_opts(
areastyle_opts=opts.AreaStyleOpts(opacity=0.5),
label_opts=opts.LabelOpts(is_show=False),
)
.set_global_opts(
title_opts=opts.TitleOpts(title=name),
tooltip_opts=opts.TooltipOpts(trigger="axis"),
datazoom_opts=opts.DataZoomOpts(),
yaxis_opts=opts.AxisOpts(
type_="value",
axistick_opts=opts.AxisTickOpts(is_show=True),
splitline_opts=opts.SplitLineOpts(is_show=True),
),
xaxis_opts=opts.AxisOpts(type_="category", boundary_gap=False),
)
)
return l
def getStackBar(top_data, bottom_time, name1, name2, name):
def format(t):
region = {}
for i in name_list:
td = t[i].values
list1 = []
for v in td:
list1.append({
"value": v,
"percent": v,
})
region[i] = list1
return region
td = format(top_data)
bd = format(bottom_time)
c = (
Bar(init_opts=opts.InitOpts(theme=ThemeType.LIGHT))
.add_xaxis(["Time " + str(_) + ":" + "/".join(name_list) for _ in time_list])
)
for idx, i in enumerate(name_list):
c.add_yaxis(name1, td[i], stack=f'stack{idx}')
c.add_yaxis(name2, bd[i], stack=f'stack{idx}')
c.set_series_opts(
label_opts=opts.LabelOpts(is_show=False)
)
c.set_global_opts(
xaxis_opts=opts.AxisOpts(axislabel_opts=opts.LabelOpts(rotate=-15)),
datazoom_opts=opts.DataZoomOpts(),
title_opts=opts.TitleOpts(title=name)
)
return c
def getHeatMap(data, time_list, name):
def formatHeatmapData(rdata):
heat_data = []
rdata = np.around(rdata, decimals=3)
for t in range(rdata.shape[0]):
for a in range(rdata.shape[1]):
heat_data.append([t, a, rdata[t][a]])
return heat_data
c = (
HeatMap()
)
c.add_xaxis(time_list)
for region_name, v in data.items():
heat_data = formatHeatmapData(data[region_name].values)
if 'Acceleration' in name:
c.add_yaxis(
region_name,
ACC_NAME,
heat_data,
label_opts=opts.LabelOpts(is_show=True, position="inside"),
)
elif 'Angle' in name:
c.add_yaxis(
region_name,
ANGLE_NAME,
heat_data,
label_opts=opts.LabelOpts(is_show=True, position="inside"),
)
c.set_global_opts(
title_opts=opts.TitleOpts(title=name),
datazoom_opts=opts.DataZoomOpts(),
visualmap_opts=opts.VisualMapOpts(min_=0, max_=1),
)
return c
if __name__ == '__main__':
import argparse
import pandas as pd
ap = argparse.ArgumentParser()
ap.add_argument("-tid", "--t_ID", default="D01")
ap.add_argument("-lid", "--l_ID", default="D02")
ap.add_argument("-rid", "--r_ID", default="D04")
ap.add_argument("-iP", "--indicatorPath", default="E:\\data\\3D_pre/exp_pre/indicators/")
ap.add_argument("-o", "--outputPath", default="E:\\data\\3D_pre/exp_pre/results/")
args = vars(ap.parse_args())
outputPath = args["outputPath"]
if not os.path.exists(outputPath):
os.mkdir(outputPath)
files = os.listdir(args["indicatorPath"])
all_nos = []
for ifile in files:
no = ifile.split("_")[0]
start_no, end_no = no.split("-")
str_start_no = start_no.zfill(4)
str_end_no = end_no.zfill(4)
if (str_start_no, str_end_no) in all_nos:
continue
else:
all_nos.append((str_start_no, str_end_no))
all_nos.sort()
time_list = [_ for _ in range(0, int(all_nos[-1][1]))]
total_data = {}
name_list = [
"1_1",
"2_CK",
"3_1",
"4_1"
]
v_data = pd.DataFrame()
d_data = pd.DataFrame()
top_data = pd.DataFrame()
bottom_time = pd.DataFrame()
stop_time = pd.DataFrame()
moving_time = pd.DataFrame()
angle_data = {region_name: None for region_name in name_list}
acc_data = {region_name: None for region_name in name_list}
for ino in all_nos:
no_v_data = pd.DataFrame()
no_d_data = pd.DataFrame()
no_top_data = pd.DataFrame()
no_bottom_time = pd.DataFrame()
no_stop_time = pd.DataFrame()
no_moving_time = pd.DataFrame()
for RegionName in name_list:
indicator_file = os.path.join(args["indicatorPath"], str(int(ino[0]))+"-"+str(int(ino[1]))+ "_" + RegionName)
print(indicator_file)
data = pd.read_csv(indicator_file)
# velocity and distance
no_v_data = pd.concat([no_v_data, data[['velocity']]], axis=1)
no_d_data = pd.concat([no_d_data, data[['distance']]], axis=1)
no_top_data = pd.concat([no_top_data, data[['top_time']]], axis=1)
no_bottom_time = pd.concat([no_bottom_time, data[['bottom_time']]], axis=1)
no_stop_time = pd.concat([no_stop_time, data[['stop_time']]], axis=1)
v_data = pd.concat([v_data, no_v_data], axis=0)
d_data = pd.concat([d_data, no_d_data], axis=0)
top_data = pd.concat([top_data, no_top_data], axis=0)
bottom_time = pd.concat([bottom_time, no_bottom_time], axis=0)
stop_time = | pd.concat([stop_time, no_stop_time], axis=0) | pandas.concat |
import pandas as pd
import numpy as np
import os
import random
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import classification_report
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVC
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import MinMaxScaler
from sklearn.ensemble import VotingClassifier
"""
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Dropout
from keras.layers import Flatten
from keras.layers.convolutional import Conv1D
from keras.layers.convolutional import MaxPooling1D
"""
import Extract_Data
import PCA_Reduction
random.seed(42)
np.random.seed(42)
path = os.path.abspath("../Dataset")
#df = pd.read_csv(os.path.join(path, 'CombinedData.csv'), encoding='utf-8')
#df = pd.read_csv(os.path.join(path, 'Feature_Extracted_Data.csv'), encoding='utf-8')
#df = pd.read_csv(os.path.join(path, 'PCA_Reduced_Data.csv'), encoding='utf-8')
#df = pd.read_csv(os.path.join(path, 'PCA_Reduced_Data_Original.csv'), encoding='utf-8')
def getClassifier(cname, prob=False):
if cname == 'LogisticRegression':
return LogisticRegression(random_state=42, solver='liblinear')
elif cname == 'RandomForest':
return RandomForestRegressor(n_estimators=20, random_state=0)
elif cname == 'SVM':
return SVC(kernel='linear', probability=prob)
elif cname == 'DecisionTreeClassifier':
return DecisionTreeClassifier(random_state=42)
elif cname == 'KNeighborsClassifier':
return KNeighborsClassifier(n_neighbors=5)
elif cname == 'GaussianNB':
return GaussianNB()
elif cname == 'VotingClassifier':
classifiers = ['LogisticRegression', 'SVM', 'KNeighborsClassifier']
fitted_classifiers = []
for name in classifiers:
fitted_classifiers.append(tuple([name, getClassifier(name, True)]))
return VotingClassifier(estimators=fitted_classifiers, voting='soft')
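# Usage sketch (illustrative): build a single model or the soft-voting ensemble.
#   svm = getClassifier('SVM', prob=True)
#   ensemble = getClassifier('VotingClassifier')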
def CrossValidation(df,cname, n_splits=2):
X = df.iloc[:, :-1].values
y = df['Label'].values
skf = StratifiedKFold(n_splits=n_splits)
skf.get_n_splits(X, y)
all_test_labels = []
all_pred_labels = []
for i, (train_index, test_index) in enumerate(skf.split(X,y)):
#print("\n\n\nRUNNING FOLD: {}".format(i))
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
pca, X_train_df, minmax = PCA_Reduction.PCAReduction(pd.DataFrame(X_train))
X_train = X_train_df.values
X_test = minmax.transform(pd.DataFrame(X_test))
X_test = pca.transform( | pd.DataFrame(X_test) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 9 15:54:43 2020
Automatic tests for WRFlux.
Run WRF simulations with many different namelist settings, calculate tendencies, and
perform tests defined in testing.py.
@author: <NAME>
"""
import sys
from run_wrf.launch_jobs import launch_jobs
from run_wrf.tools import grid_combinations, Capturing
import shutil
from collections import OrderedDict as odict
import os
from wrflux import tools
from wrflux.test import testing
import pandas as pd
import importlib
import numpy as np
import datetime
from pathlib import Path
import glob
from config import config_test_tendencies_base as conf
pd.set_option("display.precision", 15)
now = datetime.datetime.now().isoformat()[:16]
test_path = Path(__file__).parent.absolute()
# %% settings
variables = ["q", "t", "u", "v", "w"]
# raise if tests fail
raise_error = True
# restore module_initialize_ideal.F after running tests
restore_init_module = True
# What to do if simulation output already exists: Skip run ('s'), overwrite ('o'),
# restart ('r') or backup files ('b').
exist = "s"
# skip postprocessing if data already exists
skip_exist = False
# no running of simulations only postprocessing
skip_running = False
save_results = True # save test results to csv tables
# builds to run simulations with
builds = ["org", "debug", "normal"]
# Change mapscale factors from 1 to random values around 1 to mimic real-case runs:
random_msf = True
# tests to perform
tests = testing.all_tests
# tests = ["budget", "decomp_sumdir", "decomp_sumcomp", "sgs",
# "dz_out", "adv_2nd", "w", "mass", "Y=0", "NaN", "dim_coords",
# "no_model_change"]
# tests = ["periodic"]
# keyword arguments for tests (mainly for plotting)
kw = dict(
avg_dims_error=["y", "bottom_top", "Time"], # dimensions over which to calculate error norms
# iloc={"x" : slice(5,-5)}, # integer-location based indexing before runnning tests
# loc={"comp" : ["trb_r"]}, # label based indexing before runnning tests
plot=True,
# plot_diff=True, #plot difference between forcing and tendency against tendency
discrete=True, # discrete colormap
# hue="comp",
ignore_missing_hue=True,
savefig=True,
)
# %% budget calculation methods
budget_methods = ["", "cartesian"]
budget_methods_2nd = ["cartesian 2nd"]
budget_methods_dzout = ["cartesian dz_out_x", "cartesian dz_out_z"]
# %%test functions
def test_all():
"""Define test simulations and start tests."""
# Define parameter grid for simulations
param_grids = {}
th = {"use_theta_m": [0, 1, 1], "output_dry_theta_fluxes": [False, False, True]}
o = np.arange(2, 7)
# names of parameter values for output filenames
# either dictionaries or lists (not for composite parameters)
param_names = {"th": ["thd", "thm", "thdm"],
"ieva": ["1"],
"h_adv_order": [2, 3],
"v_adv_order": [2, 3],
"adv_order": o,
"bc": ["open"],
"timing": ["short"],
"open_x": [True],
"open_y": [True],
"symm_x": [True],
"symm_y": [True]}
### param_grids["2nd no_debug"] = odict(adv_order=dict(h_sca_adv_order=[2], v_sca_adv_order=[2], h_mom_adv_order=[2], v_mom_adv_order=[2]))
# test processing only one variable at the time
s = "output_{}_fluxes"
d = {s.format(v): [1, 2, 3] for v in tools.all_variables}
param_names.update(d)
for v in tools.all_variables:
d = {s.format(v) + suff: [0, 0, 0] for v in tools.all_variables for suff in ["", "_add"]}
d[s.format(v)] = [1, 2, 3]
param_grids[s.format(v) + "_debug_only"] = {s.format(v): d}
param_grids["dz_out no_debug"] = odict(msf=1, input_sounding="wrflux_u")
param_grids["dz_out no_debug hor_avg"] = param_grids["dz_out no_debug"].copy()
param_grids["trb no_debug"] = odict(msf=1, input_sounding="wrflux_u",
timing=dict(
end_time=["2018-06-20_12:30:00"],
output_streams=[{24: ["meanout", conf.params["dt_f"] / 60.],
0: ["instout", 10.]}]))
param_grids["trb no_debug hor_avg"] = param_grids["trb no_debug"].copy()
param_grids["hor_avg no_debug msf=1"] = param_grids["dz_out no_debug"].copy() # for Y=0 test
param_grids["hor_avg no_debug"] = odict()
param_grids["chunking xy no_debug"] = odict(chunks={"x": 10, "y": 10})
param_grids["chunking x no_debug"] = odict(chunks={"x": 10})
param_grids["chunking x hor_avg no_debug"] = param_grids["chunking x no_debug"].copy()
param_grids["no density-weighting"] = odict(hesselberg_avg=[False]) # TODO also skip hessel in postproc?
param_grids["serial"] = odict(lx=[5000], ly=[5000])
param_grids["theta - 300K no_debug"] = odict(th=th, theta_pert=True)
param_grids["avg_interval"] = odict(avg_interval=20, output_streams=[{24: ["meanout", 30.], 0: ["instout", 10.]}])
param_grids["km_opt"] = odict(km_opt=[2, 5], spec_hfx=[0.2, None], th=th)
param_grids["PBL scheme with theta moist+dry"] = odict(bl_pbl_physics=[1], th=th)
param_grids["2nd-order advection th variations"] = odict(use_theta_m=[0, 1],
adv_order=dict(h_sca_adv_order=2,
v_sca_adv_order=2,
h_mom_adv_order=2,
v_mom_adv_order=2))
param_grids["simple and positive-definite advection"] = odict(
moist_adv_opt=[0, 1],
adv_order=dict(h_sca_adv_order=o, v_sca_adv_order=o, h_mom_adv_order=o, v_mom_adv_order=o))
dx = 1000
param_grids["ieva"] = odict(use_theta_m=1, ieva=dict(zadvect_implicit=1, dt_f=10, dx=dx,
lx=20*dx, ly=20*dx, dzmax=100, nz=None, hm=0, spec_hfx=0.3))
param_grids["WENO advection"] = odict(
moist_adv_opt=[3, 4], scalar_adv_opt=[3], momentum_adv_opt=[3], th=th)
param_grids["monotonic advection"] = odict(moist_adv_opt=[2], v_sca_adv_order=[3, 5], th=th)
param_grids["MP + CU"] = odict(cu_physics=16, shcu_physics=2, bl_pbl_physics=9, mp_physics=2, th=th)
param_grids["damp2_diff6"] = odict(damp_opt=2, diff_6th_opt=1, th=th)
param_grids["damp3"] = odict(damp_opt=3)
param_grids["w_damping"] = odict(w_damping=1, dz0=30, dzmax=50, dx=1000, lx=20000, ly=20000, dt_f=10)
hm = 0 # flat simulations in boundaries are not periodic
param_grids["open BC x"] = odict(open_x=dict(open_xs=[True], open_xe=[True], periodic_x=[False],
hm=hm, spec_hfx=[0.2], input_sounding="free"))
param_grids["open BC y"] = odict(open_y=dict(open_ys=[True], open_ye=[True], periodic_y=[False],
hm=hm, spec_hfx=[0.2], input_sounding="free"))
param_grids["open BC y hor_avg"] = param_grids["open BC y"].copy()
param_grids["symmetric BC x"] = odict(symm_x=dict(symmetric_xs=[True], symmetric_xe=[True], periodic_x=[False],
hm=hm, spec_hfx=[0.2], input_sounding="free"))
param_grids["symmetric BC y"] = odict(symm_y=dict(symmetric_ys=[True], symmetric_ye=[True], periodic_y=[False],
hm=hm, spec_hfx=[0.2], input_sounding="free"))
param_grids["symmetric BC y hor_avg"] = param_grids["symmetric BC y"].copy()
failed, failed_short, err, err_short, err_diff, err_ratio = run_and_test(param_grids, param_names, avg_dims=["y"])
return failed, failed_short, err, err_short, err_diff, err_ratio
# %% run_and_test
def run_and_test(param_grids, param_names, avg_dims=None):
"""Run test simulations defined by param_grids and config_file and perform tests."""
index = pd.MultiIndex.from_product([["INIT", "RUN"] + tests, variables])
failed = pd.DataFrame(index=index)
failed_short = pd.DataFrame(columns=variables)
index = | pd.MultiIndex.from_product([tests, variables]) | pandas.MultiIndex.from_product |