| prompt | completion | api |
|---|---|---|
| stringlengths 19–1.03M | stringlengths 4–2.12k | stringlengths 8–90 |
from ._learner import KilobotLearner
import os
import gc
import logging
from typing import Generator
from cluster_work import InvalidParameterArgument
from kb_learning.kernel import KilobotEnvKernel
from kb_learning.kernel import compute_median_bandwidth, compute_median_bandwidth_kilobots, angle_from_swarm_mean, \
step_towards_center
from kb_learning.ac_reps.lstd import LeastSquaresTemporalDifference
from kb_learning.ac_reps.reps import ActorCriticReps
from kb_learning.ac_reps.spwgp import SparseWeightedGP
from kb_learning.envs.sampler import ParallelSARSSampler
from kb_learning.envs import register_object_relative_env
# import matplotlib
# matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pickle
logger = logging.getLogger('kb_learning')
class ACRepsLearner(KilobotLearner):
_restore_supported = True
_default_params = {
'sampling': {
'num_kilobots': 15,
'w_factor': .0,
'num_episodes': 100,
'num_steps_per_episode': 125,
'num_SARS_samples': 10000,
'num_workers': None,
'object_shape': 'quad',
'object_width': .15,
'object_height': .15,
'save_trajectories': True,
'light_type': 'circular',
'light_radius': .2,
'observe_object': False
},
'kernel': {
'kb_dist': 'embedded',
'l_dist': 'maha',
'a_dist': 'maha',
'w_dist': 'maha',
'bandwidth_factor_kb': .3,
'bandwidth_factor_light': .55,
'bandwidth_factor_action': .8,
'bandwidth_factor_extra': .3,
'rho': .5,
'variance': 1.,
},
'learn_iterations': 1,
'lstd': {
'discount_factor': .99,
'num_features': 1000,
'num_policy_samples': 5,
},
'ac_reps': {
'epsilon': .3,
'alpha': .0
},
'gp': {
'prior_variance': 7e-5,
'noise_variance': 1e-5,
'num_sparse_states': 1000,
'kb_dist': 'embedded',
'l_dist': 'maha',
'w_dist': 'maha',
'bandwidth_factor_kb': .3,
'bandwidth_factor_light': .55,
'bandwidth_factor_extra': .3,
'rho': .5,
'use_prior_mean': True
},
'eval': {
'num_episodes': 50,
'num_steps_per_episode': 100,
'save_trajectories': True
}
}
def __init__(self):
super().__init__()
self.state_preprocessor = None
self.state_kernel = None
self.state_action_kernel = None
self.policy = None
self.sars = None
self.theta = None
self.it_sars = None
self.eval_sars = None
self.it_info = None
self.eval_info = None
self.lstd_samples = None
self.sampler = None
self.kilobots_columns = None
self.light_columns = None
self.object_columns = None
self.extra_columns = None
self.action_columns = None
self.reward_columns = None
self.state_columns = None
self.next_state_columns = None
self.sars_columns = None
self.state_object_columns = None
self._light_dimensions = None
self._action_dimensions = None
def iterate(self, config: dict, rep: int, n: int) -> dict:
sampling_params = self._params['sampling']
np.random.seed(self._seed)
logger.info('sampling environment')
self.sampler.seed = self._seed + 123
self.it_sars, self.it_info = self.sampler(self.policy)
samples_episode_reward = self.it_sars['R'].groupby(level=0).sum()
samples_episode_reward_mean = samples_episode_reward.mean()
logger.info('samples episode reward mean: {:.6f}'.format(samples_episode_reward_mean))
# add samples to data set and select subset
_extended_sars = self.sars.append(self.it_sars, ignore_index=True)
# _extended_sars.reset_index(drop=True)
if n == 0:
kernel_params = self._params['kernel']
gp_params = self._params['gp']
# compute kernel parameters
logger.debug('computing kernel bandwidths.')
if self.state_preprocessor:
bandwidth_kb = compute_median_bandwidth(_extended_sars[self.kilobots_columns], sample_size=500,
preprocessor=self.state_preprocessor)
else:
bandwidth_kb = compute_median_bandwidth_kilobots(_extended_sars[self.kilobots_columns], sample_size=500)
self.state_kernel.kilobots_bandwidth = bandwidth_kb * kernel_params['bandwidth_factor_kb']
self.state_action_kernel.kilobots_bandwidth = bandwidth_kb * kernel_params['bandwidth_factor_kb']
self.policy.kernel.kilobots_bandwidth = bandwidth_kb * gp_params['bandwidth_factor_kb']
if self._light_dimensions:
bandwidth_l = compute_median_bandwidth(_extended_sars[self.light_columns], sample_size=500)
self.state_kernel.light_bandwidth = bandwidth_l * kernel_params['bandwidth_factor_light']
self.state_action_kernel.light_bandwidth = bandwidth_l * kernel_params['bandwidth_factor_light']
self.policy.kernel.light_bandwidth = bandwidth_l * gp_params['bandwidth_factor_light']
if self.extra_columns is not None:
bandwidth_extra = compute_median_bandwidth(_extended_sars[self.extra_columns], sample_size=500)
self.state_kernel.extra_dim_bandwidth = bandwidth_extra * kernel_params['bandwidth_factor_extra']
self.state_action_kernel.extra_dim_bandwidth = bandwidth_extra * kernel_params['bandwidth_factor_extra']
self.policy.kernel.extra_dim_bandwidth = bandwidth_extra * gp_params['bandwidth_factor_extra']
# self.state_kernel.extra_dim_bandwidth = kernel_params['bandwidth_factor_extra']
# self.state_action_kernel.extra_dim_bandwidth = kernel_params['bandwidth_factor_extra']
# self.policy.kernel.extra_dim_bandwidth = gp_params['bandwidth_factor_extra']
bandwidth_a = compute_median_bandwidth(_extended_sars['A'], sample_size=500)
bandwidth_a *= kernel_params['bandwidth_factor_action']
self.state_action_kernel.action_bandwidth = bandwidth_a
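# --- Editorial sketch (not part of the original code): the median heuristic behind
# compute_median_bandwidth presumably works roughly like this — subsample the data,
# take pairwise absolute differences per dimension, and use their median as the
# bandwidth. The function name and details below are assumptions for illustration only.
def _median_bandwidth_sketch(samples: np.ndarray, sample_size: int = 500) -> np.ndarray:
    """Illustrative per-dimension median heuristic; not the library implementation."""
    idx = np.random.choice(len(samples), size=min(sample_size, len(samples)), replace=False)
    sub = samples[idx]
    diffs = np.abs(sub[:, None, :] - sub[None, :, :])   # pairwise |x_i - x_j| per dimension
    iu = np.triu_indices(len(sub), k=1)                  # unique pairs only
    return np.median(diffs[iu], axis=0)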
logger.debug('selecting SARS samples.')
if _extended_sars.shape[0] <= sampling_params['num_SARS_samples']:
self.sars = _extended_sars
else:
# _sampling_weights = _extended_sars.R.values - _extended_sars.R.min()
self.sars = _extended_sars.sample(sampling_params['num_SARS_samples']) # , weights=_sampling_weights)
self.sars.reset_index(drop=True, inplace=True)
del _extended_sars
# compute feature matrices
logger.debug('selecting lstd samples.')
# lstd_reference = select_reference_set_by_kernel_activation(data=self.sars[['S', 'A']],
# size=self._params['lstd']['num_features'],
# kernel_function=self.state_kernel,
# batch_size=10)
#
# self.lstd_samples = self.sars.loc[lstd_reference][['S', 'A']]
self.lstd_samples = self.sars[['S', 'A']].sample(self._params['lstd']['num_features'])
def state_features(state):
if state.ndim == 1:
state = state.reshape((1, -1))
return self.state_kernel(state, self.lstd_samples['S'].values)
self._old_state_features = state_features
def state_action_features(state, action):
if state.ndim == 1:
state = state.reshape((1, -1))
if action.ndim == 1:
action = action.reshape((1, -1))
return self.state_action_kernel(np.c_[state, action], self.lstd_samples.values)
self._old_state_action_features = state_action_features
# compute state-action features
logger.info('compute state-action features')
phi_SA = state_action_features(self.sars['S'].values, self.sars[['A']].values)
# compute state features
logger.info('compute state features')
phi_S = state_features(self.sars['S'].values)
for i in range(self._params['learn_iterations']):
# compute next state-action features
logger.info('compute next state-action features')
policy_samples = self._params['lstd']['num_policy_samples']
# next_actions = np.array([self.policy(self.sars['S_'].values) for _ in range(policy_samples)]).mean(axis=0)
next_actions = self.policy.get_mean(self.sars['S_'].values)
phi_SA_next = state_action_features(self.sars['S_'].values, next_actions)
# learn theta (parameters of Q-function) using lstd
logger.info('learning theta [LSTD]')
lstd = LeastSquaresTemporalDifference()
lstd.discount_factor = self._params['lstd']['discount_factor']
self.theta = lstd.learn_q_function(phi_SA, phi_SA_next, rewards=self.sars[['R']].values)
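# --- Editorial sketch (an assumption, not the LeastSquaresTemporalDifference API): LSTD-Q
# estimates theta from a linear system built from the sampled features, roughly
# theta = (Phi^T (Phi - gamma * Phi_next) + ridge * I)^{-1} Phi^T r.
def _lstd_q_sketch(phi_sa, phi_sa_next, rewards, discount=0.99, ridge=1e-6):
    """Illustrative closed-form LSTD-Q solve; the ridge term is an assumed stabiliser."""
    a_mat = phi_sa.T.dot(phi_sa - discount * phi_sa_next)
    a_mat += ridge * np.eye(a_mat.shape[0])   # small regulariser for numerical stability
    b_vec = phi_sa.T.dot(rewards)
    return np.linalg.solve(a_mat, b_vec)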
# compute q-function
logger.debug('compute q-function')
q_fct = phi_SA.dot(self.theta)
# TODO add generalized advantage here
# compute sample weights using AC-REPS
logger.info('learning weights [AC-REPS]')
ac_reps = ActorCriticReps()
ac_reps.epsilon = self._params['ac_reps']['epsilon']
ac_reps.alpha = self._params['ac_reps']['alpha']
weights, self.theta = ac_reps.compute_weights(q_fct, phi_S, return_theta=True)
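# --- Editorial sketch (an assumption, not the ActorCriticReps implementation): REPS-style
# sample weights are an exponential reweighting of the advantages with temperature eta,
# where eta itself comes from optimising a dual subject to the KL bound epsilon (omitted here).
def _reps_weights_sketch(advantages, eta):
    """Illustrative exponential reweighting; eta is assumed to be given."""
    shifted = advantages - advantages.max()   # subtract max for numerical stability
    w = np.exp(shifted / eta)
    return w / w.sum()                        # normalised sample weights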
# get subset for sparse GP
logger.debug('select samples for GP')
# weights_sort_index = np.argsort(weights)
# gp_reference = pd.Index(weights_sort_index[-self._params['gp']['num_sparse_states']:])
gp_reference = pd.Index(np.random.choice(self.sars.index.values,
size=self._params['gp']['num_sparse_states'],
replace=False, p=weights))
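# Note (editorial): np.random.choice requires p to sum to one, so this draw assumes the
# AC-REPS weights are already normalised; replace=False keeps the sparse GP inputs distinct.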
# gp_reference = select_reference_set_by_kernel_activation(data=self._SARS[['S']],
# size=self._params['gp']['num_sparse_states'],
# kernel_function=self._state_kernel,
# batch_size=10, start_from=lstd_reference)
gp_samples = self.sars['S'].loc[gp_reference].values
# gp_samples = self._SARS['S'].sample(self._params['gp']['num_sparse_states']).values
# fit weighted GP to samples
logger.info('fitting GP to policy')
# self.policy = self._init_policy(self.state_kernel,
# (self.sampler.env.action_space.low, self.sampler.env.action_space.high))
self.policy.train(inputs=self.sars['S'].values, outputs=self.sars['A'].values, weights=weights,
sparse_inputs=gp_samples, optimize=True)
# evaluate policy
logger.info('evaluating policy')
self.sampler.seed = 5555
self.policy.eval_mode = True
self.eval_sars, self.eval_info = self.sampler(self.policy,
num_episodes=self._params['eval']['num_episodes'],
num_steps_per_episode=self._params['eval'][
'num_steps_per_episode'])
self.policy.eval_mode = False
eval_episode_reward = self.eval_sars['R'].groupby(level=0).sum()
eval_episode_reward_mean = eval_episode_reward.mean()
logger.info('eval mean episode reward: {:.6f}'.format(eval_episode_reward_mean))
return_dict = dict(samples_episode_reward_mean=samples_episode_reward_mean,
eval_episode_reward_mean=eval_episode_reward_mean)
gc.collect()
return return_dict
def reset(self, config: dict, rep: int) -> None:
self._init_SARS()
self.state_kernel, self.state_action_kernel = self._init_kernels()
self.sampler = self._init_sampler()
self.policy = self._init_policy(output_bounds=(self.sampler.env.action_space.low,
self.sampler.env.action_space.high))
def finalize(self):
del self.sampler
self.sampler = None
def _init_kernels(self):
kernel_params = self._params['kernel']
sampling_params = self._params['sampling']
if kernel_params['kb_dist'] == 'mean':
from kb_learning.kernel import compute_mean_position
self.state_preprocessor = compute_mean_position
elif kernel_params['kb_dist'] == 'mean-cov':
from kb_learning.kernel import compute_mean_and_cov_position
self.state_preprocessor = compute_mean_and_cov_position
if sampling_params['light_type'] == 'circular':
self._light_dimensions = 2
self._action_dimensions = 2
elif sampling_params['light_type'] == 'linear':
self._light_dimensions = 0
self._action_dimensions = 1
elif sampling_params['light_type'] == 'dual':
self._light_dimensions = 4
self._action_dimensions = 4
extra_dim = 0
if sampling_params['w_factor'] is None:
extra_dim += 1
if sampling_params['observe_object']:
if sampling_params['observe_object'] == 'orientation':
extra_dim += 2
elif sampling_params['observe_object'] == 'position':
extra_dim += 2
elif sampling_params['observe_object'] == 'pose' or sampling_params['observe_object'] is True:
extra_dim += 4
state_kernel = KilobotEnvKernel(rho=kernel_params['rho'],
variance=kernel_params['variance'],
kilobots_dim=sampling_params['num_kilobots'] * 2,
light_dim=self._light_dimensions,
extra_dim=extra_dim,
kilobots_dist_class=kernel_params['kb_dist'],
light_dist_class=kernel_params['l_dist'],
extra_dim_dist_class=kernel_params['w_dist'])
state_action_kernel = KilobotEnvKernel(rho=kernel_params['rho'],
variance=kernel_params['variance'],
kilobots_dim=sampling_params['num_kilobots'] * 2,
light_dim=self._light_dimensions,
extra_dim=extra_dim,
action_dim=self._action_dimensions,
kilobots_dist_class=kernel_params['kb_dist'],
light_dist_class=kernel_params['l_dist'],
extra_dim_dist_class=kernel_params['w_dist'],
action_dist_class=kernel_params['a_dist'])
return state_kernel, state_action_kernel
def _init_policy(self, output_bounds):
kernel_params = self._params['gp']
gp_params = self._params['gp']
sampling_params = self._params['sampling']
extra_dim = 0
if sampling_params['w_factor'] is None:
extra_dim += 1
if sampling_params['observe_object']:
if sampling_params['observe_object'] == 'orientation':
extra_dim += 2
elif sampling_params['observe_object'] == 'position':
extra_dim += 2
elif sampling_params['observe_object'] == 'pose' or sampling_params['observe_object'] is True:
extra_dim += 4
gp_kernel = KilobotEnvKernel(rho=gp_params['rho'],
variance=gp_params['prior_variance'],
kilobots_dim=sampling_params['num_kilobots'] * 2,
light_dim=self._light_dimensions,
extra_dim=extra_dim,
kilobots_dist_class=kernel_params['kb_dist'],
light_dist_class=kernel_params['l_dist'],
extra_dim_dist_class=kernel_params['w_dist'])
gp_kernel.variance.fix()
if self._params['gp']['use_prior_mean']:
if sampling_params['light_type'] == 'circular':
mean_function = step_towards_center([-2, -1])
elif sampling_params['light_type'] == 'linear':
mean_function = angle_from_swarm_mean(range(sampling_params['num_kilobots'] * 2))
else:
raise InvalidParameterArgument('Unknown argument for parameter \'sampling.light_type\': \'{}\''.format(
sampling_params['light_type']))
else:
mean_function = None
policy = SparseWeightedGP(kernel=gp_kernel, noise_variance=self._params['gp']['noise_variance'],
mean_function=mean_function, output_bounds=output_bounds,
output_dim=output_bounds[0].shape[0])
return policy
def _init_SARS(self):
sampling_params = self._params['sampling']
kb_columns_level = ['kb_{}'.format(i) for i in range(self._params['sampling']['num_kilobots'])]
self.kilobots_columns = pd.MultiIndex.from_product([['S'], kb_columns_level, ['x', 'y']])
self.state_columns = self.kilobots_columns.copy()
self.state_object_columns = self.state_columns.copy()
self.object_columns = pd.MultiIndex.from_product([['S'], ['object'], ['x', 'y', 'theta']])
if sampling_params['light_type'] == 'circular':
self.light_columns = pd.MultiIndex.from_product([['S'], ['light'], ['x', 'y']])
self.action_columns = pd.MultiIndex.from_product([['A'], ['x', 'y'], ['']])
self.state_columns = self.state_columns.append(self.light_columns)
elif sampling_params['light_type'] == 'linear':
self.light_columns = pd.MultiIndex.from_product([['S'], ['light'], ['theta']])
self.action_columns = pd.MultiIndex.from_product([['A'], ['theta'], ['']])
elif sampling_params['light_type'] == 'dual':
self.light_columns = pd.MultiIndex.from_product([['S'], ['light1', 'light2'], ['x', 'y']])
self.action_columns = pd.MultiIndex.from_product([['A'], ['light1', 'light2'], ['x', 'y']])
self.state_columns = self.state_columns.append(self.light_columns)
self.state_object_columns = self.state_object_columns.append(self.light_columns)
self.extra_columns = None
if sampling_params['w_factor'] is None:
weight_columns = pd.MultiIndex.from_product([['S'], ['extra'], ['w']])
self.extra_columns = weight_columns
self.state_object_columns = self.state_object_columns.append(weight_columns)
if sampling_params['observe_object']:
object_observation_columns = None
if sampling_params['observe_object'] == 'orientation':
object_observation_columns = pd.MultiIndex.from_product([['S'], ['extra'], ['sin_o_t', 'cos_o_t']])
elif sampling_params['observe_object'] == 'position':
object_observation_columns = pd.MultiIndex.from_product([['S'], ['extra'], ['o_x', 'o_y']])
elif sampling_params['observe_object'] == 'pose' or sampling_params['observe_object'] is True:
object_observation_columns = pd.MultiIndex.from_product([['S'], ['extra'], ['o_x', 'o_y', 'sin_o_t', 'cos_o_t']])
if self.extra_columns is None:
self.extra_columns = object_observation_columns
else:
self.extra_columns = self.extra_columns.append(object_observation_columns)
if self.extra_columns is not None:
self.state_columns = self.state_columns.append(self.extra_columns)
# self.state_object_columns = self.state_object_columns.append(self.extra_columns)
self.state_object_columns = self.state_object_columns.append(self.object_columns)
self.reward_columns = pd.MultiIndex.from_arrays([['R'], [''], ['']])
self.next_state_columns = self.state_columns.copy()
self.next_state_columns.set_levels(['S_'], 0, inplace=True)
self.sars_columns = self.state_columns.append(self.action_columns).append(self.reward_columns).append(
self.next_state_columns)
self.sars = pd.DataFrame(columns=self.sars_columns)
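# Editorial illustration (values assumed): for light_type == 'circular' and two kilobots the
# SARS columns look roughly like ('S','kb_0','x'), ('S','kb_0','y'), ('S','kb_1','x'),
# ('S','kb_1','y'), ('S','light','x'), ('S','light','y'), ('A','x',''), ('A','y',''),
# ('R','',''), followed by the mirrored ('S_', ...) next-state columns.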
def _init_sampler(self):
sampling_params = self._params['sampling']
self.SamplerClass = ParallelSARSSampler
# self.SamplerClass = SARSSampler
return self.SamplerClass(object_shape=sampling_params['object_shape'],
object_width=sampling_params['object_width'],
object_height=sampling_params['object_height'],
observe_object=sampling_params['observe_object'],
light_type=sampling_params['light_type'],
light_radius=sampling_params['light_radius'],
registration_function=register_object_relative_env,
num_episodes=sampling_params['num_episodes'],
num_steps_per_episode=sampling_params['num_steps_per_episode'],
num_kilobots=sampling_params['num_kilobots'],
sars_column_index=self.sars_columns,
state_column_index=self.state_object_columns,
w_factor=sampling_params['w_factor'],
num_workers=sampling_params['num_workers'],
seed=self._seed,
# mp_context='spawn')
mp_context=self._MP_CONTEXT)
def save_state(self, config: dict, rep: int, n: int) -> None:
# save policy
logger.info('pickling policy...')
policy_file_name = os.path.join(self._log_path_rep, 'policy_it{:02d}.pkl'.format(self._it))
with open(policy_file_name, mode='w+b') as policy_file:
pickle.dump(self.policy.to_dict(), policy_file)
# save kernels
logger.info('pickling kernel...')
state_kernel_file_name = os.path.join(self._log_path_rep, 'state_kernel_it{:02d}.pkl'.format(self._it))
with open(state_kernel_file_name, mode='w+b') as state_kernel_file:
pickle.dump(self.state_kernel.to_dict(), state_kernel_file)
state_action_kernel_file_name = os.path.join(self._log_path_rep,
'state_action_kernel_it{:02d}.pkl'.format(self._it))
with open(state_action_kernel_file_name, mode='w+b') as state_action_file:
pickle.dump(self.state_action_kernel.to_dict(), state_action_file)
if self._params['sampling']['save_trajectories']:
logger.info('pickling sampling trajectories...')
it_sars_file_name = os.path.join(self._log_path_rep, 'it_sars_it{:02d}.pkl'.format(self._it))
self.it_sars.to_pickle(it_sars_file_name, compression='gzip')
it_info_file_name = os.path.join(self._log_path_rep, 'it_info_it{:02d}.pkl'.format(self._it))
self.it_info.to_pickle(it_info_file_name, compression='gzip')
if self._params['eval']['save_trajectories']:
logger.info('pickling eval trajectories...')
eval_sars_file_name = os.path.join(self._log_path_rep, 'eval_sars_it{:02d}.pkl'.format(self._it))
self.eval_sars.to_pickle(eval_sars_file_name, compression='gzip')
eval_info_file_name = os.path.join(self._log_path_rep, 'eval_info_it{:02d}.pkl'.format(self._it))
self.eval_info.to_pickle(eval_info_file_name, compression='gzip')
# save theta
logger.info('pickling theta and lstd samples...')
theta_file_name = os.path.join(self._log_path_rep, 'theta_it{:02d}.npy'.format(self._it))
np.save(theta_file_name, self.theta)
lstd_samples_file_name = os.path.join(self._log_path_rep, 'lstd_samples_it{:02d}.pkl'.format(self._it))
self.lstd_samples.to_pickle(lstd_samples_file_name, compression='gzip')
# save SARS data
logger.info('pickling SARS...')
SARS_file_name = os.path.join(self._log_path_rep, 'last_SARS.pkl.gz')
self.sars.to_pickle(SARS_file_name, compression='gzip')
def restore_state(self, config: dict, rep: int, n: int) -> bool:
# restore policy
logger.info('restoring policy...')
policy_file_name = os.path.join(self._log_path_rep, 'policy_it{:02d}.pkl'.format(n))
with open(policy_file_name, mode='r+b') as policy_file:
policy_dict = pickle.load(policy_file)
self.policy = SparseWeightedGP.from_dict(policy_dict)
self.sampler.policy = self.policy
# restore kernels
logger.info('restoring kernels...')
state_kernel_file_name = os.path.join(self._log_path_rep, 'state_kernel_it{:02d}.pkl'.format(n))
with open(state_kernel_file_name, mode='r+b') as state_kernel_file:
state_kernel_dict = pickle.load(state_kernel_file)
self.state_kernel = KilobotEnvKernel.from_dict(state_kernel_dict)
state_action_kernel_file_name = os.path.join(self._log_path_rep, 'state_action_kernel_it{:02d}.pkl'.format(n))
with open(state_action_kernel_file_name, mode='r+b') as state_action_kernel_file:
state_action_kernel_dict = pickle.load(state_action_kernel_file)
self.state_action_kernel = KilobotEnvKernel.from_dict(state_action_kernel_dict)
if self._params['sampling']['save_trajectories']:
logger.info('restoring sampling trajectories...')
it_sars_file_name = os.path.join(self._log_path_rep, 'it_sars_it{:02d}.pkl'.format(n))
self.it_sars = pd.read_pickle(it_sars_file_name, compression='gzip')
it_info_file_name = os.path.join(self._log_path_rep, 'it_info_it{:02d}.pkl'.format(n))
self.it_info = pd.read_pickle(it_info_file_name, compression='gzip')
if self._params['eval']['save_trajectories']:
logger.info('restoring eval trajectories...')
eval_sars_file_name = os.path.join(self._log_path_rep, 'eval_sars_it{:02d}.pkl'.format(n))
self.eval_sars = pd.read_pickle(eval_sars_file_name, compression='gzip')
eval_info_file_name = os.path.join(self._log_path_rep, 'eval_info_it{:02d}.pkl'.format(n))
self.eval_info = pd.read_pickle(eval_info_file_name, compression='gzip')
# restore theta and lstd samples
logger.info('restoring theta and lstd samples...')
theta_file_name = os.path.join(self._log_path_rep, 'theta_it{:02d}.npy'.format(n))
self.theta = np.load(theta_file_name)
lstd_samples_file_name = os.path.join(self._log_path_rep, 'lstd_samples_it{:02d}.pkl'.format(n))
self.lstd_samples =
| pd.read_pickle(lstd_samples_file_name, compression='gzip') | pandas.read_pickle |
# -*- coding: utf-8 -*-
"""System transmission plots.
This code creates transmission line and interface plots.
@author: <NAME>, <NAME>
"""
import os
import logging
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.colors as mcolors
import matplotlib.dates as mdates
import marmot.config.mconfig as mconfig
from marmot.plottingmodules.plotutils.plot_library import PlotLibrary
from marmot.plottingmodules.plotutils.plot_data_helper import PlotDataHelper
from marmot.plottingmodules.plotutils.plot_exceptions import (MissingInputData, DataSavedInModule,
UnderDevelopment, InputSheetError, MissingMetaData, UnsupportedAggregation, MissingZoneData)
class MPlot(PlotDataHelper):
"""transmission MPlot class.
All the plotting modules use this same class name.
This class contains plotting methods that are grouped based on the
current module name.
The transmission.py module contains methods that are
related to the transmission network.
MPlot inherits from the PlotDataHelper class to assist in creating figures.
"""
def __init__(self, argument_dict: dict):
"""
Args:
argument_dict (dict): Dictionary containing all
arguments passed from MarmotPlot.
"""
# iterate over items in argument_dict and set as properties of class
# see key_list in Marmot_plot_main for list of properties
for prop in argument_dict:
self.__setattr__(prop, argument_dict[prop])
# Instantiation of MPlotHelperFunctions
super().__init__(self.Marmot_Solutions_folder, self.AGG_BY, self.ordered_gen,
self.PLEXOS_color_dict, self.Scenarios, self.ylabels,
self.xlabels, self.gen_names_dict, self.TECH_SUBSET,
Region_Mapping=self.Region_Mapping)
self.logger = logging.getLogger('marmot_plot.'+__name__)
self.font_defaults = mconfig.parser("font_settings")
def line_util(self, **kwargs):
"""Creates a timeseries line plot of transmission lineflow utilization for each region.
Utilization is plotted between 0 and 1 on the y-axis.
The plot will default to showing the 10 highest utilized lines. A Line category
can also be passed instead, using the property field in the Marmot_plot_select.csv
Each scenarios is plotted on a separate Facet plot.
This methods calls _util() to create the figure.
Returns:
dict: Dictionary containing the created plot and its data table.
"""
outputs = self._util(**kwargs)
return outputs
def line_hist(self, **kwargs):
"""Creates a histogram of transmission lineflow utilization for each region.
Utilization is plotted between 0 and 1 on the x-axis, with # lines on the y-axis.
Each bar is equal to a 0.05 utilization rate
The plot will default to showing all lines. A Line category can also be passed
instead using the property field in the Marmot_plot_select.csv
Each scenarios is plotted on a separate Facet plot.
This methods calls _util() and passes the hist=True argument to create the figure.
Returns:
dict: Dictionary containing the created plot and its data table.
"""
outputs = self._util(hist=True, **kwargs)
return outputs
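# --- Editorial sketch: the 0.05-wide utilization bins described in the docstring can be
# reproduced with plain numpy; PlotLibrary.histogram is assumed to bin in a similar way.
def _utilization_hist_sketch(annual_util: pd.Series):
    """Illustrative binning of per-line utilization into 0.05-wide bins between 0 and 1."""
    counts, edges = np.histogram(annual_util.values, bins=np.arange(0.0, 1.05, 0.05))
    return counts, edges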
def _util(self, hist: bool = False, prop: str = None,
start_date_range: str = None,
end_date_range: str = None, **_):
"""Creates utilization plots, line plot and histograms
This methods is called from line_util() and line_hist()
Args:
hist (bool, optional): If True creates a histogram of utilization.
Defaults to False.
prop (str, optional): Optional PLEXOS line category to display.
Defaults to None.
start_date_range (str, optional): Defines a start date at which to represent data from.
Defaults to None.
end_date_range (str, optional): Defines a end date at which to represent data to.
Defaults to None.
Returns:
dict: Dictionary containing the created plot and its data table.
"""
outputs = {}
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True,"line_Flow",self.Scenarios),
(True,"line_Import_Limit",self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
if 1 in check_input_data:
return MissingInputData()
# sets up x, y dimensions of plot
ncols, nrows = self.set_facet_col_row_dimensions(facet=True,
multi_scenario=self.Scenarios)
grid_size = ncols*nrows
# Used to calculate any excess axis to delete
plot_number = len(self.Scenarios)
excess_axs = grid_size - plot_number
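# Editorial example: with 3 scenarios on a 2x2 facet grid, grid_size - plot_number = 4 - 3 = 1
# unused axis, which mplt.remove_excess_axs() deletes further below.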
for zone_input in self.Zones:
self.logger.info(f"For all lines touching Zone = {zone_input}")
mplt = PlotLibrary(nrows, ncols, sharey=True,
squeeze=False, ravel_axs=True)
fig, axs = mplt.get_figure()
plt.subplots_adjust(wspace=0.1, hspace=0.25)
data_table=[]
for n, scenario in enumerate(self.Scenarios):
self.logger.info(f"Scenario = {str(scenario)}")
# gets correct metadata based on area aggregation
if self.AGG_BY=='zone':
zone_lines = self.meta.zone_lines(scenario)
else:
zone_lines = self.meta.region_lines(scenario)
try:
zone_lines = zone_lines.set_index([self.AGG_BY])
except:
self.logger.warning("Column to Aggregate by is missing")
continue
try:
zone_lines = zone_lines.xs(zone_input)
zone_lines=zone_lines['line_name'].unique()
except KeyError:
self.logger.warning('No data to plot for scenario')
outputs[zone_input] = MissingZoneData()
continue
flow = self["line_Flow"].get(scenario).copy()
#Limit to only lines touching to this zone
flow = flow[flow.index.get_level_values('line_name').isin(zone_lines)]
if self.shift_leapday == True:
flow = self.adjust_for_leapday(flow)
limits = self["line_Import_Limit"].get(scenario).copy()
limits = limits.droplevel('timestamp').drop_duplicates()
limits.mask(limits[0]==0.0,other=0.01,inplace=True) #if limit is zero set to small value
# Check whether a line category (prop) was passed; if it is NaN, keep all lines.
if pd.notna(prop):
self.logger.info(f"Line category = {str(prop)}")
line_relations = self.meta.lines(scenario).rename(columns={"name":"line_name"}).set_index(["line_name"])
flow=pd.merge(flow,line_relations, left_index=True,
right_index=True)
flow=flow[flow["category"] == prop]
flow=flow.drop('category',axis=1)
flow = pd.merge(flow,limits[0].abs(),on = 'line_name',how='left')
flow['Util']=(flow['0_x'].abs()/flow['0_y']).fillna(0)
#If greater than 1 because exceeds flow limit, report as 1
flow.loc[flow['Util'] > 1, 'Util'] = 1
annual_util=flow['Util'].groupby(["line_name"]).mean().rename(scenario)
# top annual utilized lines
top_utilization = annual_util.nlargest(10, keep='first')
color_dict = dict(zip(self.Scenarios,self.color_list))
if hist == True:
mplt.histogram(annual_util, color_dict,label=scenario, sub_pos=n)
else:
for line in top_utilization.index.get_level_values(level='line_name').unique():
duration_curve = flow.loc[line].sort_values(by='Util',
ascending=False).reset_index(drop=True)
mplt.lineplot(duration_curve, 'Util' ,label=line, sub_pos=n)
axs[n].set_ylim((0,1.1))
data_table.append(annual_util)
mplt.add_legend()
#Remove extra axes
mplt.remove_excess_axs(excess_axs,grid_size)
# add facet labels
mplt.add_facet_labels(xlabels=self.xlabels,
ylabels = self.ylabels)
if hist == True:
if pd.notna(prop):
prop_name = prop
else:
prop_name = 'All Lines'
plt.ylabel('Number of lines', color='black',
rotation='vertical', labelpad=30)
plt.xlabel(f'Line Utilization: {prop_name}', color='black',
rotation='horizontal', labelpad=30)
else:
if pd.notna(prop):
prop_name = prop
else:
prop_name = 'Top 10 Lines'
plt.ylabel(f'Line Utilization: {prop_name}', color='black',
rotation='vertical', labelpad=60)
plt.xlabel('Intervals', color='black',
rotation='horizontal', labelpad=20)
if mconfig.parser("plot_title_as_region"):
mplt.add_main_title(zone_input)
try:
del annual_util,
except:
continue
Data_Out = pd.concat(data_table)
outputs[zone_input] = {'fig': fig,'data_table':Data_Out}
return outputs
def int_flow_ind(self, figure_name: str = None, prop: str = None,
start_date_range: str = None,
end_date_range: str = None, **_):
"""Creates a line plot of interchange flows and their import and export limits.
Each interchange is potted on a separate facet plot.
The plot includes every interchange that originates or ends in the aggregation zone.
This can be adjusted by passing a comma separated string of interchanges to the property input.
The code will create either a timeseries or duration curve depending on
if the word 'duration_curve' is in the figure_name.
To make a duration curve, ensure the word 'duration_curve' is found in the figure_name.
Args:
figure_name (str, optional): User defined figure output name.
Defaults to None.
prop (str, optional): Comma separated string of interchanges.
Defaults to None.
start_date_range (str, optional): Defines a start date from which to represent data.
Defaults to None.
end_date_range (str, optional): Defines an end date to which to represent data.
Defaults to None.
Returns:
dict: dictionary containing the created plot and its data table
"""
duration_curve=False
if 'duration_curve' in figure_name:
duration_curve = True
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True,"interface_Flow",self.Scenarios),
(True,"interface_Import_Limit",self.Scenarios),
(True,"interface_Export_Limit",self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
if 1 in check_input_data:
return MissingInputData()
scenario = self.Scenarios[0]
outputs = {}
if pd.notna(start_date_range):
self.logger.info(f"Plotting specific date range: \
{str(start_date_range)} to {str(end_date_range)}")
for zone_input in self.Zones:
self.logger.info(f"For all interfaces touching Zone = {zone_input}")
Data_Table_Out = pd.DataFrame()
# gets correct metadata based on area aggregation
if self.AGG_BY=='zone':
zone_lines = self.meta.zone_lines(scenario)
else:
zone_lines = self.meta.region_lines(scenario)
try:
zone_lines = zone_lines.set_index([self.AGG_BY])
except:
self.logger.info("Column to Aggregate by is missing")
continue
zone_lines = zone_lines.xs(zone_input)
zone_lines = zone_lines['line_name'].unique()
#Map lines to interfaces
all_ints = self.meta.interface_lines(scenario) #Map lines to interfaces
all_ints.index = all_ints.line
ints = all_ints.loc[all_ints.index.intersection(zone_lines)]
#flow = flow[flow.index.get_level_values('interface_name').isin(ints.interface)] #Limit to only interfaces touching to this zone
#flow = flow.droplevel('interface_category')
export_limits = self["interface_Export_Limit"].get(scenario).copy().droplevel('timestamp')
export_limits.mask(export_limits[0]==0.0,other=0.01,inplace=True) #if limit is zero set to small value
export_limits = export_limits[export_limits.index.get_level_values('interface_name').isin(ints.interface)]
export_limits = export_limits[export_limits[0].abs() < 99998] #Filter out unenforced interfaces.
#Drop unnecessary columns.
export_limits.reset_index(inplace = True)
export_limits.drop(columns=['interface_category', 'units'], inplace=True)
export_limits.set_index('interface_name',inplace = True)
import_limits = self["interface_Import_Limit"].get(scenario).copy().droplevel('timestamp')
import_limits.mask(import_limits[0]==0.0,other=0.01,inplace=True) #if limit is zero set to small value
import_limits = import_limits[import_limits.index.get_level_values('interface_name').isin(ints.interface)]
import_limits = import_limits[import_limits[0].abs() < 99998] #Filter out unenforced interfaces.
reported_ints = import_limits.index.get_level_values('interface_name').unique()
#Drop unnecessary columns.
import_limits.reset_index(inplace = True)
import_limits.drop(columns=['interface_category', 'units'], inplace=True)
import_limits.set_index('interface_name',inplace = True)
#Extract time index
ti = self["interface_Flow"][self.Scenarios[0]].index.get_level_values('timestamp').unique()
if pd.notna(prop):
interf_list = prop.split(',')
self.logger.info('Plotting only interfaces specified in Marmot_plot_select.csv')
self.logger.info(interf_list)
else:
interf_list = reported_ints.copy()
self.logger.info('Plotting full time series results.')
xdim,ydim = self.set_x_y_dimension(len(interf_list))
mplt = PlotLibrary(ydim, xdim, squeeze=False,
ravel_axs=True)
fig, axs = mplt.get_figure()
grid_size = xdim * ydim
excess_axs = grid_size - len(interf_list)
plt.subplots_adjust(wspace=0.05, hspace=0.2)
missing_ints = 0
chunks = []
n = -1
for interf in interf_list:
n += 1
#Remove leading spaces
if interf[0] == ' ':
interf = interf[1:]
if interf in reported_ints:
chunks_interf = []
single_exp_lim = export_limits.loc[interf] / 1000 #TODO: Use auto unit converter
single_imp_lim = import_limits.loc[interf] / 1000
#Check if all hours have the same limit.
check = single_exp_lim.to_numpy()
identical = check[0] == check.all()
limits = pd.concat([single_exp_lim,single_imp_lim],axis = 1)
limits.columns = ['export limit','import limit']
limits.index = ti
for scenario in self.Scenarios:
flow = self["interface_Flow"].get(scenario)
single_int = flow.xs(interf, level='interface_name') / 1000
single_int.index = single_int.index.droplevel(['interface_category','units'])
single_int.columns = [interf]
single_int = single_int.reset_index().set_index('timestamp')
limits = limits.reset_index().set_index('timestamp')
if self.shift_leapday == True:
single_int = self.adjust_for_leapday(single_int)
if pd.notna(start_date_range):
single_int = single_int[start_date_range : end_date_range]
limits = limits[start_date_range : end_date_range]
if duration_curve:
single_int = self.sort_duration(single_int,interf)
mplt.lineplot(single_int, interf,
label=f"{scenario}\n interface flow",
sub_pos=n)
# Only print limits if it doesn't change monthly or if you are plotting a time series.
# Otherwise the limit lines could be misleading.
if not duration_curve or identical[0]:
if scenario == self.Scenarios[-1]:
#Only plot limits for last scenario.
limits_color_dict = {'export limit': 'red', 'import limit': 'green'}
mplt.lineplot(limits, 'export limit',
label='export limit', color=limits_color_dict,
linestyle='--', sub_pos=n)
mplt.lineplot(limits, 'import limit',
label='import limit', color=limits_color_dict,
linestyle='--', sub_pos=n)
#For output time series .csv
scenario_names = pd.Series([scenario] * len(single_int), name='Scenario')
single_int_out = single_int.set_index([scenario_names], append=True)
chunks_interf.append(single_int_out)
Data_out_line = pd.concat(chunks_interf,axis = 0)
Data_out_line.columns = [interf]
chunks.append(Data_out_line)
else:
self.logger.warning(f"{interf} not found in results. Have you tagged "
"it with the 'Must Report' property in PLEXOS?")
excess_axs += 1
missing_ints += 1
continue
axs[n].set_title(interf)
if not duration_curve:
mplt.set_subplot_timeseries_format(sub_pos=n)
if missing_ints == len(interf_list):
outputs = MissingInputData()
return outputs
Data_Table_Out = pd.concat(chunks,axis = 1)
Data_Table_Out = Data_Table_Out.reset_index()
index_name = 'level_0' if duration_curve else 'timestamp'
Data_Table_Out = Data_Table_Out.pivot(index = index_name,columns = 'Scenario')
#Limits_Out = pd.concat(limits_chunks,axis = 1)
#Limits_Out.index = ['Export Limit','Import Limit']
# Data_Table_Out = Data_Table_Out.reset_index()
# Data_Table_Out = Data_Table_Out.groupby(Data_Table_Out.index // 24).mean()
# Data_Table_Out.index = pd.date_range(start = '1/1/2024',end = '12/31/2024',freq = 'D')
mplt.add_legend()
plt.ylabel('Flow (GW)', color='black', rotation='vertical',
labelpad=30)
if duration_curve:
plt.xlabel('Sorted hour of the year', color='black', labelpad=30)
plt.tight_layout(rect=[0, 0.03, 1, 0.97])
if mconfig.parser("plot_title_as_region"):
mplt.add_main_title(zone_input)
outputs[zone_input] = {'fig': fig, 'data_table': Data_Table_Out}
#Limits_Out.to_csv(os.path.join(self.Marmot_Solutions_folder, 'Figures_Output',self.AGG_BY + '_transmission','Individual_Interface_Limits.csv'))
return outputs
def int_flow_ind_seasonal(self, figure_name: str = None, prop: str = None,
start_date_range: str = None,
end_date_range: str = None, **_):
"""#TODO: Finish Docstring
Args:
figure_name (str, optional): User defined figure output name.
Defaults to None.
prop (str, optional): Comma separated string of interchanges.
Defaults to None.
start_date_range (str, optional): Defines a start date from which to represent data.
Defaults to None.
end_date_range (str, optional): Defines an end date to which to represent data.
Defaults to None.
Returns:
dict: dictionary containing the created plot and its data table
"""
#TODO: Use auto unit converter in method
duration_curve=False
if 'duration_curve' in figure_name:
duration_curve = True
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True,"interface_Flow",self.Scenarios),
(True,"interface_Import_Limit",self.Scenarios),
(True,"interface_Export_Limit",self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
if 1 in check_input_data:
return MissingInputData()
scenario = self.Scenarios[0]
outputs = {}
for zone_input in self.Zones:
self.logger.info("For all interfaces touching Zone = "+zone_input)
Data_Table_Out = pd.DataFrame()
# gets correct metadata based on area aggregation
if self.AGG_BY=='zone':
zone_lines = self.meta.zone_lines(scenario)
else:
zone_lines = self.meta.region_lines(scenario)
try:
zone_lines = zone_lines.set_index([self.AGG_BY])
except:
self.logger.info("Column to Aggregate by is missing")
continue
zone_lines = zone_lines.xs(zone_input)
zone_lines = zone_lines['line_name'].unique()
#Map lines to interfaces
all_ints = self.meta.interface_lines(scenario) #Map lines to interfaces
all_ints.index = all_ints.line
ints = all_ints.loc[all_ints.index.intersection(zone_lines)]
#flow = flow[flow.index.get_level_values('interface_name').isin(ints.interface)] #Limit to only interfaces touching to this zone
#flow = flow.droplevel('interface_category')
export_limits = self["interface_Export_Limit"].get(scenario).droplevel('timestamp')
export_limits.mask(export_limits[0]==0.0,other=0.01,inplace=True) #if limit is zero set to small value
export_limits = export_limits[export_limits.index.get_level_values('interface_name').isin(ints.interface)]
export_limits = export_limits[export_limits[0].abs() < 99998] #Filter out unenforced interfaces.
#Drop unnecessary columns.
export_limits.reset_index(inplace = True)
export_limits.drop(columns = 'interface_category',inplace = True)
export_limits.set_index('interface_name',inplace = True)
import_limits = self["interface_Import_Limit"].get(scenario).droplevel('timestamp')
import_limits.mask(import_limits[0]==0.0,other=0.01,inplace=True) #if limit is zero set to small value
import_limits = import_limits[import_limits.index.get_level_values('interface_name').isin(ints.interface)]
import_limits = import_limits[import_limits[0].abs() < 99998] #Filter out unenforced interfaces.
reported_ints = import_limits.index.get_level_values('interface_name').unique()
#Drop unnecessary columns.
import_limits.reset_index(inplace = True)
import_limits.drop(columns = 'interface_category',inplace = True)
import_limits.set_index('interface_name',inplace = True)
#Extract time index
ti = self["interface_Flow"][self.Scenarios[0]].index.get_level_values('timestamp').unique()
if prop != '':
interf_list = prop.split(',')
self.logger.info('Plotting only interfaces specified in Marmot_plot_select.csv')
self.logger.info(interf_list)
else:
interf_list = reported_ints.copy()
self.logger.info('Carving out season from ' + start_date_range + ' to ' + end_date_range)
#Remove missing interfaces from the list.
for interf in list(interf_list):  # iterate over a copy so interf_list.remove() does not skip entries
#Remove leading spaces
if interf[0] == ' ':
interf = interf[1:]
if interf not in reported_ints:
self.logger.warning(interf + ' not found in results.')
interf_list.remove(interf)
if not interf_list:
outputs = MissingInputData()
return outputs
xdim = 2
ydim = len(interf_list)
mplt = PlotLibrary(ydim, xdim, squeeze=False)
fig, axs = mplt.get_figure()
grid_size = xdim * ydim
excess_axs = grid_size - len(interf_list)
plt.subplots_adjust(wspace=0.05, hspace=0.2)
missing_ints = 0
chunks = []
limits_chunks = []
n = -1
for interf in interf_list:
n += 1
#Remove leading spaces
if interf[0] == ' ':
interf = interf[1:]
chunks_interf = []
single_exp_lim = export_limits.loc[interf] / 1000
single_imp_lim = import_limits.loc[interf] / 1000
#Check if all hours have the same limit.
check = single_exp_lim.to_numpy()
identical = check[0] == check.all()
limits = pd.concat([single_exp_lim,single_imp_lim],axis = 1)
limits.columns = ['export limit','import limit']
limits.index = ti
for scenario in self.Scenarios:
flow = self["interface_Flow"].get(scenario)
single_int = flow.xs(interf,level = 'interface_name') / 1000
single_int.index = single_int.index.droplevel('interface_category')
single_int.columns = [interf]
if self.shift_leapday == True:
single_int = self.adjust_for_leapday(single_int)
summer = single_int[start_date_range:end_date_range]
winter = single_int.drop(summer.index)
summer_lim = limits[start_date_range:end_date_range]
winter_lim = limits.drop(summer.index)
if duration_curve:
summer = self.sort_duration(summer,interf)
winter = self.sort_duration(winter,interf)
summer_lim = self.sort_duration(summer_lim,'export limit')
winter_lim = self.sort_duration(winter_lim,'export limit')
axs[n,0].plot(summer[interf],linewidth = 1,label = scenario + '\n interface flow')
axs[n,1].plot(winter[interf],linewidth = 1,label = scenario + '\n interface flow')
if scenario == self.Scenarios[-1]:
for col in summer_lim:
limits_color_dict = {'export limit': 'red', 'import limit': 'green'}
axs[n,0].plot(summer_lim[col], linewidth=1, linestyle='--',
color=limits_color_dict[col], label=col)
axs[n,1].plot(winter_lim[col], linewidth=1, linestyle='--',
color=limits_color_dict[col], label=col)
#For output time series .csv
scenario_names = pd.Series([scenario] * len(single_int), name='Scenario')
single_int_out = single_int.set_index([scenario_names], append=True)
chunks_interf.append(single_int_out)
Data_out_line = pd.concat(chunks_interf,axis = 0)
Data_out_line.columns = [interf]
chunks.append(Data_out_line)
axs[n,0].set_title(interf)
axs[n,1].set_title(interf)
if not duration_curve:
locator = mdates.AutoDateLocator(minticks=4, maxticks=8)
formatter = mdates.ConciseDateFormatter(locator)
formatter.formats[2] = '%d\n %b'
formatter.zero_formats[1] = '%b\n %Y'
formatter.zero_formats[2] = '%d\n %b'
formatter.zero_formats[3] = '%H:%M\n %d-%b'
formatter.offset_formats[3] = '%b %Y'
formatter.show_offset = False
axs[n,0].xaxis.set_major_locator(locator)
axs[n,0].xaxis.set_major_formatter(formatter)
axs[n,1].xaxis.set_major_locator(locator)
axs[n,1].xaxis.set_major_formatter(formatter)
mplt.add_legend()
Data_Table_Out = pd.concat(chunks,axis = 1)
#Limits_Out = pd.concat(limits_chunks,axis = 1)
#Limits_Out.index = ['Export Limit','Import Limit']
plt.ylabel('Flow (GW)', color='black', rotation='vertical', labelpad=30)
if duration_curve:
plt.xlabel('Sorted hour of the year', color = 'black', labelpad = 30)
fig.text(0.15,0.98,'Summer (' + start_date_range + ' to ' + end_date_range + ')',fontsize = 16)
fig.text(0.58,0.98,'Winter',fontsize = 16)
plt.tight_layout(rect=[0, 0.03, 1, 0.97])
if mconfig.parser("plot_title_as_region"):
mplt.add_main_title(zone_input)
outputs[zone_input] = {'fig': fig, 'data_table': Data_Table_Out}
#Limits_Out.to_csv(os.path.join(self.Marmot_Solutions_folder, 'Figures_Output',self.AGG_BY + '_transmission','Individual_Interface_Limits.csv'))
return outputs
#TODO: re-organize parameters (self vs. not self)
def int_flow_ind_diff(self, figure_name: str = None, **_):
"""Plot under development
This method plots the hourly difference in interface flow between two scenarios for
individual interfaces, with a facet for each interface.
The two scenarios are defined in the "Scenario_Diff" row of Marmot_user_defined_inputs.
The interfaces are specified in the plot properties field of Marmot_plot_select.csv (column 4).
The figure and data tables are saved within the module.
Returns:
UnderDevelopment(): Exception class, plot is not functional.
"""
return UnderDevelopment() # TODO: add new get_data method
duration_curve=False
if 'duration_curve' in figure_name:
duration_curve = True
check_input_data = []
Flow_Collection = {}
Import_Limit_Collection = {}
Export_Limit_Collection = {}
check_input_data.extend([get_data(Flow_Collection,"interface_Flow",self.Marmot_Solutions_folder, self.Scenarios)])
check_input_data.extend([get_data(Import_Limit_Collection,"interface_Import_Limit",self.Marmot_Solutions_folder, self.Scenarios)])
check_input_data.extend([get_data(Export_Limit_Collection,"interface_Export_Limit",self.Marmot_Solutions_folder, self.Scenarios)])
if 1 in check_input_data:
outputs = MissingInputData()
return outputs
scenario = self.Scenarios[0]
outputs = {}
if not pd.isnull(self.start_date):
self.logger.info("Plotting specific date range: \
{} to {}".format(str(self.start_date),str(self.end_date)))
for zone_input in self.Zones:
self.logger.info("For all interfaces touching Zone = "+zone_input)
Data_Table_Out = pd.DataFrame()
# gets correct metadata based on area aggregation
if self.AGG_BY=='zone':
zone_lines = self.meta.zone_lines(scenario)
else:
zone_lines = self.meta.region_lines(scenario)
try:
zone_lines = zone_lines.set_index([self.AGG_BY])
except:
self.logger.info("Column to Aggregate by is missing")
continue
zone_lines = zone_lines.xs(zone_input)
zone_lines = zone_lines['line_name'].unique()
#Map lines to interfaces
all_ints = self.meta.interface_lines(scenario) #Map lines to interfaces
all_ints.index = all_ints.line
ints = all_ints.loc[all_ints.index.intersection(zone_lines)]
#flow = flow[flow.index.get_level_values('interface_name').isin(ints.interface)] #Limit to only interfaces touching to this zone
#flow = flow.droplevel('interface_category')
export_limits = Export_Limit_Collection.get(scenario).droplevel('timestamp')
export_limits.mask(export_limits[0]==0.0,other=0.01,inplace=True) #if limit is zero set to small value
export_limits = export_limits[export_limits.index.get_level_values('interface_name').isin(ints.interface)]
export_limits = export_limits[export_limits[0].abs() < 99998] #Filter out unenforced interfaces.
#Drop unnecessary columns.
export_limits.reset_index(inplace = True)
export_limits.drop(columns = 'interface_category',inplace = True)
export_limits.set_index('interface_name',inplace = True)
import_limits = Import_Limit_Collection.get(scenario).droplevel('timestamp')
import_limits.mask(import_limits[0]==0.0,other=0.01,inplace=True) #if limit is zero set to small value
import_limits = import_limits[import_limits.index.get_level_values('interface_name').isin(ints.interface)]
import_limits = import_limits[import_limits[0].abs() < 99998] #Filter out unenforced interfaces.
reported_ints = import_limits.index.get_level_values('interface_name').unique()
#Drop unnecessary columns.
import_limits.reset_index(inplace = True)
import_limits.drop(columns = 'interface_category',inplace = True)
import_limits.set_index('interface_name',inplace = True)
#Extract time index
ti = Flow_Collection[self.Scenarios[0]].index.get_level_values('timestamp').unique()
if self.prop != '':
interf_list = self.prop.split(',')
self.logger.info('Plotting only interfaces specified in Marmot_plot_select.csv')
self.logger.info(interf_list)
else:
interf_list = reported_ints.copy()
self.logger.info('Plotting full time series results.')
xdim,ydim = self.set_x_y_dimension(len(interf_list))
mplt = PlotLibrary(ydim, xdim,
squeeze=False, ravel_axs=True)
fig, axs = mplt.get_figure()
grid_size = xdim * ydim
excess_axs = grid_size - len(interf_list)
plt.subplots_adjust(wspace=0.05, hspace=0.2)
missing_ints = 0
chunks = []
limits_chunks = []
n = -1
for interf in interf_list:
n += 1
#Remove leading spaces
if interf[0] == ' ':
interf = interf[1:]
if interf in reported_ints:
chunks_interf = []
single_exp_lim = export_limits.loc[interf] / 1000 #TODO: Use auto unit converter in method
single_imp_lim = import_limits.loc[interf] / 1000
#Check if all hours have the same limit.
check = single_exp_lim.to_numpy()
identical = check[0] == check.all()
limits = pd.concat([single_exp_lim,single_imp_lim],axis = 1)
limits.columns = ['export limit','import limit']
limits.index = ti
for scenario in self.Scenarios:
flow = Flow_Collection.get(scenario)
single_int = flow.xs(interf,level = 'interface_name') / 1000
single_int.index = single_int.index.droplevel('interface_category')
single_int.columns = [interf]
if self.shift_leapday == True:
single_int = self.adjust_for_leapday(single_int)
single_int = single_int.reset_index().set_index('timestamp')
limits = limits.reset_index().set_index('timestamp')
if not pd.isnull(self.start_date):
single_int = single_int[self.start_date : self.end_date]
limits = limits[self.start_date : self.end_date]
if duration_curve:
single_int = self.sort_duration(single_int,interf)
mplt.lineplot(single_int,interf,label = scenario + '\n interface flow', sub_pos = n)
#Only print limits if it doesn't change monthly or if you are plotting a time series. Otherwise the limit lines could be misleading.
if not duration_curve or identical[0]:
if scenario == self.Scenarios[-1]:
#Only plot limits for last scenario.
limits_color_dict = {'export limit': 'red', 'import limit': 'green'}
mplt.lineplot(limits,'export limit',label = 'export limit',color = limits_color_dict,linestyle = '--', sub_pos = n)
mplt.lineplot(limits,'import limit',label = 'import limit',color = limits_color_dict,linestyle = '--', sub_pos = n)
#For output time series .csv
scenario_names = pd.Series([scenario] * len(single_int),name = 'Scenario')
single_int_out = single_int.set_index([scenario_names],append = True)
chunks_interf.append(single_int_out)
Data_out_line = pd.concat(chunks_interf,axis = 0)
Data_out_line.columns = [interf]
chunks.append(Data_out_line)
else:
self.logger.warning(interf + ' not found in results. Have you tagged it with the "Must Report" property in PLEXOS?')
excess_axs += 1
missing_ints += 1
continue
axs[n].set_title(interf)
handles, labels = axs[n].get_legend_handles_labels()
if not duration_curve:
self.set_subplot_timeseries_format(axs, sub_pos=n)
if n == len(interf_list) - 1:
axs[n].legend(loc='lower left',bbox_to_anchor=(1.05,-0.2))
if missing_ints == len(interf_list):
outputs = MissingInputData()
return outputs
Data_Table_Out = pd.concat(chunks,axis = 1)
Data_Table_Out = Data_Table_Out.reset_index()
index_name = 'level_0' if duration_curve else 'timestamp'
Data_Table_Out = Data_Table_Out.pivot(index = index_name,columns = 'Scenario')
#Limits_Out = pd.concat(limits_chunks,axis = 1)
#Limits_Out.index = ['Export Limit','Import Limit']
# Data_Table_Out = Data_Table_Out.reset_index()
# Data_Table_Out = Data_Table_Out.groupby(Data_Table_Out.index // 24).mean()
# Data_Table_Out.index = pd.date_range(start = '1/1/2024',end = '12/31/2024',freq = 'D')
plt.ylabel('Flow (GW)', color='black', rotation='vertical', labelpad=30)
if duration_curve:
plt.xlabel('Sorted hour of the year', color = 'black', labelpad = 30)
plt.tight_layout(rect=[0, 0.03, 1, 0.97])
if mconfig.parser("plot_title_as_region"):
mplt.add_main_title(zone_input)
outputs[zone_input] = {'fig': fig, 'data_table': Data_Table_Out}
#Limits_Out.to_csv(os.path.join(self.Marmot_Solutions_folder, 'Figures_Output',self.AGG_BY + '_transmission','Individual_Interface_Limits.csv'))
return outputs
def line_flow_ind(self, figure_name: str = None, prop: str = None, **_):
"""
#TODO: Finish Docstring
This method plots flow and the import/export limits for individual transmission lines,
with a facet for each line.
The lines are specified in the plot properties field of Marmot_plot_select.csv (column 4).
Figures and data tables are saved within the module.
Args:
figure_name (str, optional): [description]. Defaults to None.
prop (str, optional): [description]. Defaults to None.
Returns:
[type]: [description]
"""
#TODO: Use auto unit converter in method
duration_curve=False
if 'duration_curve' in figure_name:
duration_curve = True
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True,"line_Flow",self.Scenarios),
(True,"line_Import_Limit",self.Scenarios),
(True,"line_Export_Limit",self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
if 1 in check_input_data:
return MissingInputData()
#Select only lines specified in Marmot_plot_select.csv.
if pd.isna(prop):
return InputSheetError()
select_lines = prop.split(",")
self.logger.info('Plotting only lines specified in Marmot_plot_select.csv')
self.logger.info(select_lines)
scenario = self.Scenarios[0] #Select single scenario for purpose of extracting limits.
export_limits = self["line_Export_Limit"].get(scenario).droplevel('timestamp')
export_limits.mask(export_limits[0]==0.0,other=0.01,inplace=True) #if limit is zero set to small value
export_limits = export_limits[export_limits[0].abs() < 99998] #Filter out unenforced lines.
import_limits = self["line_Import_Limit"].get(scenario).droplevel('timestamp')
import_limits.mask(import_limits[0]==0.0,other=0.01,inplace=True) #if limit is zero set to small value
import_limits = import_limits[import_limits[0].abs() < 99998] #Filter out unenforced lines.
flows = self["line_Flow"][scenario]
# limited_lines = []
# i = 0
# all_lines = flows.index.get_level_values('line_name').unique()
# for line in all_lines:
# i += 1
# print(line)
# print(i / len(all_lines))
# exp = export_limits.loc[line].squeeze()[0]
# imp = import_limits.loc[line].squeeze()[0]
# flow = flows.xs(line,level = 'line_name')[0].tolist()
# if exp in flow or imp in flow:
# limited_lines.append(line)
# print(limited_lines)
# pd.DataFrame(limited_lines).to_csv('/Users/mschwarz/OR OSW local/Solutions/Figures_Output/limited_lines.csv')
xdim,ydim = self.set_x_y_dimension(len(select_lines))
grid_size = xdim * ydim
excess_axs = grid_size - len(select_lines)
mplt = PlotLibrary(ydim, xdim, squeeze=False,
ravel_axs=True)
fig, axs = mplt.get_figure()
reported_lines = self["line_Flow"][self.Scenarios[0]].index.get_level_values('line_name').unique()
n = -1
missing_lines = 0
chunks = []
limits_chunks = []
for line in select_lines:
n += 1
#Remove leading spaces
if line[0] == ' ':
line = line[1:]
if line in reported_lines:
chunks_line = []
single_exp_lim = export_limits.loc[line]
single_imp_lim = import_limits.loc[line]
limits = pd.concat([single_exp_lim,single_imp_lim])
limits_chunks.append(limits)
single_exp_lim = single_exp_lim.squeeze()
single_imp_lim = single_imp_lim.squeeze()
# If export/import limits were pulled as an interval property, more than one
# value is returned; take the average across the intervals.
if isinstance(single_exp_lim, pd.Series):
single_exp_lim = single_exp_lim.mean()
single_imp_lim = single_imp_lim.mean()
limits = pd.Series([single_exp_lim,single_imp_lim],name = line)
limits_chunks.append(limits)
for scenario in self.Scenarios:
flow = self["line_Flow"][scenario]
single_line = flow.xs(line,level = 'line_name')
single_line = single_line.droplevel('units')
single_line.columns = [line]
if self.shift_leapday == True:
single_line = self.adjust_for_leapday(single_line)
single_line_out = single_line.copy()
if duration_curve:
single_line = self.sort_duration(single_line,line)
mplt.lineplot(single_line, line, label = scenario + '\n line flow', sub_pos=n)
#Add %congested number to plot.
if scenario == self.Scenarios[0]:
viol_exp = single_line[single_line[line] > single_exp_lim].count()
viol_imp = single_line[single_line[line] < single_imp_lim].count()
viol_perc = 100 * (viol_exp + viol_imp) / len(single_line)
viol_perc = round(viol_perc.squeeze(),3)
axs[n].annotate('Violation = ' + str(viol_perc) + '% of hours', xy = (0.1,0.15),xycoords='axes fraction')
cong_exp = single_line[single_line[line] == single_exp_lim].count()
cong_imp = single_line[single_line[line] == single_imp_lim].count()
cong_perc = 100 * (cong_exp + cong_imp) / len(single_line)
cong_perc = round(cong_perc.squeeze(),0)
axs[n].annotate('Congestion = ' + str(cong_perc) + '% of hours', xy = (0.1,0.1),xycoords='axes fraction')
#For output time series .csv
scenario_names = pd.Series([scenario] * len(single_line_out),name = 'Scenario')
single_line_out = single_line_out.set_index([scenario_names],append = True)
chunks_line.append(single_line_out)
Data_out_line = pd.concat(chunks_line,axis = 0)
chunks.append(Data_out_line)
else:
self.logger.warning(line + ' not found in results. Have you tagged it with the "Must Report" property in PLEXOS?')
excess_axs += 1
missing_lines += 1
continue
mplt.remove_excess_axs(excess_axs,grid_size)
axs[n].axhline(y = single_exp_lim, ls = '--',label = 'Export Limit',color = 'red')
axs[n].axhline(y = single_imp_lim, ls = '--',label = 'Import Limit', color = 'green')
axs[n].set_title(line)
if not duration_curve:
mplt.set_subplot_timeseries_format(sub_pos=n)
if missing_lines == len(select_lines):
outputs = MissingInputData()
return outputs
Data_Table_Out = pd.concat(chunks,axis = 1)
#Limits_Out = pd.concat(limits_chunks,axis = 1)
#Limits_Out.index = ['Export Limit','Import Limit']
mplt.add_legend()
plt.ylabel('Flow (MW)', color='black', rotation='vertical', labelpad=30)
#plt.tight_layout(rect=[0, 0.03, 1, 0.97])
plt.tight_layout()
fn_suffix = '_duration_curve' if duration_curve else ''
fig.savefig(os.path.join(self.Marmot_Solutions_folder, 'Figures_Output',self.AGG_BY + '_transmission',figure_name + fn_suffix + '.svg'), dpi=600, bbox_inches='tight')
Data_Table_Out.to_csv(os.path.join(self.Marmot_Solutions_folder, 'Figures_Output',self.AGG_BY + '_transmission',figure_name + fn_suffix + '.csv'))
# Limits_Out.to_csv(os.path.join(self.Marmot_Solutions_folder, 'Figures_Output',self.AGG_BY + '_transmission',figure_name + 'limits.csv'))
outputs = DataSavedInModule()
return outputs
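# --- Illustrative sketch (standalone, not part of Marmot): how the violation and
# congestion percentages annotated above can be derived. The flow values, line
# name and limits below are invented for the example.
import pandas as pd

flow = pd.DataFrame({"line_A": [90.0, 100.0, 105.0, -100.0, 50.0]})
export_limit, import_limit = 100.0, -100.0
viol_hours = ((flow["line_A"] > export_limit) | (flow["line_A"] < import_limit)).sum()
cong_hours = ((flow["line_A"] == export_limit) | (flow["line_A"] == import_limit)).sum()
print(round(100 * viol_hours / len(flow), 3))  # 20.0 -> % of hours violating a limit
print(round(100 * cong_hours / len(flow), 0))  # 40.0 -> % of hours at a limit (congested)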
def line_flow_ind_diff(self, figure_name: str = None,
prop: str = None, **_):
"""
#TODO: Finish Docstring
This method plots the flow difference for individual transmission lines, with a facet for each line.
The scenarios are specified in the "Scenario_Diff_plot" field of Marmot_user_defined_inputs.csv.
The lines are specified in the plot properties field of Marmot_plot_select.csv (column 4).
Figures and data tables are saved in the module.
Args:
figure_name (str, optional): [description]. Defaults to None.
prop (str, optional): [description]. Defaults to None.
Returns:
[type]: [description]
"""
#TODO: Use auto unit converter in method
duration_curve=False
if 'duration_curve' in figure_name:
duration_curve = True
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True,"line_Flow",self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
if 1 in check_input_data:
outputs = MissingInputData()
return outputs
#Select only lines specified in Marmot_plot_select.csv.
if pd.isna(prop) or not prop:
outputs = InputSheetError()
return outputs
select_lines = prop.split(",")
self.logger.info('Plotting only lines specified in Marmot_plot_select.csv')
self.logger.info(select_lines)
flow_diff = self["line_Flow"].get(self.Scenario_Diff[1]) - self["line_Flow"].get(self.Scenario_Diff[0])
xdim,ydim = self.set_x_y_dimension(len(select_lines))
grid_size = xdim * ydim
excess_axs = grid_size - len(select_lines)
mplt = PlotLibrary(ydim, xdim, squeeze=False,
ravel_axs=True)
fig, axs = mplt.get_figure()
plt.subplots_adjust(wspace=0.05, hspace=0.2)
reported_lines = self["line_Flow"].get(self.Scenarios[0]).index.get_level_values('line_name').unique()
n = -1
missing_lines = 0
chunks = []
for line in select_lines:
n += 1
#Remove leading spaces
if line[0] == ' ':
line = line[1:]
if line in reported_lines:
single_line = flow_diff.xs(line,level = 'line_name')
single_line.columns = [line]
if self.shift_leapday == True:
single_line = self.adjust_for_leapday(single_line)
single_line_out = single_line.copy()
if duration_curve:
single_line = self.sort_duration(single_line,line)
mplt.lineplot(single_line,line, label = self.Scenario_Diff[1] + ' - \n' + self.Scenario_Diff[0] + '\n line flow', sub_pos = n)
else:
self.logger.warning(line + ' not found in results. Have you tagged it with the "Must Report" property in PLEXOS?')
excess_axs += 1
missing_lines += 1
continue
mplt.remove_excess_axs(excess_axs,grid_size)
axs[n].set_title(line)
if not duration_curve:
mplt.set_subplot_timeseries_format(sub_pos=n)
chunks.append(single_line_out)
if missing_lines == len(select_lines):
outputs = MissingInputData()
return outputs
Data_Table_Out = pd.concat(chunks,axis = 1)
mplt.add_legend()
plt.ylabel('Flow difference (MW)', color='black', rotation='vertical', labelpad=30)
plt.tight_layout()
fn_suffix = '_duration_curve' if duration_curve else ''
fig.savefig(os.path.join(self.Marmot_Solutions_folder, 'Figures_Output',self.AGG_BY + '_transmission',figure_name + fn_suffix + '.svg'), dpi=600, bbox_inches='tight')
Data_Table_Out.to_csv(os.path.join(self.Marmot_Solutions_folder, 'Figures_Output',self.AGG_BY + '_transmission',figure_name + fn_suffix + '.csv'))
outputs = DataSavedInModule()
return outputs
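# --- Illustrative sketch (standalone, not part of Marmot): a duration curve is the
# same series re-ordered from largest to smallest value with the timestamps
# discarded; self.sort_duration is assumed to do something equivalent per column.
import pandas as pd

ts = pd.DataFrame({"line_A": [5.0, 20.0, 10.0, 15.0]},
                  index=pd.date_range("2030-01-01", periods=4, freq="H"))
duration_curve = ts.sort_values("line_A", ascending=False).reset_index(drop=True)
print(duration_curve)  # index is now the rank of the hour, not a timestamp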
def line_flow_ind_seasonal(self, figure_name: str = None, prop: str = None,
start_date_range: str = None,
end_date_range: str = None, **_):
"""TODO: Finish Docstring.
This method differs from the previous method, in that it plots seasonal line limits.
To use this method, line import/export must be an "interval" property, not a "year" property.
This can be selected in "plexos_properties.csv".
Re-run the formatter if necessary, it will overwrite the existing properties in "*_formatted.h5"
This method plots flow, import and export limit, for individual transmission lines, with a facet for each line.
The lines are specified in the plot properties field of Marmot_plot_select.csv (column 4).
The plot includes every interchange that originates or ends in the aggregation zone.
Figures and data tables saved in the module.
Args:
figure_name (str, optional): [description]. Defaults to None.
prop (str, optional): [description]. Defaults to None.
start_date_range (str, optional): [description]. Defaults to None.
end_date_range (str, optional): [description]. Defaults to None.
Returns:
[type]: [description]
"""
#TODO: Use auto unit converter in method
if pd.isna(start_date_range):
self.logger.warning('You are attempting to plot a time series faceted by two seasons, '
'but the "Start Date" column of "Marmot_plot_select.csv" is empty. '
'Please enter dates in "Start Date" and "End Date"; these define the bounds of '
'one of the two seasons. The other season is comprised of the rest of the year.')
return MissingInputData()
duration_curve=False
if 'duration_curve' in figure_name:
duration_curve = True
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True,"line_Flow",self.Scenarios),
(True,"line_Import_Limit",self.Scenarios),
(True,"line_Export_Limit",self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
if 1 in check_input_data:
return MissingInputData()
#Select only lines specified in Marmot_plot_select.csv.
if pd.isna(prop) or not prop:
return InputSheetError()
select_lines = prop.split(",")
self.logger.info('Plotting only lines specified in Marmot_plot_select.csv')
self.logger.info(select_lines)
scenario = self.Scenarios[0]
#Line limits are seasonal.
export_limits = self["line_Export_Limit"].get(scenario).droplevel('timestamp')
export_limits.mask(export_limits[0]==0.0,other=0.01,inplace=True) #if limit is zero set to small value
export_limits = export_limits[export_limits[0].abs() < 99998] #Filter out unenforced lines.
import_limits = self["line_Import_Limit"].get(scenario).droplevel('timestamp')
import_limits.mask(import_limits[0]==0.0,other=0.01,inplace=True) #if limit is zero set to small value
import_limits = import_limits[import_limits[0].abs() < 99998] #Filter out unenforced lines.
#Extract time index
ti = self["line_Flow"][self.Scenarios[0]].index.get_level_values('timestamp').unique()
reported_lines = self["line_Flow"][self.Scenarios[0]].index.get_level_values('line_name').unique()
self.logger.info('Carving out season from ' + start_date_range + ' to ' + end_date_range)
#Remove any lines that are missing from the results.
select_lines = [line.strip() for line in select_lines]
for line in select_lines:
if line not in reported_lines:
self.logger.warning(line + ' not found in results. Have you tagged it with the "Must Report" property in PLEXOS?')
select_lines = [line for line in select_lines if line in reported_lines]
if not select_lines:
outputs = MissingInputData()
return outputs
xdim = 2
ydim = len(select_lines)
grid_size = xdim * ydim
excess_axs = grid_size - len(select_lines)
mplt = PlotLibrary(ydim, xdim, squeeze=False)
fig, axs = mplt.get_figure()
i = -1
missing_lines = 0
chunks = []
limits_chunks = []
for line in select_lines:
i += 1
#Remove leading spaces
if line[0] == ' ':
line = line[1:]
chunks_line = []
single_exp_lim = export_limits.loc[line]
single_exp_lim.index = ti
single_imp_lim = import_limits.loc[line]
single_imp_lim.index = ti
limits = pd.concat([single_exp_lim,single_imp_lim],axis = 1)
limits.columns = ['export limit','import limit']
limits.index = ti
limits_chunks.append(limits)
for scenario in self.Scenarios:
flow = self["line_Flow"][scenario]
single_line = flow.xs(line,level = 'line_name')
single_line = single_line.droplevel('units')
single_line_out = single_line.copy()
single_line.columns = [line]
if self.shift_leapday == True:
single_line = self.adjust_for_leapday(single_line)
#Split into seasons.
summer = single_line[start_date_range : end_date_range]
winter = single_line.drop(summer.index)
summer_lim = limits[start_date_range:end_date_range]
winter_lim = limits.drop(summer.index)
if duration_curve:
summer = self.sort_duration(summer,line)
winter = self.sort_duration(winter,line)
summer_lim = self.sort_duration(summer_lim,'export limit')
winter_lim = self.sort_duration(winter_lim,'export limit')
axs[i,0].plot(summer[line],linewidth = 1,label = scenario + '\n line flow')
axs[i,1].plot(winter[line],linewidth = 1,label = scenario + '\n line flow')
if scenario == self.Scenarios[-1]:
for col in summer_lim:
limits_color_dict = {'export limit': 'red', 'import limit': 'green'}
axs[i,0].plot(summer_lim[col],linewidth = 1,linestyle = '--',color = limits_color_dict[col],label = col)
axs[i,1].plot(winter_lim[col],linewidth = 1,linestyle = '--',color = limits_color_dict[col],label = col)
for j in [0,1]:
axs[i,j].spines['right'].set_visible(False)
axs[i,j].spines['top'].set_visible(False)
axs[i,j].tick_params(axis='y', which='major', length=5, width=1)
axs[i,j].tick_params(axis='x', which='major', length=5, width=1)
axs[i,j].set_title(line)
if i == len(select_lines) - 1:
axs[i,j].legend(loc = 'lower left',bbox_to_anchor=(1.05,0),facecolor='inherit', frameon=True)
#For output time series .csv
scenario_names = pd.Series([scenario] * len(single_line_out),name = 'Scenario')
single_line_out.columns = [line]
single_line_out = single_line_out.set_index([scenario_names],append = True)
chunks_line.append(single_line_out)
Data_out_line = pd.concat(chunks_line,axis = 0)
chunks.append(Data_out_line)
if missing_lines == len(select_lines):
outputs = MissingInputData()
return outputs
Data_Table_Out = pd.concat(chunks,axis = 1)
#Limits_Out = pd.concat(limits_chunks,axis = 1)
#Limits_Out.index = ['Export Limit','Import Limit']
fig.text(0.3,1,'Summer (Jun - Sep)')
fig.text(0.6,1,'Winter (Jan - Mar,Oct - Dec)')
plt.ylabel('Flow (MW)', color='black', rotation='vertical', labelpad=30)
plt.tight_layout()
fn_suffix = '_duration_curve' if duration_curve else ''
fig.savefig(os.path.join(self.Marmot_Solutions_folder, 'Figures_Output',self.AGG_BY + '_transmission','Individual_Line_Flow' + fn_suffix + '_seasonal.svg'), dpi=600, bbox_inches='tight')
Data_Table_Out.to_csv(os.path.join(self.Marmot_Solutions_folder, 'Figures_Output',self.AGG_BY + '_transmission','Individual_Line_Flow' + fn_suffix + '_seasonal.csv'))
#Limits_Out.to_csv(os.path.join(self.Marmot_Solutions_folder, 'Figures_Output',self.AGG_BY + '_transmission','Individual_Line_Limits.csv'))
outputs = DataSavedInModule()
return outputs
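# --- Illustrative sketch (standalone, not part of Marmot): splitting a
# datetime-indexed frame into a date-range slice ("summer") and its complement
# ("winter"), as done above with start_date_range/end_date_range. Dates invented.
import pandas as pd

idx = pd.date_range("2030-01-01", periods=12, freq="MS")
df = pd.DataFrame({"flow": range(12)}, index=idx)
summer = df["2030-06-01":"2030-09-30"]
winter = df.drop(summer.index)
print(len(summer), len(winter))  # 4 8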
def extract_tx_cap(self, **_):
"""Plot under development
Returns:
UnderDevelopment(): Exception class, plot is not functional.
"""
return UnderDevelopment() #TODO: Needs finishing
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True,"interface_Import_Limit",self.Scenarios),
(True,"interface_Export_Limit",self.Scenarios),
(True,"line_Import_Limit",self.Scenarios),
(True,"line_Export_Limit",self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
if 1 in check_input_data:
return MissingInputData()
for scenario in self.Scenarios:
self.logger.info(scenario)
for zone_input in self.Zones:
#Lines
# lines = self.meta.region_interregionallines(scenario)
# if scenario == 'ADS':
# zone_input = zone_input.split('_WI')[0]
# lines = self.meta_ADS.region_interregionallines()
# lines = lines[lines['region'] == zone_input]
# import_lim = self["line_Import_Limit"][scenario].reset_index()
# export_lim = self["line_Export_Limit"][scenario].reset_index()
# lines = lines.merge(import_lim,how = 'inner',on = 'line_name')
# lines = lines[['line_name',0]]
# lines.columns = ['line_name','import_limit']
# lines = lines.merge(export_lim, how = 'inner',on = 'line_name')
# lines = lines[['line_name','import_limit',0]]
# lines.columns = ['line_name','import_limit','export_limit']
# fn = os.path.join(self.Marmot_Solutions_folder, 'NARIS', 'Figures_Output',self.AGG_BY + '_transmission','Individual_Interregional_Line_Limits_' + scenario + '.csv')
# lines.to_csv(fn)
# lines = self.meta.region_intraregionallines(scenario)
# if scenario == 'ADS':
# lines = self.meta_ADS.region_intraregionallines()
# lines = lines[lines['region'] == zone_input]
# import_lim = self["line_Import_Limit"][scenario].reset_index()
# export_lim = self["line_Export_Limit"][scenario].reset_index()
# lines = lines.merge(import_lim,how = 'inner',on = 'line_name')
# lines = lines[['line_name',0]]
# lines.columns = ['line_name','import_limit']
# lines = lines.merge(export_lim, how = 'inner',on = 'line_name')
# lines = lines[['line_name','import_limit',0]]
# lines.columns = ['line_name','import_limit','export_limit']
# fn = os.path.join(self.Marmot_Solutions_folder, 'NARIS', 'Figures_Output',self.AGG_BY + '_transmission','Individual_Intraregional_Line_Limits_' + scenario + '.csv')
# lines.to_csv(fn)
#Interfaces
PSCo_ints = ['P39 TOT 5_WI','P40 TOT 7_WI']
int_import_lim = self["interface_Import_Limit"][scenario].reset_index()
int_export_lim = self["interface_Export_Limit"][scenario].reset_index()
if scenario == 'NARIS':
last_timestamp = int_import_lim['timestamp'].unique()[-1] #Last because ADS uses the last timestamp.
int_import_lim = int_import_lim[int_import_lim['timestamp'] == last_timestamp]
int_export_lim = int_export_lim[int_export_lim['timestamp'] == last_timestamp]
lines2ints = self.meta_ADS.interface_lines()
else:
lines2ints = self.meta.interface_lines(scenario)
fn = os.path.join(self.Marmot_Solutions_folder, 'NARIS', 'Figures_Output',self.AGG_BY + '_transmission','test_meta_' + scenario + '.csv')
lines2ints.to_csv(fn)
ints = pd.merge(int_import_lim,int_export_lim,how = 'inner', on = 'interface_name')
ints.rename(columns = {'0_x':'import_limit','0_y': 'export_limit'},inplace = True)
all_lines_in_ints = lines2ints['line'].unique()
test = [line for line in lines['line_name'].unique() if line in all_lines_in_ints]
ints = ints.merge(lines2ints, how = 'inner', left_on = 'interface_name',right_on = 'interface')
def region_region_interchange_all_scenarios(self, **kwargs):
"""
#TODO: Finish Docstring
This method creates a timeseries line plot of interchange flows between the selected region
to each connecting region.
If there are more than 4 total interchanges, all other interchanges are aggregated into an 'other' grouping
Each scenarios is plotted on a separate Facet plot.
Figures and data tables are returned to plot_main
"""
outputs = self._region_region_interchange(self.Scenarios, **kwargs)
return outputs
def region_region_interchange_all_regions(self, **kwargs):
"""
#TODO: Finish Docstring
This method creates a timeseries line plot of interchange flows between the selected region
to each connecting region. All regions are plotted on a single figure with each focus region placed on a separate
facet plot
If there are more than 4 total interchanges, all other interchanges are aggregated into an 'other' grouping
This figure only plots a single scenario that is defined by Main_scenario_plot in user_defined_inputs.csv.
Figures and data tables are saved within method
"""
outputs = self._region_region_interchange([self.Scenarios[0]],plot_scenario=False, **kwargs)
return outputs
def _region_region_interchange(self, scenario_type: list, plot_scenario: bool = True,
timezone: str = "", **_):
"""Creates a timeseries line plot of interchange flows between regions.
Called from region_region_interchange_all_scenarios() and region_region_interchange_all_regions().
Args:
scenario_type (list): List of scenarios to plot. A multi-entry list creates one facet
per scenario; a single-entry list (with plot_scenario=False) creates one facet per region.
plot_scenario (bool, optional): If True facets are scenarios; if False facets are regions
and the output is saved within the method. Defaults to True.
timezone (str, optional): The timezone to display on the x-axes. Defaults to "".
Returns:
dict: Dictionary containing the created plot and its data table, or DataSavedInModule
when plotting all regions.
"""
outputs = {}
if self.AGG_BY == 'zone':
agg = 'zone'
else:
agg = 'region'
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True,f"{agg}_{agg}s_Net_Interchange",scenario_type)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
if 1 in check_input_data:
return MissingInputData()
for zone_input in self.Zones:
self.logger.info(f"Zone = {zone_input}")
ncols, nrows = self.set_facet_col_row_dimensions(multi_scenario=scenario_type)
mplt = PlotLibrary(nrows, ncols, sharey=True,
squeeze=False, ravel_axs=True)
fig, axs = mplt.get_figure()
plt.subplots_adjust(wspace=0.6, hspace=0.3)
data_table_chunks=[]
n=0
for scenario in scenario_type:
rr_int = self[f"{agg}_{agg}s_Net_Interchange"].get(scenario)
if self.shift_leapday == True:
rr_int = self.adjust_for_leapday(rr_int)
# For plot_main handling - need to find a better solution
if plot_scenario == False:
outputs={}
for zone_input in self.Zones:
outputs[zone_input] = pd.DataFrame()
if self.AGG_BY != 'region' and self.AGG_BY != 'zone':
agg_region_mapping = self.Region_Mapping[['region',self.AGG_BY]].set_index('region').to_dict()[self.AGG_BY]
# Checks if keys all aggregate to a single value, this plot requires multiple values to work
if len(set(agg_region_mapping.values())) == 1:
return UnsupportedAggregation()
rr_int = rr_int.reset_index()
rr_int['parent'] = rr_int['parent'].map(agg_region_mapping)
rr_int['child'] = rr_int['child'].map(agg_region_mapping)
rr_int_agg = rr_int.groupby(['timestamp','parent','child'],as_index=True).sum()
rr_int_agg.rename(columns = {0:'flow (MW)'}, inplace = True)
rr_int_agg = rr_int_agg.reset_index()
# If plotting all regions update plot setup
if plot_scenario == False:
#Make a facet plot, one panel for each parent zone.
parent_region = rr_int_agg['parent'].unique()
plot_number = len(parent_region)
ncols, nrows = self.set_x_y_dimension(plot_number)
mplt = PlotLibrary(nrows, ncols,
squeeze=False, ravel_axs=True)
fig, axs = mplt.get_figure()
plt.subplots_adjust(wspace=0.6, hspace=0.7)
else:
parent_region = [zone_input]
plot_number = len(scenario_type)
grid_size = ncols*nrows
excess_axs = grid_size - plot_number
for parent in parent_region:
single_parent = rr_int_agg[rr_int_agg['parent'] == parent]
single_parent = single_parent.pivot(index = 'timestamp',columns = 'child',values = 'flow (MW)')
single_parent = single_parent.loc[:,(single_parent != 0).any(axis = 0)] #Remove all 0 columns (uninteresting).
if (parent in single_parent.columns):
single_parent = single_parent.drop(columns = [parent]) #Remove columns if parent = child
#Neaten up lines: if there are more than 4 total interchanges, aggregate all but the highest 3.
if len(single_parent.columns) > 4:
# Set the "three highest zonal interchanges" for all three scenarios.
cols_dontagg = single_parent.max().abs().sort_values(ascending = False)[0:3].index
df_dontagg = single_parent[cols_dontagg]
df_toagg = single_parent.drop(columns = cols_dontagg)
agged = df_toagg.sum(axis = 1)
df_dontagg.insert(len(df_dontagg.columns),'Other',agged)
single_parent = df_dontagg.copy()
#Convert units
if n == 0:
unitconversion = self.capacity_energy_unitconversion(single_parent)
single_parent = single_parent / unitconversion['divisor']
for column in single_parent.columns:
mplt.lineplot(single_parent, column, label=column, sub_pos=n)
axs[n].set_title(parent)
axs[n].margins(x=0.01)
mplt.set_subplot_timeseries_format(sub_pos=n)
axs[n].hlines(y = 0, xmin = axs[n].get_xlim()[0], xmax = axs[n].get_xlim()[1], linestyle = ':') #Add horizontal line at 0.
axs[n].legend(loc='lower left',bbox_to_anchor=(1,0))
n+=1
# Create data table for each scenario
scenario_names = pd.Series([scenario]*len(single_parent),name='Scenario')
data_table = single_parent.add_suffix(f" ({unitconversion['units']})")
data_table = data_table.set_index([scenario_names],append=True)
data_table_chunks.append(data_table)
# if plotting all scenarios add facet labels
if plot_scenario == True:
mplt.add_facet_labels(xlabels=self.xlabels,
ylabels = self.ylabels)
#Remove extra axes
mplt.remove_excess_axs(excess_axs, grid_size)
plt.xlabel(timezone, color='black', rotation='horizontal',labelpad = 30)
plt.ylabel(f"Net Interchange ({unitconversion['units']})", color='black', rotation='vertical', labelpad = 40)
# If plotting all regions save output and return none plot_main
if plot_scenario == False:
# Location to save to
Data_Table_Out = rr_int_agg
save_figures = os.path.join(self.figure_folder, self.AGG_BY + '_transmission')
fig.savefig(os.path.join(save_figures, "Region_Region_Interchange_{}.svg".format(self.Scenarios[0])), dpi=600, bbox_inches='tight')
Data_Table_Out.to_csv(os.path.join(save_figures, "Region_Region_Interchange_{}.csv".format(self.Scenarios[0])))
outputs = DataSavedInModule()
return outputs
Data_Out = pd.concat(data_table_chunks, copy=False, axis=0)
# if plotting all scenarios return figures to plot_main
outputs[zone_input] = {'fig': fig,'data_table':Data_Out}
return outputs
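# --- Illustrative sketch (standalone, not part of Marmot): keeping the three
# interchange columns with the largest absolute peaks and folding the rest into
# an 'Other' column, mirroring the aggregation step above. Column names invented.
import pandas as pd

df = pd.DataFrame({"A": [1.0, 2.0], "B": [10.0, -12.0], "C": [3.0, 4.0],
                   "D": [0.5, 0.2], "E": [7.0, 6.0]})
keep = df.abs().max().sort_values(ascending=False)[:3].index
out = df[keep].copy()
out["Other"] = df.drop(columns=keep).sum(axis=1)
print(out)  # columns: B, E, C, Other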
def region_region_checkerboard(self, **_):
"""Creates a checkerboard/heatmap figure showing total interchanges between regions/zones.
Each scenario is plotted on its own facet plot.
Plots and Data are saved within the module.
Returns:
DataSavedInModule: DataSavedInModule exception.
"""
outputs = {}
if self.AGG_BY == 'zone':
agg = 'zone'
else:
agg = 'region'
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True,f"{agg}_{agg}s_Net_Interchange",self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
if 1 in check_input_data:
return MissingInputData()
ncols, nrows = self.set_x_y_dimension(len(self.Scenarios))
grid_size = ncols*nrows
excess_axs = grid_size - len(self.Scenarios)
mplt = PlotLibrary(nrows, ncols,
squeeze=False, ravel_axs=True)
fig, axs = mplt.get_figure()
plt.subplots_adjust(wspace=0.02, hspace=0.4)
max_flow_group = []
Data_Out = []
n=0
for scenario in self.Scenarios:
rr_int = self[f"{agg}_{agg}s_Net_Interchange"].get(scenario)
if self.shift_leapday == True:
rr_int = self.adjust_for_leapday(rr_int)
if self.AGG_BY != 'region' and self.AGG_BY != 'zone':
agg_region_mapping = self.Region_Mapping[['region',self.AGG_BY]].set_index('region').to_dict()[self.AGG_BY]
# Checks if keys all aggregate to a single value, this plot requires multiple values to work
if len(set(agg_region_mapping.values())) == 1:
return UnsupportedAggregation()
rr_int = rr_int.reset_index()
rr_int['parent'] = rr_int['parent'].map(agg_region_mapping)
rr_int['child'] = rr_int['child'].map(agg_region_mapping)
rr_int_agg = rr_int.groupby(['parent','child'],as_index=True).sum()
rr_int_agg.rename(columns = {0:'flow (MW)'}, inplace = True)
rr_int_agg=rr_int_agg.loc[rr_int_agg['flow (MW)']>0.01] # Keep only positive flows
rr_int_agg = rr_int_agg.sort_values(ascending=False, by='flow (MW)')
rr_int_agg = rr_int_agg/1000 # MWh -> GWh
data_out = rr_int_agg.copy()
data_out.rename(columns={'flow (MW)':'{} flow (GWh)'.format(scenario)},inplace=True)
max_flow = max(rr_int_agg['flow (MW)'])
rr_int_agg = rr_int_agg.unstack('child')
rr_int_agg = rr_int_agg.droplevel(level = 0, axis = 1)
current_cmap = plt.cm.get_cmap()
current_cmap.set_bad(color='grey')
axs[n].imshow(rr_int_agg)
axs[n].set_xticks(np.arange(rr_int_agg.shape[1]))
axs[n].set_yticks(np.arange(rr_int_agg.shape[0]))
axs[n].set_xticklabels(rr_int_agg.columns)
axs[n].set_yticklabels(rr_int_agg.index)
axs[n].set_title(scenario.replace('_',' '),fontweight='bold')
# Rotate the tick labels and set their alignment.
plt.setp(axs[n].get_xticklabels(), rotation=30, ha="right",
rotation_mode="anchor")
#Delineate the boxes and make room at top and bottom
axs[n].set_xticks(np.arange(rr_int_agg.shape[1]+1)-.5, minor=True)
axs[n].set_yticks(np.arange(rr_int_agg.shape[0]+1)-.5, minor=True)
axs[n].grid(which="minor", color="k", linestyle='-', linewidth=1)
axs[n].tick_params(which="minor", bottom=False, left=False)
max_flow_group.append(max_flow)
Data_Out.append(data_out)
n+=1
#Remove extra axes
mplt.remove_excess_axs(excess_axs,grid_size)
cmap = cm.inferno
norm = mcolors.Normalize(vmin=0, vmax=max(max_flow_group))
cax = plt.axes([0.90, 0.1, 0.035, 0.8])
fig.colorbar(cm.ScalarMappable(norm=norm, cmap=cmap),
cax=cax, label='Total Net Interchange [GWh]')
plt.xlabel('To Region', color='black', rotation='horizontal',
labelpad=40)
plt.ylabel('From Region', color='black', rotation='vertical',
labelpad=40)
Data_Table_Out = pd.concat(Data_Out,axis=1)
save_figures = os.path.join(self.figure_folder, f"{self.AGG_BY}_transmission")
fig.savefig(os.path.join(save_figures, "region_region_checkerboard.svg"),
dpi=600, bbox_inches='tight')
Data_Table_Out.to_csv(os.path.join(save_figures, "region_region_checkerboard.csv"))
outputs = DataSavedInModule()
return outputs
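# --- Illustrative sketch (standalone, not part of Marmot): reshaping
# (parent, child) -> flow records into the square matrix that imshow renders
# above. Region names are invented; NaN marks pairs with no interchange.
import pandas as pd

flows = pd.DataFrame({"parent": ["X", "X", "Y"],
                      "child": ["Y", "Z", "Z"],
                      "flow (MW)": [5.0, 2.0, 3.0]}).set_index(["parent", "child"])
matrix = flows["flow (MW)"].unstack("child")
print(matrix)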
def line_violations_timeseries(self, **kwargs):
"""Creates a timeseries line plot of lineflow violations for each region.
The magnitude of each violation is plotted on the y-axis
Each sceanrio is plotted as a separate line.
This methods calls _violations() to create the figure.
Returns:
dict: Dictionary containing the created plot and its data table.
"""
outputs = self._violations(**kwargs)
return outputs
def line_violations_totals(self, **kwargs):
"""Creates a barplot of total lineflow violations for each region.
Each sceanrio is plotted as a separate bar.
This methods calls _violations() and passes the total_violations=True argument
to create the figure.
Returns:
dict: Dictionary containing the created plot and its data table.
"""
outputs = self._violations(total_violations=True, **kwargs)
return outputs
def _violations(self, total_violations: bool = False,
timezone: str = "",
start_date_range: str = None,
end_date_range: str = None, **_):
"""Creates line violation plots, line plot and barplots
This methods is called from line_violations_timeseries() and line_violations_totals()
Args:
total_violations (bool, optional): If True finds the sum of violations.
Used to create barplots. Defaults to False.
timezone (str, optional): The timezone to display on the x-axes.
Defaults to "".
start_date_range (str, optional): Defines a start date from which to represent data.
Defaults to None.
end_date_range (str, optional): Defines an end date up to which to represent data.
Defaults to None.
Returns:
dict: dictionary containing the created plot and its data table
"""
outputs = {}
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True,"line_Violation",self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
if 1 in check_input_data:
return MissingInputData()
for zone_input in self.Zones:
self.logger.info(f'Zone = {zone_input}')
all_scenarios = pd.DataFrame()
for scenario in self.Scenarios:
self.logger.info(f"Scenario = {str(scenario)}")
if self.AGG_BY == 'zone':
lines = self.meta.zone_lines(scenario)
else:
lines = self.meta.region_lines(scenario)
line_v = self["line_Violation"].get(scenario)
line_v = line_v.reset_index()
viol = line_v.merge(lines,on = 'line_name',how = 'left')
if self.AGG_BY == 'zone':
viol = viol.groupby(["timestamp", "zone"]).sum()
else:
viol = viol.groupby(["timestamp", self.AGG_BY]).sum()
one_zone = viol.xs(zone_input, level = self.AGG_BY)
one_zone = one_zone.rename(columns = {0 : scenario})
one_zone = one_zone.abs() #We don't care the direction of the violation
all_scenarios = pd.concat([all_scenarios,one_zone], axis = 1)
all_scenarios.columns = all_scenarios.columns.str.replace('_',' ')
#remove columns that are all equal to 0
all_scenarios = all_scenarios.loc[:, (all_scenarios != 0).any(axis=0)]
if all_scenarios.empty:
outputs[zone_input] = MissingZoneData()
continue
unitconversion = self.capacity_energy_unitconversion(all_scenarios)
all_scenarios = all_scenarios/unitconversion['divisor']
Data_Table_Out = all_scenarios.add_suffix(f" ({unitconversion['units']})")
#Make scenario/color dictionary.
color_dict = dict(zip(all_scenarios.columns,self.color_list))
mplt = PlotLibrary()
fig, ax = mplt.get_figure()
if total_violations==True:
all_scenarios_tot = all_scenarios.sum()
all_scenarios_tot.plot.bar(stacked=False, rot=0,
color=[color_dict.get(x, '#333333') for x in all_scenarios_tot.index],
linewidth='0.1', width=0.35, ax=ax)
else:
for column in all_scenarios:
mplt.lineplot(all_scenarios,column,color=color_dict,label=column)
ax.margins(x=0.01)
mplt.set_subplot_timeseries_format(minticks=6,maxticks=12)
ax.set_xlabel(timezone, color='black', rotation='horizontal')
mplt.add_legend()
if mconfig.parser("plot_title_as_region"):
mplt.add_main_title(zone_input)
ax.set_ylabel(f"Line violations ({unitconversion['units']})", color='black', rotation='vertical')
outputs[zone_input] = {'fig': fig,'data_table':Data_Table_Out}
return outputs
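# --- Illustrative sketch (standalone, not part of Marmot): taking the absolute
# value of the violations and dropping scenarios that are zero everywhere, as
# done above before plotting. Scenario names and values are invented.
import pandas as pd

viol = pd.DataFrame({"Scen1": [0.0, -5.0, 2.0], "Scen2": [0.0, 0.0, 0.0]})
viol = viol.abs()
viol = viol.loc[:, (viol != 0).any(axis=0)]
print(viol.columns.tolist())  # ['Scen1'] - 'Scen2' is dropped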
def net_export(self, timezone: str = "",
start_date_range: str = None,
end_date_range: str = None, **_):
"""creates a timeseries net export line graph.
Scenarios are plotted as separate lines.
Args:
timezone (str, optional): The timezone to display on the x-axes.
Defaults to "".
start_date_range (str, optional): Defines a start date from which to represent data.
Defaults to None.
end_date_range (str, optional): Defines an end date up to which to represent data.
Defaults to None.
Returns:
dict: dictionary containing the created plot and its data table
"""
if self.AGG_BY == 'zone':
agg = 'zone'
else:
agg = 'region'
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True,f"{agg}_Net_Interchange",self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
if 1 in check_input_data:
return MissingInputData()
outputs = {}
for zone_input in self.Zones:
self.logger.info(f"{self.AGG_BY} = {zone_input}")
net_export_all_scenarios = pd.DataFrame()
for scenario in self.Scenarios:
self.logger.info(f"Scenario = {scenario}")
net_export_read = self[f"{agg}_Net_Interchange"].get(scenario)
if self.shift_leapday == True:
net_export_read = self.adjust_for_leapday(net_export_read)
net_export = net_export_read.xs(zone_input, level = self.AGG_BY)
net_export = net_export.groupby("timestamp").sum()
net_export.columns = [scenario]
if pd.notna(start_date_range):
self.logger.info(f"Plotting specific date range: {str(start_date_range)} to {str(end_date_range)}")
net_export = net_export[start_date_range : end_date_range]
net_export_all_scenarios = pd.concat([net_export_all_scenarios,net_export], axis = 1)
net_export_all_scenarios.columns = net_export_all_scenarios.columns.str.replace('_', ' ')
unitconversion = self.capacity_energy_unitconversion(net_export_all_scenarios)
net_export_all_scenarios = net_export_all_scenarios/unitconversion["divisor"]
# Data table of values to return to main program
Data_Table_Out = net_export_all_scenarios.add_suffix(f" ({unitconversion['units']})")
#Make scenario/color dictionary.
color_dict = dict(zip(net_export_all_scenarios.columns,self.color_list))
mplt = PlotLibrary()
fig, ax = mplt.get_figure()
plt.subplots_adjust(wspace=0.05, hspace=0.2)
if net_export_all_scenarios.empty:
out = MissingZoneData()
outputs[zone_input] = out
continue
for column in net_export_all_scenarios:
mplt.lineplot(net_export_all_scenarios,column,color_dict, label=column)
ax.set_ylabel(f'Net exports ({unitconversion["units"]})', color='black',
rotation='vertical')
ax.set_xlabel(timezone, color='black', rotation='horizontal')
ax.margins(x=0.01)
ax.hlines(y=0, xmin=ax.get_xlim()[0], xmax=ax.get_xlim()[1],
linestyle=':')
mplt.set_subplot_timeseries_format()
mplt.add_legend(reverse_legend=True)
if mconfig.parser("plot_title_as_region"):
mplt.add_main_title(zone_input)
outputs[zone_input] = {'fig': fig, 'data_table': Data_Table_Out}
return outputs
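# --- Illustrative sketch (standalone, not part of Marmot): the kind of scaling
# capacity_energy_unitconversion is assumed to perform - choose a divisor and a
# unit label from the magnitude of the data. Thresholds and names are invented.
import pandas as pd

def pick_units(df: pd.DataFrame) -> dict:
    peak = df.abs().to_numpy().max()
    if peak > 1_000_000:
        return {"divisor": 1_000_000, "units": "TW"}
    if peak > 1_000:
        return {"divisor": 1_000, "units": "GW"}
    return {"divisor": 1, "units": "MW"}

demo = pd.DataFrame({"Scenario 1": [500.0, 2500.0]})
conv = pick_units(demo)
print((demo / conv["divisor"]).add_suffix(f" ({conv['units']})"))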
def zonal_interchange(self, figure_name: str = None,
start_date_range: str = None,
end_date_range: str = None, **_):
"""Creates a line plot of the net interchange between each zone, with a facet for each zone.
The method will only work if agg_by = "zone".
The code will create either a timeseries or duration curve depending on
if the word 'duration_curve' is in the figure_name.
To make a duration curve, ensure the word 'duration_curve' is found in the figure_name.
Args:
figure_name (str, optional): User defined figure output name.
Defaults to None.
start_date_range (str, optional): Defines a start date from which to represent data.
Defaults to None.
end_date_range (str, optional): Defines an end date up to which to represent data.
Defaults to None.
Returns:
dict: dictionary containing the created plot and its data table
"""
if self.AGG_BY not in ["zone", "zones", "Zone", "Zones"]:
self.logger.warning("This plot only supports aggregation zone")
return UnsupportedAggregation()
duration_curve=False
if 'duration_curve' in figure_name:
duration_curve = True
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True,"line_Flow",self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
if 1 in check_input_data:
return MissingInputData()
outputs = {}
# sets up x, y dimensions of plot
ncols, nrows = self.set_facet_col_row_dimensions(multi_scenario=self.Scenarios)
grid_size = ncols*nrows
# Used to calculate any excess axis to delete
plot_number = len(self.Scenarios)
excess_axs = grid_size - plot_number
for zone_input in self.Zones:
self.logger.info(f"{self.AGG_BY} = {zone_input}")
mplt = PlotLibrary(nrows, ncols, sharey=True,
squeeze=False, ravel_axs=True)
fig, axs = mplt.get_figure()
plt.subplots_adjust(wspace=0.1, hspace=0.5)
net_exports_all = []
for n, scenario in enumerate(self.Scenarios):
net_exports = []
exp_lines = self.meta.zone_exporting_lines(scenario)
imp_lines = self.meta.zone_importing_lines(scenario)
if exp_lines.empty or imp_lines.empty:
return MissingMetaData()
exp_lines.columns = ['region','line_name']
imp_lines.columns = ['region','line_name']
#Find list of lines that connect each region.
exp_oz = exp_lines[exp_lines['region'] == zone_input]
imp_oz = imp_lines[imp_lines['region'] == zone_input]
other_zones = self.meta.zones(scenario).name.tolist()
try:
other_zones.remove(zone_input)
except ValueError:
self.logger.warning("Are you sure you set agg_by = zone?")
self.logger.info(f"Scenario = {str(scenario)}")
flow = self["line_Flow"][scenario].copy()
if self.shift_leapday == True:
flow = self.adjust_for_leapday(flow)
flow = flow.reset_index()
for other_zone in other_zones:
exp_other_oz = exp_lines[exp_lines['region'] == other_zone]
imp_other_oz = imp_lines[imp_lines['region'] == other_zone]
exp_pair = pd.merge(exp_oz, imp_other_oz, left_on='line_name',
right_on='line_name')
imp_pair = pd.merge(imp_oz, exp_other_oz, left_on='line_name',
right_on='line_name')
#Swap columns for importing lines
imp_pair = imp_pair.reindex(columns=['region_from', 'line_name', 'region_to'])
export = flow[flow['line_name'].isin(exp_pair['line_name'])]
imports = flow[flow['line_name'].isin(imp_pair['line_name'])]
export = export.groupby(['timestamp']).sum()
imports = imports.groupby(['timestamp']).sum()
#Check for situations where there are only exporting or importing lines for this zonal pair.
if imports.empty:
net_export = export
elif export.empty:
net_export = -imports
else:
net_export = export - imports
net_export.columns = [other_zone]
if pd.notna(start_date_range):
if other_zone == other_zones[0]:
self.logger.info(f"Plotting specific date range: {str(start_date_range)} to {str(end_date_range)}")
net_export = net_export[start_date_range : end_date_range]
if duration_curve:
net_export = self.sort_duration(net_export,other_zone)
net_exports.append(net_export)
net_exports = pd.concat(net_exports,axis = 1)
net_exports = net_exports.dropna(axis = 'columns')
net_exports.index = pd.to_datetime(net_exports.index)
'''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| \/ | | | | |
| . . | ___ __| | ___| |___
| |\/| |/ _ \ / _` |/ _ \ / __|
| | | | (_) | (_| | __/ \__ \
\_| |_/\___/ \__,_|\___|_|___/
Make model predictions using this load.py script. This loads in all models in this
directory and makes predictions on a target folder. Note that files in this target
directory will be featurized with the default features as specified by the settings.json.
Usage: python3 load.py [target directory] [sampletype] [target model directory]
Example: python3 load.py /Users/jim/desktop/allie/load_dir audio /Users/jim/desktop/gender_tpot_classifier
Alt Usage: python3 load.py
--> this just loads all the models and makes predictions in the ./load_dir
'''
import os, json, pickle, time, sys, shutil
import pandas as pd
import numpy as np
def prev_dir(directory):
g=directory.split('/')
dir_=''
for i in range(len(g)):
if i != len(g)-1:
if i==0:
dir_=dir_+g[i]
else:
dir_=dir_+'/'+g[i]
# print(dir_)
return dir_
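# Illustrative check (not part of the original script): prev_dir() strips the
# last path component, much like os.path.dirname() for '/'-separated paths.
assert prev_dir('/Users/jim/desktop/allie') == '/Users/jim/desktop'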
def most_common(lst):
'''
get most common item in a list
'''
return max(set(lst), key=lst.count)
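# Illustrative check (not part of the original script): returns the element with
# the highest count; ties are broken arbitrarily by set iteration order.
assert most_common(['audio', 'text', 'audio']) == 'audio'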
def model_schema():
models={'audio': dict(),
'text': dict(),
'image': dict(),
'video': dict(),
'csv': dict()
}
return models
def classifyfolder(listdir):
filetypes=list()
for i in range(len(listdir)):
if listdir[i].endswith(('.mp3', '.wav')):
filetypes.append('audio')
elif listdir[i].endswith(('.png', '.jpg')):
filetypes.append('image')
elif listdir[i].endswith(('.txt')):
filetypes.append('text')
elif listdir[i].endswith(('.mp4', '.avi')):
filetypes.append('video')
elif listdir[i].endswith(('.csv')):
filetypes.append('csv')
filetypes=list(set(filetypes))
return filetypes
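# Illustrative check (not part of the original script): each detected sample
# type appears at most once, in arbitrary (set) order.
assert set(classifyfolder(['a.wav', 'b.wav', 'c.png'])) == {'audio', 'image'}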
def get_features(models, actual_model_dir, sampletype):
models=models['%s_models'%(sampletype)]
features=list()
for i in range(len(models)):
os.chdir(actual_model_dir+'/'+models[i])
temp_settings=json.load(open('settings.json'))
features=features+temp_settings['default_%s_features'%(sampletype)]
# get only the necessary features for all models
default_features=list(set(features))
return default_features
def featurize(features_dir, load_dir, model_dir, filetypes, models):
# contextually load the proper features based on the model information
actual_model_dir=prev_dir(features_dir)+'/models/'+model_dir
# get default features
sampletype=model_dir.split('_')[0]
default_features=get_features(models, actual_model_dir, sampletype)
# now change to proper directory for featurization
if model_dir=='audio_models' and 'audio' in filetypes:
os.chdir(features_dir+'/audio_features')
elif model_dir=='text_models' and 'text' in filetypes:
models=models['text_models']
os.chdir(features_dir+'/text_features')
elif model_dir=='image_models' and 'image' in filetypes:
models=models['image_models']
os.chdir(features_dir+'/image_features')
elif model_dir=='video_models' and 'video' in filetypes:
models=models['video_models']
os.chdir(features_dir+'/video_features')
elif model_dir=='csv_models' and 'csv' in filetypes:
models=models['csv_models']
os.chdir(features_dir+'/csv_features')
# call featurization API via default features
for i in range(len(default_features)):
print(os.getcwd())
os.system('python3 featurize.py %s %s'%(load_dir, default_features[i]))
def find_files(model_dir):
print(model_dir)
jsonfiles=list()
csvfiles=list()
if model_dir == 'audio_models':
listdir=os.listdir()
print(listdir)
for i in range(len(listdir)):
jsonfile=listdir[i][0:-4]+'.json'
if listdir[i].endswith('.wav') and jsonfile in listdir:
jsonfiles.append(jsonfile)
elif model_dir == 'text_models':
listdir=os.listdir()
for i in range(len(listdir)):
jsonfile=listdir[i][0:-4]+'.json'
if listdir[i].endswith('.txt') and jsonfile in listdir:
jsonfiles.append(jsonfile)
elif model_dir == 'image_models':
listdir=os.listdir()
for i in range(len(listdir)):
jsonfile=listdir[i][0:-4]+'.json'
if listdir[i].endswith('.png') and jsonfile in listdir:
jsonfiles.append(jsonfile)
elif model_dir == 'video_models':
listdir=os.listdir()
for i in range(len(listdir)):
jsonfile=listdir[i][0:-4]+'.json'
if listdir[i].endswith('.mp4') and jsonfile in listdir:
jsonfiles.append(jsonfile)
elif model_dir =='csv_models':
# csv files are a little different here
listdir=os.listdir()
for i in range(len(listdir)):
csvfile='featurized_'+listdir[i]
if listdir[i].endswith('.csv') and csvfile in listdir:
csvfiles.append(csvfile)
else:
jsonfiles=[]
print(jsonfiles)
return jsonfiles, csvfiles
def make_predictions(sampletype, transformer, clf, modeltype, jsonfiles, csvfiles, default_features, classes, modeldata, model_dir):
'''
make predictions for each featurized sample with the loaded model
and write the predicted class back into the sample's .JSON file.
'''
sampletype=sampletype.split('_')[0]
if sampletype != 'csv':
for k in range(len(jsonfiles)):
try:
g=json.load(open(jsonfiles[k]))
print(sampletype)
print(g)
features=list()
print(default_features)
for j in range(len(default_features)):
print(sampletype)
features=features+g['features'][sampletype][default_features[j]]['features']
labels=g['features'][sampletype][default_features[0]]['labels']
print(transformer)
print(features)
if transformer != '':
features=np.array(transformer.transform(np.array(features).reshape(1, -1))).reshape(1, -1)
else:
features=np.array(features).reshape(1,-1)
print(features)
metrics_=dict()
print(modeltype)
if modeltype not in ['autogluon', 'autokeras', 'autopytorch', 'alphapy', 'atm', 'keras', 'devol', 'ludwig', 'safe', 'neuraxle']:
y_pred=clf.predict(features)
elif modeltype=='alphapy':
# go to the right folder
curdir=os.getcwd()
print(os.listdir())
os.chdir(common_name+'_alphapy_session')
alphapy_dir=os.getcwd()
os.chdir('input')
os.rename('test.csv', 'predict.csv')
os.chdir(alphapy_dir)
os.system('alphapy --predict')
os.chdir('output')
listdir=os.listdir()
for k in range(len(listdir)):
if listdir[k].startswith('predictions'):
csvfile=listdir[k]
y_pred=pd.read_csv(csvfile)['prediction']
os.chdir(curdir)
elif modeltype == 'autogluon':
curdir=os.getcwd()
os.chdir(model_dir+'/model/')
from autogluon import TabularPrediction as task
print(os.getcwd())
if transformer != '':
new_features=dict()
for i in range(len(features[0])):
new_features['feature_%s'%(str(i))]=[features[0][i]]
print(new_features)
df=pd.DataFrame(new_features)
else:
df=pd.DataFrame(features, columns=labels)
y_pred=clf.predict(df)
os.chdir(curdir)
elif modeltype == 'autokeras':
curdir=os.getcwd()
os.chdir(model_dir+'/model')
print(os.getcwd())
y_pred=clf.predict(features).flatten()
os.chdir(curdir)
elif modeltype == 'autopytorch':
y_pred=clf.predict(features).flatten()
elif modeltype == 'atm':
curdir=os.getcwd()
os.chdir('atm_temp')
data = pd.read_csv('test.csv').drop(labels=['class_'], axis=1)
y_pred = clf.predict(data)
os.chdir(curdir)
elif modeltype == 'ludwig':
data=pd.read_csv('test.csv').drop(labels=['class_'], axis=1)
pred=clf.predict(data)['class__predictions']
y_pred=np.array(list(pred), dtype=np.int64)
elif modeltype== 'devol':
features=features.reshape(features.shape+ (1,)+ (1,))
y_pred=clf.predict_classes(features).flatten()
elif modeltype=='keras':
if mtype == 'c':
y_pred=clf.predict_classes(features).flatten()
elif mtype == 'r':
y_pred=clf.predict(features).flatten()
elif modeltype =='neuraxle':
y_pred=clf.transform(features)
elif modeltype=='safe':
# have to make into a pandas dataframe
test_data=pd.read_csv('test.csv').drop(columns=['class_'], axis=1)
y_pred=clf.predict(test_data)
# update model in schema
# except:
# print('error %s'%(modeltype.upper()))
# try:
# get class from classes (assuming classification)
'''
X={'male': [1],
'female': [2],
'other': [3]}
then do a search of the values
names=list(X) --> ['male', 'female', 'other']
i1=X.values().index([1]) --> 0
names[i1] --> male
'''
# print(modeldata)
outputs=dict()
for i in range(len(classes)):
outputs[classes[i]]=[i]
names=list(outputs)
i1=list(outputs.values()).index(y_pred)
class_=classes[i1]
print(y_pred)
print(outputs)
print(i1)
print(class_)
try:
models=g['models']
except:
models=model_schema()
temp=models[sampletype]
if class_ not in list(temp):
temp[class_]= [modeldata]
else:
tclass=temp[class_]
try:
# make a list if it is not already to be compatible with deprecated versions
tclass.append(modeldata)
except:
tclass=[tclass]
tclass.append(modeldata)
temp[class_]=tclass
models[sampletype]=temp
g['models']=models
print(class_)
# update database
jsonfilename=open(jsonfiles[k],'w')
json.dump(g,jsonfilename)
jsonfilename.close()
except:
print('error making jsonfile %s'%(jsonfiles[k].upper()))
else:
try:
for k in range(len(csvfiles)):
if len(csvfiles[k].split('featurized')) == 2:
features=pd.read_csv(csvfiles[k])
import structurelearning
import pandas as pd
import math
from operator import mul
from functools import reduce
import itertools
from collections import defaultdict
import numpy as np
from collections import namedtuple
def variableElimination(index, observations, model):
#print('Starting Variable Elimination!')
adj_matrix, cpt_list = model
if not observations or not observations[0]:
return cpt_list[index]
observed_indices = [o_id for o_id, o_value in observations]
# Local CPT instantiated by evidence
factors_cpts = factorize(adj_matrix, observations, cpt_list)
reduced_factors = factors_cpts.copy()
# Eliminate irrelevant variables, i.e. those which are not successors to any of the observed or queried variable
indices_to_keep = get_all_ancestors(index, observed_indices, adj_matrix)
# Eliminate all hidden variables (i.e. except target and observed)
for hidden_var in range(len(cpt_list)):
if hidden_var not in indices_to_keep:
# ignore all irrelevant variable
for key, value in reduced_factors.items():
if key.var == hidden_var:
del reduced_factors[key]
break
continue
if hidden_var != index and hidden_var not in observed_indices:
reduced_factors = eliminate_variable(hidden_var, reduced_factors)
# Join all remaining factors
if len(reduced_factors) != 1:
#print('Only observed and target left:', reduced_factors)
reduced_factors, reduced_cpt = pointwise_product(index, reduced_factors, reduced_factors.copy())
#print('Variable elimination finished!')
final_cpt = normalize(reduced_cpt)
# print('Final cpt:', final_cpt)
# print('Final factors:', reduced_factors)
return final_cpt
def get_all_ancestors(index, observed_indices, adj_matrix):
all_present_vars = list(observed_indices)
all_present_vars.append(index)
ancestors = []
adj_matrix_transposed = pd.DataFrame(adj_matrix).T
for var_id, parents in adj_matrix_transposed.iterrows():
if var_id in all_present_vars:
parents_var_ids = [i for i, e in enumerate(list(parents)) if e == 1]
if parents_var_ids:
ancestors += parents_var_ids
all_present_vars = all_present_vars + ancestors
#flat_list = [item for sublist in all_present_vars for item in sublist]
return all_present_vars
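# Illustrative check (not part of the original module): with a single edge
# 0 -> 2 in the adjacency matrix, querying variable 2 with no observations
# keeps both 2 and its parent 0.
assert sorted(get_all_ancestors(2, [], [[0, 0, 1], [0, 0, 0], [0, 0, 0]])) == [0, 2]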
def normalize(factor):
return factor / np.sum(factor)
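# Illustrative check (not part of the original module): normalize() rescales a
# factor so that its entries sum to 1.
assert np.allclose(normalize(np.array([2.0, 2.0])), [0.5, 0.5])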
def eliminate_variable(var, all_factors_dict):
print()
#print('Eliminating variable', var)
Factor = namedtuple("Factor", ["var", "conditions"])
new_factors_dict = all_factors_dict.copy()
# get the factors which will be multiplied pointwise
# prepare to update the factors dict based on the joined factors
relevant_factors, new_factors_dict = get_relevant_and_reduced_factors(var, all_factors_dict)
if len(relevant_factors) == 1:
#print('No factors to join, variable has no influence on the query.')
# print(relevant_factors)
del all_factors_dict[list(relevant_factors.keys())[0]]
# print('Relevant Factors dictionary contains:', relevant_factors.keys())
# print('Joining factors', relevant_factors.keys())
intermediate_factor, intermediate_cpt = pointwise_product(var, relevant_factors, all_factors_dict)
# if no factors were left out, we're ready to sum out
# sum out the variable by summing across the arrays in the cpt list
# each summed value represents the probability for the possible values of the outter most variable
# print('Before sumout interm factor', intermediate_factor, 'interm cpt', intermediate_cpt)
reduced_factor, reduced_cpt = sum_out(var, intermediate_factor, intermediate_cpt)
new_factors_dict[reduced_factor] = reduced_cpt
return new_factors_dict
def pointwise_product(var, relevant_factors, all_factors_dict):
Factor = namedtuple("Factor", ["var", "conditions"])
processed_factors = relevant_factors.copy()
# loop over the relevant factors and multiply pointwise until a single factor is left
# intermediate cpt for transitioning between multiple joins
intermediate_factor = Factor(var=1, conditions=(0))
intermediate_cpt = np.array([])
for factor_1, factor_2 in group_factors(relevant_factors, 2):
#print('Factors to join',factor_1, factor_2)
# since we're grouping pairwise, we want to make sure there is no factor left out
# in case of an odd number of factors
del processed_factors[factor_1]
del processed_factors[factor_2]
# Here in join factors is where the pointwise product takes place
joined_factor, cpt_multiplied = join_factors(var, factor_1, factor_2, all_factors_dict)
if intermediate_cpt.size == 0:
intermediate_cpt = cpt_multiplied
intermediate_factor = joined_factor
all_factors_dict[intermediate_factor] = intermediate_cpt
else:
# pointwise product with the previously joined factors, thus we accumulate the results
intermediate_factor, intermediate_cpt = join_factors(var, intermediate_factor, joined_factor, all_factors_dict)
all_factors_dict[intermediate_factor] = intermediate_cpt
if processed_factors:
# there is one more left as we had odd number to join
factor_left = list(processed_factors.keys())[0]
intermediate_factor, intermediate_cpt = join_factors(factor_left.var, intermediate_factor, factor_left, all_factors_dict)
return intermediate_factor, intermediate_cpt
def join_factors(var, factor_1, factor_2, factors_dict):
#print('Joining for var', var)
#print('Joining', factor_1, factor_2)
Factor = namedtuple("Factor", ["var", "conditions"])
# Build new factor name = merge of the involved variable names
factor_1_vars = get_involved_factor_variables(factor_1)
factor_2_vars = get_involved_factor_variables(factor_2)
factor_var = tuple(set(factor_1_vars + factor_2_vars))
if var in factor_1_vars:
position_of_target_f1 = list(factor_1_vars).index(var)
else:
position_of_target_f1 = -1
if var in factor_2_vars:
position_of_target_f2 = list(factor_2_vars).index(var)
else:
position_of_target_f2 = -1
# pointwise multiplication based on several possible cases dependent on the dimensions
# print('Pointwise multiplication for', factor_1, factor_2)
#print('Factors dict', factors_dict)
# print('CPT_1: ', factors_dict[factor_1].shape)
# print(factors_dict[factor_1])
# print('CPT_2: ', factors_dict[factor_2].shape)
# print(factors_dict[factor_2])
cpt_factor_1 = factors_dict[factor_1]
cpt_factor_2 = factors_dict[factor_2]
cpt_factor_1_shape = cpt_factor_1.shape
cpt_factor_2_shape = cpt_factor_2.shape
#print('CPTs to multiply:', cpt_factor_1, cpt_factor_2)
#print(cpt_factor_1_shape, cpt_factor_2_shape)
cpt_multiplied = []
# case example (3,) * (2, 3, 2) => (1,3,1) * (2,3,2)
if (cpt_factor_1_shape == cpt_factor_2_shape):
cpt_multiplied = cpt_factor_1 * cpt_factor_2
elif (len(cpt_factor_1_shape) == 1 and len(cpt_factor_2_shape) == 3) or (
len(cpt_factor_2_shape) == 1 and len(cpt_factor_1_shape) == 3):
if len(cpt_factor_1_shape) == 1:
if cpt_factor_1_shape[0] == cpt_factor_2_shape[1]:
cpt_multiplied = multiply_with_padding_on_both_sides(cpt_factor_1, cpt_factor_2)
else:
if cpt_factor_2_shape[0] == cpt_factor_1_shape[1]:
cpt_multiplied = multiply_with_padding_on_both_sides(cpt_factor_2, cpt_factor_1)
# case shared dimension is at the same position e.g. (R,T) * (L,T) = > (R,T,L), where shared is at position 1
elif (len(cpt_factor_1_shape) == len(cpt_factor_2_shape) and (position_of_target_f1 == position_of_target_f2)):
shared_dim = position_of_target_f1
cpt_multiplied = multiply_pointwise_across_shared_dim(cpt_factor_1, cpt_factor_2, shared_dim)
elif (cpt_factor_1_shape != cpt_factor_2_shape):
# case that one of them is a plain number
if not cpt_factor_1_shape or not cpt_factor_2_shape:
cpt_multiplied = cpt_factor_1 * cpt_factor_2
        # otherwise multiply the factor that contains the target variable with the other factor's probabilities
elif var in factor_1.var:
#print(cpt_factor_1)
#print(cpt_factor_2)
cpt_multiplied = []
for val in cpt_factor_1:
partial_res = []
for val_2 in cpt_factor_2:
partial_res.append(val * val_2)
cpt_multiplied.append(sum(partial_res))
else:
cpt_multiplied = cpt_factor_1 * cpt_factor_2
joined_factor = Factor(var=factor_var, conditions=())
factors_dict[joined_factor] = cpt_multiplied
#print('Joined Factor:', joined_factor)
#print('Joined CPT', cpt_multiplied)
#print()
return joined_factor, cpt_multiplied
def multiply_pointwise_across_shared_dim(cpt_1, cpt_2, dim):
# print('original dimensions', cpt_1.shape, cpt_2.shape)
# print('shared dimension', dim)
cpt_new_dimensions = merge_dimensions(cpt_1, cpt_2, dim)
cpt_new_array = np.zeros(cpt_new_dimensions, dtype=np.float64)
# print('new joined cpt shape', cpt_new_array.shape)
dim_list = list(cpt_new_dimensions)
combinations_first_factor = get_values_combinations(cpt_1.shape)
# print('combinations to loop over', combinations_first_factor)
for combination in combinations_first_factor:
array_indices = list(combination)
shared_dim_value = array_indices[dim]
        first_factor_values = cpt_1[combination]
        second_factor_conditioned_values = condition_cpt_no_par(cpt_2, dim, shared_dim_value)
        product = first_factor_values * second_factor_conditioned_values
cpt_new_array[combination] = product
return cpt_new_array
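# Hedged equivalence note (added illustration, not part of the original algorithm): for the
# 2-D case described above, i.e. cpt_1 of shape (R, T) and cpt_2 of shape (L, T) sharing dim=1,
# the loop computes out[r, t, l] = cpt_1[r, t] * cpt_2[l, t], which matches plain broadcasting:
#   >>> a = np.random.rand(2, 3)   # (R, T)
#   >>> b = np.random.rand(4, 3)   # (L, T)
#   >>> out = multiply_pointwise_across_shared_dim(a, b, dim=1)
#   >>> out.shape
#   (2, 3, 4)
#   >>> np.allclose(out, a[:, :, None] * b.T[None, :, :])
#   True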
def get_values_combinations(cpt):
list_possible_values = []
for dim in cpt:
list_possible_values.append(list(range(dim)))
tuples_combinations = []
for element in itertools.product(*list_possible_values):
tuples_combinations.append(element)
return tuples_combinations
def condition_cpt_no_par(old_cpt, par_id, par_value):
# print(old_cpt)
# print(old_cpt.shape)
cpt_dimensions = old_cpt.shape
s = slice(None)
conditions = []
    # first entry is the variable itself; we always keep it (assume that the query's target is never conditioned)
# par_id += 1 # since they start from 0, but the 0 index is alway the variable itself
for dim in range(len(cpt_dimensions)):
if dim == par_id:
conditions.append(par_value)
else:
conditions.append(s)
# print(conditions)
conditioned_cpt = old_cpt[tuple(conditions)]
return conditioned_cpt
def merge_dimensions(cpt_1, cpt_2, dim):
cpt_new_dimensions = []
for e in range(len(cpt_1.shape)):
if e != dim:
cpt_new_dimensions.append(cpt_1.shape[e])
for e in range(len(cpt_2.shape)):
if e != dim:
cpt_new_dimensions.append(cpt_2.shape[e])
cpt_new_dimensions.insert(dim, list(cpt_1.shape)[dim])
cpt_new_dimensions = tuple(cpt_new_dimensions)
return cpt_new_dimensions
def multiply_with_padding_on_both_sides(factor_cpt_to_pad, factor_to_multiply):
ar_pad_right = factor_cpt_to_pad[:, np.newaxis]
ar_pad_right_left = ar_pad_right[np.newaxis, :]
return ar_pad_right_left * factor_to_multiply
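# Illustrative example (added sketch): the padding turns a 1-D vector into shape (1, n, 1) so
# that it broadcasts against a 3-D CPT, which is the "(3,) * (2, 3, 2)" case handled in
# join_factors above:
#   >>> v = np.array([0.2, 0.3, 0.5])                                # shape (3,)
#   >>> multiply_with_padding_on_both_sides(v, np.ones((2, 3, 2))).shape
#   (2, 3, 2)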
def get_involved_factor_variables(factor):
involved_var = ()
if isinstance(factor.var, int):
involved_var += (factor.var,)
else:
involved_var += factor.var
if isinstance(factor.conditions, int):
involved_var += (factor.conditions,)
else:
involved_var += factor.conditions
return involved_var
def sum_out(var_id, factor, cpt):
# print('Summing out', var_id, 'for factor,', factor, 'and cpt', cpt)
Factor = namedtuple("Factor", ["var", "conditions"])
if len(cpt.shape) == 1:
summed_cpt = sum(cpt)
else:
summed_cpt = [sum(x) for x in cpt]
factor_var = get_involved_factor_variables(factor)
factor_var = tuple(var for var in factor_var if var != var_id)
return Factor(var=factor_var, conditions=()), summed_cpt
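# Hedged note (added): for a 1-D CPT the result above is the plain total; for a 2-D CPT the list
# comprehension is equivalent to cpt.sum(axis=1).tolist(), i.e. the variable being eliminated is
# assumed to vary along the trailing axis of each row. For example, summing variable 1 out of a
# 2x2 CPT for Factor(var=(0, 1), conditions=()) yields Factor(var=(0,), conditions=()) whose
# values are the row sums.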
# returns 2 data structures: the factors relevant for further processing,
# and the dictionary without the factors that will be merged
def get_relevant_and_reduced_factors(var, all_factors_dict):
relevant_factors = all_factors_dict.copy()
reduced_factors = all_factors_dict.copy()
for factor in all_factors_dict.keys():
if not check_if_var_in_factor(var, factor):
del relevant_factors[factor]
else:
del reduced_factors[factor]
return relevant_factors, reduced_factors
def check_if_var_in_factor(var_id, factor):
if isinstance(factor.conditions, int):
factor_var = (factor.var,) + (factor.conditions,)
else:
if isinstance(factor.var, int):
factor_var = (factor.var,) + factor.conditions
else:
factor_var = factor.var + factor.conditions
if var_id in factor_var:
return True
else:
return False
def group_factors(rel_factors, n):
"s -> (s0,s1,s2,...sn-1), (sn,sn+1,sn+2,...s2n-1), (s2n,s2n+1,s2n+2,...s3n-1), ..."
return zip(*[iter(rel_factors)] * n)
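# Illustrative example (added): group_factors pairs the factors two at a time; with an odd
# count zip() silently drops the trailing element, which is why the elimination loop above
# tracks leftovers in processed_factors.
#   >>> list(group_factors(['f1', 'f2', 'f3', 'f4'], 2))
#   [('f1', 'f2'), ('f3', 'f4')]
#   >>> list(group_factors(['f1', 'f2', 'f3'], 2))
#   [('f1', 'f2')]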
def factorize(adj_matrix, observations, cpt_list):
"""
    Returns a factors dictionary, where each key is a named tuple (the var_id together with the
    var_ids it is conditioned on) and each value is the CPT for that arrangement.
"""
Factor = namedtuple("Factor", ["var", "conditions"])
# view of the data as (rows = array of the variable parents)
    adj_matrix_transposed = pd.DataFrame(adj_matrix)
"""
This module contains main flow.
To generate submission run: `python main.py`
This is a regression approach explained here: https://www.kaggle.com/c/petfinder-adoption-prediction/discussion/76107
"""
import pandas as pd
import numpy as np
from data_utils import get_dataset
from preprocessing import remove_object_cols
from models import kfold_lgb, get_logistic
from submission_utils import OptimizedRounder, generate_submission
from evaluation_utils import sklearn_quadratic_kappa
TARGET_COL = 'AdoptionSpeed'
if __name__ == '__main__':
# step 1 - load and transform data
# load train and test tabular datasets
datasets = {dataset_type: get_dataset(dataset_type) for dataset_type in ('train', 'test')}
# remove all string columns from dataset
# todo: investigate if there are no int/float categorical cols left that hasn't been one-hot encoded
cleaned_datasets = {dataset_type: remove_object_cols(dataset) for dataset_type, dataset in datasets.items()}
# extract training labels
y_train = cleaned_datasets['train'][TARGET_COL]
print(cleaned_datasets)
    # step 2 - train a model and get its outputs
# get outputs from k-fold CV LGBM training
outputs = kfold_lgb(cleaned_datasets)
outputs1 = get_logistic(cleaned_datasets)
# step 3 - round the outputs, compute quadratic kappa and generate submission
# initialize and train OptimizedRounder
optR = OptimizedRounder()
optR.fit(outputs['train'], y_train.values)
# get rounding coefficients
coefficients = optR.coefficients()
# round outputs for training/test set
rounded_train_outputs = optR.predict(outputs['train'], coefficients).astype(int)
rounded_test_outputs = optR.predict(outputs['test'].mean(axis=1), coefficients).astype(int)
# compute quadratic kappa for train set and print it
qwk_train = sklearn_quadratic_kappa(y_train.values, rounded_train_outputs)
print(f"\nTrain QWK: {qwk_train}")
# compare models using chi sq metrics
model_comparison = compare_models_chisq(rounded_test_outputs, np.around(outputs1['test']))
# print distributions of predictions vs. true distributions
print("\nTrue Distribution:")
    print(pd.value_counts(y_train, normalize=True))
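# Added sketch (assumption, not the project's actual OptimizedRounder implementation): turning
# continuous regression outputs into ordinal classes with a sorted list of cut points can be
# done with np.digitize, e.g.
#   >>> coefficients = [0.5, 1.5, 2.5, 3.5]      # hypothetical thresholds
#   >>> np.digitize(np.array([0.2, 1.7, 3.9]), coefficients)
#   array([0, 2, 4])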
import argparse
import os
import shutil
import subprocess
from datetime import datetime
from pathlib import Path
import pandas as pd
import numpy as np
from sklearn.manifold import TSNE
from sklearn.cluster import KMeans
from mtc.challenge_pipelines.preprocess_data import generate_preprocessed_data
from mtc.settings import NLP_EXPERIMENT_PATH, NLP_RAW_DATA
from mtc.core.embeddings import DocumentEmbeddings
from mtc.core.sentence import Sentence
root_path = Path(NLP_RAW_DATA) / 'n2c2'
train_name = 'clinicalSTS2019.train.txt'
test_name = 'clinicalSTS2019.test.txt'
test_labels_name = 'clinicalSTS2019.test.gs.sim.txt'
def prepare_input_folder_official(folder):
folder = folder / 'n2c2'
folder.mkdir(parents=True, exist_ok=True)
# Just copy the original data
shutil.copy2(root_path / train_name, folder / train_name)
shutil.copy2(root_path / test_name, folder / test_name)
shutil.copy2(root_path / test_labels_name, folder / test_labels_name)
def prepare_input_folder_complete(folder):
folder = folder / 'n2c2'
folder.mkdir(parents=True, exist_ok=True)
# Load raw data
df_train = pd.read_csv(root_path / train_name, sep='\t', header=None)
    df_test = pd.read_csv(root_path / test_name, sep='\t', header=None)
"""
"DESCRIPTION_TRANSLATED", "DESCRIPTION", "LOAN_USE"のカラムに基づく特徴量の生成
Output:
output/preprocess/table/sentence_feature/{c}/
Examples:
python src/table/sentence_feature.py
"""
import re
import string
import os
import sys
from pathlib import Path
from dotenv import load_dotenv
import nltk
import pandas as pd
import textstat
from nltk.tokenize import sent_tokenize
nltk.download('punkt')
load_dotenv() # noqa
sys.path.append(f"{os.getenv('PROJECT_ROOT')}src/") # noqa
ROOT = Path(os.getenv('PROJECT_ROOT'))
puncts = [',', '.', '"', ':', ')', '(', '-', '!', '?', '|', ';', "'", '$', '&', '/', '[', ']', '>', '%', '=', '#', '*', '+', '\\', '•', '~', '@', '£', # noqa
'·', '_', '{', '}', '©', '^', '®', '`', '<', '→', '°', '€', '™', '›', '♥', '←', '×', '§', '″', '′', 'Â', '█', '½', 'à', '…', '\n', '\xa0', '\t', # noqa
'“', '★', '”', '–', '●', 'â', '►', '−', '¢', '²', '¬', '░', '¶', '↑', '±', '¿', '▾', '═', '¦', '║', '―', '¥', '▓', '—', '‹', '─', '\u3000', '\u202f', # noqa
'▒', ':', '¼', '⊕', '▼', '▪', '†', '■', '’', '▀', '¨', '▄', '♫', '☆', 'é', '¯', '♦', '¤', '▲', 'è', '¸', '¾', 'Ã', '⋅', '‘', '∞', '«', # noqa
'∙', ')', '↓', '、', '│', '(', '»', ',', '♪', '╩', '╚', '³', '・', '╦', '╣', '╔', '╗', '▬', '❤', 'ï', 'Ø', '¹', '≤', '‡', '√', ] # noqa
html_tags = ['<p>', '</p>', '<table>', '</table>', '<tr>', '</tr>', '<ul>', '<ol>', '<dl>', '</ul>', '</ol>',
'</dl>', '<li>', '<dd>', '<dt>', '</li>', '</dd>', '</dt>', '<h1>', '</h1>',
'<br>', '<br/>', '<br />', '<strong>', '</strong>', '<span>', '</span>', '<blockquote>', '</blockquote>',
'<pre>', '</pre>', '<div>', '</div>', '<h2>', '</h2>', '<h3>', '</h3>', '<h4>', '</h4>', '<h5>', '</h5>',
'<h6>', '</h6>', '<blck>', '<pr>', '<code>', '<th>', '</th>', '<td>', '</td>', '<em>', '</em>']
empty_expressions = ['<', '>', '&', ' ',
' ', '–', '—', ' ', '"', ''']
def create_basic_feature(df: pd.DataFrame, target_column: str) -> pd.DataFrame:
"""
    Create features based on the text in df's target_column.
Args:
df:
target_column:
Returns:
        DataFrame with the feature columns added to df
"""
df_ = df.copy()
    # character count excluding spaces
df_[f'{target_column}_char_count'] = df_[target_column].apply(lambda t: len(t.replace(' ', '')))
    # words
df_[f'{target_column}_word_count'] = df_[target_column].apply(lambda t: len(str(t).split()))
df_[f'{target_column}_word_unique_count'] = df_[target_column].apply(lambda t: len(set(str(t).split())))
df_[f'{target_column}_word_unique_ratio'] = df_[
f'{target_column}_word_unique_count'] / (df_[f'{target_column}_word_count'] + 1)
df_[f'{target_column}_word_ave_length'] = df_[target_column].apply(
lambda t: sum([len(w) for w in t.split()]) / len(t.split()))
    # punctuation
punctuations = string.punctuation
df_[f'{target_column}_punc_count'] = df_[target_column].apply(lambda t: len([w for w in t if w in punctuations]))
    # uppercase / lowercase letters
df_[f'{target_column}_uppercase'] = df_[target_column].str.findall(r'[A-Z]').str.len() + 1
df_[f'{target_column}_lowercase'] = df_[target_column].str.findall(r'[a-z]').str.len() + 1
df_[f'{target_column}_up_low_ratio'] = df_[f'{target_column}_uppercase'] / (df_[f'{target_column}_lowercase'] + 1)
    # paragraphs
df_[f'{target_column}_paragraph_count'] = df_[target_column].apply(lambda t: t.count('\n'))
    # sentences
df_[f'{target_column}_sentence_count'] = df_[target_column].apply(lambda t: len(sent_tokenize(t)))
df_[f'{target_column}_sentence_ave_length'] = df_[target_column].apply(
lambda t: sum([len(s) for s in sent_tokenize(t)]) / len(sent_tokenize(t))
)
df_[f'{target_column}_sentence_max_length'] = df_[target_column].apply(
lambda t: max([len(s) for s in sent_tokenize(t)])
)
df_[f'{target_column}_sentence_min_length'] = df_[target_column].apply(
lambda t: min([len(s) for s in sent_tokenize(t)]))
df_[f'{target_column}_word_per_sentence'] = df_[
f'{target_column}_word_count'] / df_[f'{target_column}_sentence_count']
    # syllables
df_[f'{target_column}_syllable_count'] = df_[target_column].apply(lambda t: textstat.syllable_count(t))
df_[f'{target_column}_syllable_per_sentence'] = df_[
f'{target_column}_syllable_count'] / df_[f'{target_column}_sentence_count']
df_[f'{target_column}_syllable_per_word'] = df_[
f'{target_column}_syllable_count'] / df_[f'{target_column}_word_count']
return df_
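# Example usage (added sketch; assumes a DataFrame with the "DESCRIPTION" column named in the
# module docstring):
#   >>> sample = pd.DataFrame({'DESCRIPTION': ['Hello world. This is a loan.']})
#   >>> feats = create_basic_feature(sample, 'DESCRIPTION')
#   >>> 'DESCRIPTION_word_count' in feats.columns
#   True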
def rm_spaces(text):
spaces = ['\u200b', '\u200e', '\u202a', '\u2009', '\u2028', '\u202c', '\ufeff', '\uf0d8', '\u2061', '\u3000',
'\x10', '\x7f', '\x9d', '\xad',
'\x97', '\x9c', '\x8b', '\x81', '\x80', '\x8c', '\x85', '\x92', '\x88', '\x8d', '\x80', '\x8e', '\x9a',
'\x94', '\xa0',
'\x8f', '\x82', '\x8a', '\x93', '\x90', '\x83', '\x96', '\x9b', '\x9e', '\x99', '\x87', '\x84', '\x9f',
] # noqa
for space in spaces:
text = text.replace(space, ' ')
return text
def remove_urls(x):
x = re.sub(r'(https?://[a-zA-Z0-9.-]*)', r'', x)
# original
x = re.sub(r'(quote=\w+\s?\w+;?\w+)', r'', x)
return x
def clean_puncts(x):
for punct in puncts:
x = x.replace(punct, f' {punct} ')
return x
def clean_html_tags(x, stop_words=[]):
for r in html_tags:
x = x.replace(r, '')
for r in empty_expressions:
x = x.replace(r, ' ')
for r in stop_words:
x = x.replace(r, '')
return x
def clean_sentences(series: pd.Series):
"""
    Clean the text of each element in the series.
Args:
series:
Returns:
"""
series = series.apply(lambda x: str(x).lower())
series = series.apply(lambda x: clean_html_tags(x))
series = series.apply(lambda x: rm_spaces(x))
series = series.apply(lambda x: remove_urls(x))
series = series.apply(lambda x: clean_puncts(x))
return series
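# Example usage (added sketch): cleaning is applied column-wise before feature generation, e.g.
#   >>> df['DESCRIPTION'] = clean_sentences(df['DESCRIPTION'])
# which lowercases the text, strips HTML tags, odd whitespace and URLs, and pads punctuation
# with spaces so downstream tokenization treats it as separate tokens.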
def main():
    train_df = pd.read_csv(ROOT / "input" / "train.csv")
from datetime import datetime, timedelta
import operator
from typing import Any, Sequence, Type, Union, cast
import warnings
import numpy as np
from pandas._libs import NaT, NaTType, Timestamp, algos, iNaT, lib
from pandas._libs.tslibs.c_timestamp import integer_op_not_supported
from pandas._libs.tslibs.period import DIFFERENT_FREQ, IncompatibleFrequency, Period
from pandas._libs.tslibs.timedeltas import Timedelta, delta_to_nanoseconds
from pandas._libs.tslibs.timestamps import RoundTo, round_nsint64
from pandas._typing import DatetimeLikeScalar
from pandas.compat import set_function_name
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError, NullFrequencyError, PerformanceWarning
from pandas.util._decorators import Appender, Substitution
from pandas.util._validators import validate_fillna_kwargs
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_datetime64_any_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_datetime_or_timedelta_dtype,
is_dtype_equal,
is_float_dtype,
is_integer_dtype,
is_list_like,
is_object_dtype,
is_period_dtype,
is_string_dtype,
is_timedelta64_dtype,
is_unsigned_integer_dtype,
pandas_dtype,
)
from pandas.core.dtypes.generic import ABCSeries
from pandas.core.dtypes.inference import is_array_like
from pandas.core.dtypes.missing import is_valid_nat_for_dtype, isna
from pandas.core import missing, nanops, ops
from pandas.core.algorithms import checked_add_with_arr, take, unique1d, value_counts
from pandas.core.arrays.base import ExtensionArray, ExtensionOpsMixin
import pandas.core.common as com
from pandas.core.indexers import check_bool_array_indexer
from pandas.core.ops.common import unpack_zerodim_and_defer
from pandas.core.ops.invalid import invalid_comparison, make_invalid_op
from pandas.tseries import frequencies
from pandas.tseries.offsets import DateOffset, Tick
def _datetimelike_array_cmp(cls, op):
"""
Wrap comparison operations to convert Timestamp/Timedelta/Period-like to
boxed scalars/arrays.
"""
opname = f"__{op.__name__}__"
nat_result = opname == "__ne__"
@unpack_zerodim_and_defer(opname)
def wrapper(self, other):
if isinstance(other, str):
try:
# GH#18435 strings get a pass from tzawareness compat
other = self._scalar_from_string(other)
except ValueError:
# failed to parse as Timestamp/Timedelta/Period
return invalid_comparison(self, other, op)
if isinstance(other, self._recognized_scalars) or other is NaT:
other = self._scalar_type(other)
self._check_compatible_with(other)
other_i8 = self._unbox_scalar(other)
result = op(self.view("i8"), other_i8)
if isna(other):
result.fill(nat_result)
elif not is_list_like(other):
return invalid_comparison(self, other, op)
elif len(other) != len(self):
raise ValueError("Lengths must match")
else:
if isinstance(other, list):
# TODO: could use pd.Index to do inference?
other = np.array(other)
if not isinstance(other, (np.ndarray, type(self))):
return invalid_comparison(self, other, op)
if is_object_dtype(other):
# We have to use comp_method_OBJECT_ARRAY instead of numpy
# comparison otherwise it would fail to raise when
# comparing tz-aware and tz-naive
with np.errstate(all="ignore"):
result = ops.comp_method_OBJECT_ARRAY(
op, self.astype(object), other
)
o_mask = isna(other)
elif not type(self)._is_recognized_dtype(other.dtype):
return invalid_comparison(self, other, op)
else:
# For PeriodDType this casting is unnecessary
other = type(self)._from_sequence(other)
self._check_compatible_with(other)
result = op(self.view("i8"), other.view("i8"))
o_mask = other._isnan
if o_mask.any():
result[o_mask] = nat_result
if self._hasnans:
result[self._isnan] = nat_result
return result
return set_function_name(wrapper, opname, cls)
class AttributesMixin:
_data: np.ndarray
@classmethod
def _simple_new(cls, values, **kwargs):
raise AbstractMethodError(cls)
@property
def _scalar_type(self) -> Type[DatetimeLikeScalar]:
"""The scalar associated with this datelike
* PeriodArray : Period
* DatetimeArray : Timestamp
* TimedeltaArray : Timedelta
"""
        raise AbstractMethodError(self)
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for gps_building_blocks.ml.diagnostics.binary_classification."""
from absl.testing import absltest
import numpy as np
import pandas as pd
import sklearn.metrics
from absl.testing import parameterized
from gps_building_blocks.ml.diagnostics import binary_classification
TEST_DATA = pd.DataFrame({
'label': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'prediction': [
0.7, 0.63, 0.4, 0.77, 0.45, 0.8, 0.41, 0.82, 0.7, 0.6, 0.5, 0.45, 0.74,
0.11, 0.21, 0.05, 0.67, 0.79, 0.60, 0.10
],
'num_feature_1': [
20, 30, 22.5, 19, 30, 32, 15.6, 17.87, 25.45, 17.3, 30.2, 33, 27.5,
25.1, 35.6, 33.26, 38.5, 31.23, 28.44, 30.32
],
'cat_feature_1': [
'M', 'M', 'F', 'M', 'M', 'F', 'M', 'M', 'F', 'F', 'F', 'F', 'M', 'M',
'F', 'M', 'F', 'M', 'M', 'F'
]
})
class BinaryClassificationDiagnosticsTest(parameterized.TestCase,
absltest.TestCase):
def test_calc_performance_metrics_returns_correct_values(self):
expected_results = {
'prop_positives': 0.5000,
'auc_roc': 0.7100,
'auc_pr': 0.7278,
'binarize_threshold': 0.5000,
'accuracy': 0.6500,
'true_positive_rate': 0.7000,
'true_negative_rate': 0.6000,
'precision': 0.6364,
'f1_score': 0.6667
}
    results = (
binary_classification.calc_performance_metrics(
labels=np.array(TEST_DATA['label']),
probability_predictions=np.array(TEST_DATA['prediction'])))
    self.assertDictEqual(expected_results, results)
def test_resulted_bin_metrics_does_not_contain_nas(self):
results = (
binary_classification.calc_bin_metrics(
labels=np.array(TEST_DATA['label']),
probability_predictions=np.array(TEST_DATA['prediction']),
number_bins=3))
self.assertFalse(results.isna().values.any())
def test_calc_bin_metrics_returns_correct_values(self):
bin_number = [1, 2, 3]
bin_size = [7, 5, 8]
positive_instances = [5, 2, 3]
precision = [0.7143, 0.4000, 0.3750]
prop_positives = [0.5000, 0.5000, 0.5000]
precision_uplift = [1.4286, 0.8000, 0.7500]
coverage = [0.5000, 0.2000, 0.3000]
results = (
binary_classification.calc_bin_metrics(
labels=np.array(TEST_DATA['label']),
probability_predictions=np.array(TEST_DATA['prediction']),
number_bins=3))
self.assertListEqual(results['bin_number'].tolist(), bin_number)
self.assertListEqual(results['bin_size'].tolist(), bin_size)
self.assertListEqual(results['positive_instances'].tolist(),
positive_instances)
self.assertListEqual(results['precision'].tolist(), precision)
self.assertListEqual(results['prop_positives'].tolist(), prop_positives)
self.assertListEqual(results['precision_uplift'].tolist(), precision_uplift)
self.assertListEqual(results['coverage'].tolist(), coverage)
def test_plot_bin_metrics_returns_bar_plots_with_correct_elements(self):
bin_metrics = (
binary_classification.calc_bin_metrics(
labels=np.array(TEST_DATA['label']),
probability_predictions=np.array(TEST_DATA['prediction']),
number_bins=3))
x_data = list(bin_metrics['bin_number'])
y_data_precision = list(bin_metrics['precision'])
y_data_precision_uplift = list(bin_metrics['precision_uplift'])
y_data_coverage = list(bin_metrics['coverage'])
plots = binary_classification.plot_bin_metrics(bin_metrics)
plot_1 = plots[0]
plot_2 = plots[1]
plot_3 = plots[2]
with self.subTest(name='test the elements of precision bar plot'):
self.assertListEqual(
x_data, [int(tick.get_text()) for tick in plot_1.get_xticklabels()])
self.assertListEqual(y_data_precision,
[h.get_height() for h in plot_1.patches])
with self.subTest(name='test the elements of precision uplift bar plot'):
self.assertListEqual(
x_data, [int(tick.get_text()) for tick in plot_2.get_xticklabels()])
self.assertListEqual(y_data_precision_uplift,
[h.get_height() for h in plot_2.patches])
with self.subTest(name='test the elements of coverage bar plot'):
self.assertListEqual(
x_data, [int(tick.get_text()) for tick in plot_3.get_xticklabels()])
self.assertListEqual(y_data_coverage,
[h.get_height() for h in plot_3.patches])
def test_calc_cumulative_bin_metrics_returns_correct_values(self):
cumulative_bin_number = [1, 2, 3]
bin_size = [7, 13, 20]
bin_size_proportion = [0.3500, 0.6500, 1.0000]
positive_instances = [5, 7, 10]
precision = [0.7143, 0.5385, 0.5000]
coverage = [0.5000, 0.7000, 1.0000]
prop_label_positives = [0.5000, 0.5000, 0.5000]
precision_uplift = [1.4286, 1.0770, 1.0000]
results = (
binary_classification.calc_cumulative_bin_metrics(
labels=np.array(TEST_DATA['label']),
probability_predictions=np.array(TEST_DATA['prediction']),
number_bins=3))
self.assertListEqual(results['cumulative_bin_number'].tolist(),
cumulative_bin_number)
self.assertListEqual(results['bin_size'].tolist(), bin_size)
self.assertListEqual(results['bin_size_proportion'].tolist(),
bin_size_proportion)
self.assertListEqual(results['positive_instances'].tolist(),
positive_instances)
self.assertListEqual(results['precision'].tolist(), precision)
self.assertListEqual(results['coverage (recall)'].tolist(), coverage)
self.assertListEqual(results['prop_label_positives'].tolist(),
prop_label_positives)
self.assertListEqual(results['precision_uplift'].tolist(), precision_uplift)
def test_plot_cumulative_bin_metrics_returns_correct_plots(self):
cumulative_bin_metrics = (
binary_classification.calc_cumulative_bin_metrics(
labels=np.array(TEST_DATA['label']),
probability_predictions=np.array(TEST_DATA['prediction']),
number_bins=3))
x_data = list(cumulative_bin_metrics['cumulative_bin_number'])
y_data_precision = list(cumulative_bin_metrics['precision'])
y_data_precision_uplift = list(cumulative_bin_metrics['precision_uplift'])
y_data_coverage = list(cumulative_bin_metrics['coverage (recall)'])
plots = (
binary_classification.plot_cumulative_bin_metrics(
cumulative_bin_metrics))
plot_1 = plots[0]
plot_2 = plots[1]
plot_3 = plots[2]
with self.subTest(name='test the elements of precision bar plot'):
self.assertListEqual(
x_data, [int(tick.get_text()) for tick in plot_1.get_xticklabels()])
self.assertListEqual(y_data_precision,
[h.get_height() for h in plot_1.patches])
with self.subTest(name='test the elements of precision uplift bar plot'):
self.assertListEqual(
x_data, [int(tick.get_text()) for tick in plot_2.get_xticklabels()])
self.assertListEqual(y_data_precision_uplift,
[h.get_height() for h in plot_2.patches])
with self.subTest(name='test the elements of coverage bar plot'):
self.assertListEqual(
x_data, [int(tick.get_text()) for tick in plot_3.get_xticklabels()])
self.assertListEqual(y_data_coverage,
[h.get_height() for h in plot_3.patches])
def test_plot_binned_features_return_plots_with_correct_elements(self):
number_bins = 3
prediction_column_name = 'prediction'
# prepare results
test_data = TEST_DATA.sort_values(
by=prediction_column_name, ascending=False)
test_data['bin_number'] = (
number_bins -
pd.qcut(test_data[prediction_column_name], q=number_bins, labels=False))
# stats for the numerical feature
num_binned_test_data = test_data[['bin_number', 'num_feature_1']]
num_binned_test_data = num_binned_test_data.rename(
columns={'num_feature_1': 'v'})
num_bin_stats = (
num_binned_test_data[['bin_number',
'v']].groupby('bin_number',
as_index=False).agg('mean'))
num_bin_stats.columns = ['bin_number', 'var_mean']
# stats for the categorical feature
cat_binned_test_data = test_data[['bin_number', 'cat_feature_1']]
cat_binned_test_data = cat_binned_test_data.rename(
columns={'cat_feature_1': 'categories'})
bin_counts = (
cat_binned_test_data.groupby('bin_number', as_index=False).count())
bin_counts.columns = ['bin_number', 'total_count']
cat_binned_test_data['temp_column'] = 1
bin_category_counts = (
cat_binned_test_data.groupby(['bin_number', 'categories'],
as_index=False).count())
bin_category_counts.columns = ['bin_number', 'categories', 'count']
    cat_bin_stats = pd.merge(bin_category_counts, bin_counts, on='bin_number')
from ..responses import ResponseMsg
from .argument import ArgSession, Argument
from ..paths import PATHS
import pandas as pd
import os, datetime
# 2021-12-11: Code completed and debugged
# 2021-12-11: Added support for multiple entries, plus a deletion mechanism
SUBS_LIST = os.path.join(PATHS['data'], 'qq_subscription_list.xlsx')
# Called by the scheduler to look up the subscription list
def get_qq_subscriptions(request, now=None):
    # If the subscription list does not exist
if not os.path.exists(SUBS_LIST):
return []
df = pd.read_excel(SUBS_LIST)
request_list = []
if now is None:
now = datetime.datetime.now()
for i in df.to_dict('records'):
        if int(i['hour']) == now.hour and int(i['minute']) == now.minute:
new_r = request.new(msg=i['message'])
new_r.user_id = str(i['user_id'])
request_list.append(new_r)
return request_list
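# Example (added sketch; 'hour'/'minute'/'user_id'/'message' are the columns written below): a
# row in qq_subscription_list.xlsx such as
#   {'hour': 8, 'minute': 0, 'user_id': '12345', 'message': '北京疫情'}
# makes get_qq_subscriptions(request) return a cloned request carrying that message whenever
# now.hour == 8 and now.minute == 0; the user_id '12345' here is a hypothetical QQ number.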
class AddQQSubscriptionSession(ArgSession):
def __init__(self, user_id):
ArgSession.__init__(self, user_id=user_id)
self.session_type = '添加QQ订阅条目'
self.description = '添加订阅条目,进行定点报时或天气预报等'
self._max_delta = 60
self.strict_commands = ['订阅', 'subscribe']
self.arg_list = [Argument(key='hour', alias_list=['-hr'],
required=True, get_next=True,
help_text='发送消息的时间(小时,0-23之间)',
ask_text='订阅时间是(小时,可带小数)?'),
Argument(key='minute', alias_list=['-min'],
required=False, get_next=True,
default_value=0,
help_text='发送消息的时间(分钟,0-59之间)'),
Argument(key='delta-hour', alias_list=['-dhr'],
required=False, get_next=True,
default_value=0,
help_text='重复发送间隔(整数,默认为0,不重复)'),
Argument(key='user_id', alias_list=['-uid'],
required=False, get_next=True,
help_text='接收订阅的用户ID(QQ号)'),
Argument(key='message', alias_list=['-msg'],
required=True, get_next=True,
help_text='订阅内容',
ask_text='订阅内容(即定时发送的指令)是?')]
self.default_arg = self.arg_list[0]
self.detail_description = '例如,发送“订阅 -hr 23 -min 30 -msg 北京疫情”,' \
'每天晚23时30分机器人会认为你给它发送了“北京疫情”指令,' \
'他就会将疫情信息发送给你。\n' \
'进阶案例:\n' \
'1. 订阅 -hr 20 -msg "天气 北京市奥林匹克公园 -g -nr"\n' \
'2. 订阅 -hr 8 -msg "查教室 -d 今天 -b 教一楼 -c 雁栖湖 -y"\n' \
'3. 订阅 -hr 8 -dhr 6 -msg "天气 北京市 -wmap"'
def internal_handle(self, request):
self.deactivate()
        # Read the recorded arguments
hour = self.arg_dict['hour'].value
minute = self.arg_dict['minute'].value
dhour = self.arg_dict['delta-hour'].value
msg = self.arg_dict['message'].value
if self.arg_dict['user_id'].called:
user_id = str(self.arg_dict['user_id'].value)
else:
user_id = str(request.user_id)
        # Normalize and validate
try:
minute = float(minute) + float(hour) * 60 # 全部加到分钟上
minute = int(minute) # 向下取整
hour = minute // 60
minute %= 60
hour %= 24
dhour = int(dhour)
except ValueError:
return ResponseMsg(f'【{self.session_type}】订阅失败:时间格式有误')
        # Read the existing data
if os.path.exists(SUBS_LIST):
dfl = pd.read_excel(SUBS_LIST).to_dict('records')
else:
            # Create a new table
dfl = []
        if dhour > 0:  # repeat, going forward
for h in range(hour, 24, dhour):
dfl.append({'hour': h, 'minute': minute, 'user_id': user_id, 'message': msg})
        elif dhour < 0:  # repeat, going backward
for h in range(hour, -1, dhour):
dfl.append({'hour': h, 'minute': minute, 'user_id': user_id, 'message': msg})
        else:  # no repetition
dfl.append({'hour': hour, 'minute': minute, 'user_id': user_id, 'message': msg})
        # Save the data
        pd.DataFrame(dfl)
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import pytest
import re
from numpy import nan as NA
import numpy as np
from numpy.random import randint
from pandas.compat import range, u
import pandas.compat as compat
from pandas import Index, Series, DataFrame, isna, MultiIndex, notna
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
import pandas.core.strings as strings
class TestStringMethods(object):
def test_api(self):
# GH 6106, GH 9322
assert Series.str is strings.StringMethods
assert isinstance(Series(['']).str, strings.StringMethods)
# GH 9184
invalid = Series([1])
with tm.assert_raises_regex(AttributeError,
"only use .str accessor"):
invalid.str
assert not hasattr(invalid, 'str')
def test_iter(self):
# GH3638
strs = 'google', 'wikimedia', 'wikipedia', 'wikitravel'
ds = Series(strs)
for s in ds.str:
# iter must yield a Series
assert isinstance(s, Series)
# indices of each yielded Series should be equal to the index of
# the original Series
tm.assert_index_equal(s.index, ds.index)
for el in s:
# each element of the series is either a basestring/str or nan
assert isinstance(el, compat.string_types) or isna(el)
# desired behavior is to iterate until everything would be nan on the
# next iter so make sure the last element of the iterator was 'l' in
# this case since 'wikitravel' is the longest string
assert s.dropna().values.item() == 'l'
def test_iter_empty(self):
ds = Series([], dtype=object)
i, s = 100, 1
for i, s in enumerate(ds.str):
pass
        # nothing to iterate over, so the loop body never runs and the
        # values should remain unchanged
assert i == 100
assert s == 1
def test_iter_single_element(self):
ds = Series(['a'])
for i, s in enumerate(ds.str):
pass
assert not i
assert_series_equal(ds, s)
def test_iter_object_try_string(self):
ds = Series([slice(None, randint(10), randint(10, 20)) for _ in range(
4)])
i, s = 100, 'h'
for i, s in enumerate(ds.str):
pass
assert i == 100
assert s == 'h'
def test_cat(self):
one = np.array(['a', 'a', 'b', 'b', 'c', NA], dtype=np.object_)
two = np.array(['a', NA, 'b', 'd', 'foo', NA], dtype=np.object_)
# single array
result = strings.str_cat(one)
exp = 'aabbc'
assert result == exp
result = strings.str_cat(one, na_rep='NA')
exp = 'aabbcNA'
assert result == exp
result = strings.str_cat(one, na_rep='-')
exp = 'aabbc-'
assert result == exp
result = strings.str_cat(one, sep='_', na_rep='NA')
exp = 'a_a_b_b_c_NA'
assert result == exp
result = strings.str_cat(two, sep='-')
exp = 'a-b-d-foo'
assert result == exp
# Multiple arrays
result = strings.str_cat(one, [two], na_rep='NA')
exp = np.array(['aa', 'aNA', 'bb', 'bd', 'cfoo', 'NANA'],
dtype=np.object_)
tm.assert_numpy_array_equal(result, exp)
result = strings.str_cat(one, two)
exp = np.array(['aa', NA, 'bb', 'bd', 'cfoo', NA], dtype=np.object_)
tm.assert_almost_equal(result, exp)
def test_count(self):
values = np.array(['foo', 'foofoo', NA, 'foooofooofommmfoo'],
dtype=np.object_)
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_count(mixed, 'a')
xp = np.array([1, NA, 0, NA, NA, 0, NA, NA, NA])
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.count('a')
xp = Series([1, NA, 0, NA, NA, 0, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = [u('foo'), u('foofoo'), NA, u('foooofooofommmfoo')]
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
def test_contains(self):
values = np.array(['foo', NA, 'fooommm__foo',
'mmm_', 'foommm[_]+bar'], dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, NA, True, True, False], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, regex=False)
expected = np.array([False, NA, False, False, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
values = ['foo', 'xyz', 'fooommm__foo', 'mmm_']
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# case insensitive using regex
values = ['Foo', 'xYz', 'fOOomMm__fOo', 'MMM_']
result = strings.str_contains(values, 'FOO|mmm', case=False)
expected = np.array([True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
# case insensitive without regex
result = strings.str_contains(values, 'foo', regex=False, case=False)
expected = np.array([True, False, True, False])
tm.assert_numpy_array_equal(result, expected)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_contains(mixed, 'o')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.contains('o')
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = np.array([u'foo', NA, u'fooommm__foo', u'mmm_'],
dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, np.nan, True, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, na=False)
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
values = np.array(['foo', 'xyz', 'fooommm__foo', 'mmm_'],
dtype=np.object_)
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# na
values = Series(['om', 'foo', np.nan])
res = values.str.contains('foo', na="foo")
assert res.loc[2] == "foo"
def test_startswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = np.array(['a', NA, 'b', True, datetime.today(),
'foo', None, 1, 2.], dtype=np.object_)
rs = strings.str_startswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.startswith('f')
assert isinstance(rs, Series)
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.startswith('foo', na=True)
tm.assert_series_equal(result, exp.fillna(True).astype(bool))
def test_endswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_endswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, False, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.endswith('f')
xp = Series([False, NA, False, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.endswith('foo', na=False)
tm.assert_series_equal(result, exp.fillna(False).astype(bool))
def test_title(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.title()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.title()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.title()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_lower_upper(self):
values = Series(['om', NA, 'nom', 'nom'])
result = values.str.upper()
exp = Series(['OM', NA, 'NOM', 'NOM'])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
mixed = mixed.str.upper()
rs = Series(mixed).str.lower()
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('nom'), u('nom')])
result = values.str.upper()
exp = Series([u('OM'), NA, u('NOM'), u('NOM')])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
def test_capitalize(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.capitalize()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.capitalize()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.capitalize()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_swapcase(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.swapcase()
exp = Series(["foo", "bar", NA, "bLAH", "BLURG"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "Blah", None,
1, 2.])
mixed = mixed.str.swapcase()
exp = Series(["foo", NA, "BAR", NA, NA, "bLAH", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.swapcase()
exp = Series([u("foo"), NA, u("BAR"), u("bLURG")])
tm.assert_series_equal(results, exp)
def test_casemethods(self):
values = ['aaa', 'bbb', 'CCC', 'Dddd', 'eEEE']
s = Series(values)
assert s.str.lower().tolist() == [v.lower() for v in values]
assert s.str.upper().tolist() == [v.upper() for v in values]
assert s.str.title().tolist() == [v.title() for v in values]
assert s.str.capitalize().tolist() == [v.capitalize() for v in values]
assert s.str.swapcase().tolist() == [v.swapcase() for v in values]
def test_replace(self):
values = Series(['fooBAD__barBAD', NA])
result = values.str.replace('BAD[_]*', '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series(['foobarBAD', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace('BAD[_]*', '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace('BAD[_]*', '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
result = values.str.replace(r"(?<=\w),(?=\w)", ", ", flags=re.UNICODE)
tm.assert_series_equal(result, exp)
# GH 13438
for klass in (Series, Index):
for repl in (None, 3, {'a': 'b'}):
for data in (['a', 'b', None], ['a', 'b', 'c', 'ad']):
values = klass(data)
pytest.raises(TypeError, values.str.replace, 'a', repl)
def test_replace_callable(self):
# GH 15055
values = Series(['fooBAD__barBAD', NA])
# test with callable
repl = lambda m: m.group(0).swapcase()
result = values.str.replace('[a-z][A-Z]{2}', repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
# test with wrong number of arguments, raising an error
if compat.PY2:
p_err = r'takes (no|(exactly|at (least|most)) ?\d+) arguments?'
else:
p_err = (r'((takes)|(missing)) (?(2)from \d+ to )?\d+ '
r'(?(3)required )positional arguments?')
repl = lambda: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x, y=None: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
# test regex named groups
values = Series(['Foo Bar Baz', NA])
pat = r"(?P<first>\w+) (?P<middle>\w+) (?P<last>\w+)"
repl = lambda m: m.group('middle').swapcase()
result = values.str.replace(pat, repl)
exp = Series(['bAR', NA])
tm.assert_series_equal(result, exp)
def test_replace_compiled_regex(self):
# GH 15446
values = Series(['fooBAD__barBAD', NA])
# test with compiled regex
pat = re.compile(r'BAD[_]*')
result = values.str.replace(pat, '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace(pat, '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace(pat, '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace(pat, '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
pat = re.compile(r"(?<=\w),(?=\w)", flags=re.UNICODE)
result = values.str.replace(pat, ", ")
tm.assert_series_equal(result, exp)
# case and flags provided to str.replace will have no effect
# and will produce warnings
values = Series(['fooBAD__barBAD__bad', NA])
pat = re.compile(r'BAD[_]*')
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', flags=re.IGNORECASE)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=False)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=True)
# test with callable
values = Series(['fooBAD__barBAD', NA])
repl = lambda m: m.group(0).swapcase()
pat = re.compile('[a-z][A-Z]{2}')
result = values.str.replace(pat, repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
def test_repeat(self):
values = Series(['a', 'b', NA, 'c', NA, 'd'])
result = values.str.repeat(3)
exp = Series(['aaa', 'bbb', NA, 'ccc', NA, 'ddd'])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series(['a', 'bb', NA, 'cccc', NA, 'dddddd'])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
rs = Series(mixed).str.repeat(3)
xp = Series(['aaa', NA, 'bbb', NA, NA, 'foofoofoo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('d')])
result = values.str.repeat(3)
exp = Series([u('aaa'), u('bbb'), NA, u('ccc'), NA, u('ddd')])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series([u('a'), u('bb'), NA, u('cccc'), NA, u('dddddd')])
tm.assert_series_equal(result, exp)
def test_match(self):
# New match behavior introduced in 0.13
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*BAD[_]+.*BAD')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# test passing as_indexer still works but is ignored
values = Series(['fooBAD__barBAD', NA, 'foo'])
exp = Series([True, NA, False])
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=True)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=False)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*(BAD[_]+).*(BAD)', as_indexer=True)
tm.assert_series_equal(result, exp)
pytest.raises(ValueError, values.str.match, '.*(BAD[_]+).*(BAD)',
as_indexer=False)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.match('.*(BAD[_]+).*(BAD)')
xp = Series([True, NA, True, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# na GH #6609
res = Series(['a', 0, np.nan]).str.match('a', na=False)
exp = Series([True, False, False])
assert_series_equal(exp, res)
res = Series(['a', 0, np.nan]).str.match('a')
exp = Series([True, np.nan, np.nan])
assert_series_equal(exp, res)
def test_extract_expand_None(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)', expand=None)
def test_extract_expand_unspecified(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)')
def test_extract_expand_False(self):
# Contains tests like those in test_match and some others.
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er, er, er,
er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# GH9980
# Index only works with one regex group since
# multi-group would expand to a frame
idx = Index(['A1', 'A2', 'A3', 'A4', 'B5'])
with tm.assert_raises_regex(ValueError, "supported"):
idx.str.extract('([AB])([123])', expand=False)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=False)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=False)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=False)
assert result.name == 'uno'
exp = klass(['A', 'A'], name='uno')
if klass == Series:
tm.assert_series_equal(result, exp)
else:
tm.assert_index_equal(result, exp)
s = Series(['A1', 'B2', 'C3'])
# one group, no matches
result = s.str.extract('(_)', expand=False)
exp = Series([NA, NA, NA], dtype=object)
tm.assert_series_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=False)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract('(?P<letter>[AB])', expand=False)
exp = Series(['A', 'B', NA], name='letter')
tm.assert_series_equal(result, exp)
# two named groups
result = s.str.extract('(?P<letter>[AB])(?P<number>[123])',
expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
'([AB])([123])(?:[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract(
'(?P<letter>[AB])?(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, '3']],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract(
'(?P<letter>[ABC])(?P<number>[123])?', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], ['C', NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
s = Series(data, index=index)
result = s.str.extract(r'(\d)', expand=False)
exp = Series(['1', '2', NA], index=index)
tm.assert_series_equal(result, exp)
result = Series(data, index=index).str.extract(
r'(?P<letter>\D)(?P<number>\d)?', expand=False)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
]
for index in i_funs:
check_index(index())
        # single series name is preserved
s = Series(['a3', 'b3', 'c2'], name='bob')
r = s.str.extract(r'(?P<sue>[a-z])', expand=False)
e = Series(['a', 'b', 'c'], name='sue')
tm.assert_series_equal(r, e)
assert r.name == e.name
def test_extract_expand_True(self):
# Contains tests like those in test_match and some others.
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er,
er, er, er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=True)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=True)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result_df = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=True)
assert isinstance(result_df, DataFrame)
result_series = result_df['uno']
assert_series_equal(result_series, Series(['A', 'A'], name='uno'))
def test_extract_series(self):
# extract should give the same result whether or not the
# series has a name.
for series_name in None, "series_name":
s = Series(['A1', 'B2', 'C3'], name=series_name)
# one group, no matches
result = s.str.extract('(_)', expand=True)
exp = DataFrame([NA, NA, NA], dtype=object)
tm.assert_frame_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=True)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=True)
exp = DataFrame(['A', 'B', NA])
tm.assert_frame_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=True)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract('(?P<letter>[AB])', expand=True)
exp = DataFrame({"letter": ['A', 'B', NA]})
tm.assert_frame_equal(result, exp)
# two named groups
result = s.str.extract(
'(?P<letter>[AB])(?P<number>[123])',
expand=True)
e_list = [
['A', '1'],
['B', '2'],
[NA, NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])', expand=True)
exp = DataFrame(e_list, columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])', expand=True)
exp = DataFrame(['A', 'B', NA])
tm.assert_frame_equal(result, exp)
def test_extract_optional_groups(self):
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
'([AB])([123])(?:[123])', expand=True)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract(
'(?P<letter>[AB])?(?P<number>[123])', expand=True)
e_list = [
['A', '1'],
['B', '2'],
[NA, '3']
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract(
'(?P<letter>[ABC])(?P<number>[123])?', expand=True)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
result = Series(data, index=index).str.extract(
r'(\d)', expand=True)
exp = DataFrame(['1', '2', NA], index=index)
tm.assert_frame_equal(result, exp)
result = Series(data, index=index).str.extract(
r'(?P<letter>\D)(?P<number>\d)?', expand=True)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
]
for index in i_funs:
check_index(index())
def test_extract_single_group_returns_frame(self):
# GH11386 extract should always return DataFrame, even when
# there is only one group. Prior to v0.18.0, extract returned
# Series when there was only one group in the regex.
s = Series(['a3', 'b3', 'c2'], name='series_name')
r = s.str.extract(r'(?P<letter>[a-z])', expand=True)
e = DataFrame({"letter": ['a', 'b', 'c']})
tm.assert_frame_equal(r, e)
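# With expand=False the same single-group pattern returns a Series
# instead, named after the capture group when the group is named.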
def test_extractall(self):
subject_list = [
'dave@google.com',
'tdhock5@gmail.com',
'maudelaperriere@gmail.com',
'rob@gmail.com some text steve@gmail.com',
'a@b.com some text c@d.com and e@f.com',
np.nan,
"",
]
expected_tuples = [
("dave", "google", "com"),
("tdhock5", "gmail", "com"),
("maudelaperriere", "gmail", "com"),
("rob", "gmail", "com"), ("steve", "gmail", "com"),
("a", "b", "com"), ("c", "d", "com"), ("e", "f", "com"),
]
named_pattern = r"""
(?P<user>[a-z0-9]+)
@
(?P<domain>[a-z]+)
\.
(?P<tld>[a-z]{2,4})
"""
expected_columns = ["user", "domain", "tld"]
S = Series(subject_list)
# extractall should return a DataFrame with one row for each
# match, indexed by the subject from which the match came.
expected_index = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(3, 0),
(3, 1),
(4, 0),
(4, 1),
(4, 2),
], names=(None, "match"))
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = S.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# The index of the input Series should be used to construct
# the index of the output DataFrame:
series_index = MultiIndex.from_tuples([
("single", "Dave"),
("single", "Toby"),
("single", "Maude"),
("multiple", "robAndSteve"),
("multiple", "abcdef"),
("none", "missing"),
("none", "empty"),
])
Si = Series(subject_list, series_index)
expected_index = MultiIndex.from_tuples([
("single", "Dave", 0),
("single", "Toby", 0),
("single", "Maude", 0),
("multiple", "robAndSteve", 0),
("multiple", "robAndSteve", 1),
("multiple", "abcdef", 0),
("multiple", "abcdef", 1),
("multiple", "abcdef", 2),
], names=(None, None, "match"))
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = Si.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# MultiIndexed subject with names.
Sn = Series(subject_list, series_index)
Sn.index.names = ("matches", "description")
expected_index.names = ("matches", "description", "match")
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = Sn.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# optional groups.
subject_list = ['', 'A1', '32']
named_pattern = '(?P<letter>[AB])?(?P<number>[123])'
computed_df = Series(subject_list).str.extractall(named_pattern)
expected_index = MultiIndex.from_tuples([
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
expected_df = DataFrame([
('A', '1'),
(NA, '3'),
(NA, '2'),
], expected_index, columns=['letter', 'number'])
tm.assert_frame_equal(computed_df, expected_df)
# only one of two groups has a name.
pattern = '([AB])?(?P<number>[123])'
computed_df = Series(subject_list).str.extractall(pattern)
expected_df = DataFrame([
('A', '1'),
(NA, '3'),
(NA, '2'),
], expected_index, columns=[0, 'number'])
tm.assert_frame_equal(computed_df, expected_df)
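# In general, the extractall result index is the subject's own index
# (however many levels it has) plus an innermost integer level named
# "match" that numbers the matches found within each subject.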
def test_extractall_single_group(self):
# extractall(one named group) returns DataFrame with one named
# column.
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
r = s.str.extractall(r'(?P<letter>[a-z])')
i = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
e = DataFrame({"letter": ['a', 'b', 'd', 'c']}, i)
tm.assert_frame_equal(r, e)
# extractall(one un-named group) returns DataFrame with one
# un-named column.
r = s.str.extractall(r'([a-z])')
e = DataFrame(['a', 'b', 'd', 'c'], i)
tm.assert_frame_equal(r, e)
def test_extractall_single_group_with_quantifier(self):
# extractall(one un-named group with quantifier) returns
# DataFrame with one un-named column (GH13382).
s = Series(['ab3', 'abc3', 'd4cd2'], name='series_name')
r = s.str.extractall(r'([a-z]+)')
i = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
e = DataFrame(['ab', 'abc', 'd', 'cd'], i)
tm.assert_frame_equal(r, e)
def test_extractall_no_matches(self):
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
# one un-named group.
r = s.str.extractall('(z)')
e = DataFrame(columns=[0])
tm.assert_frame_equal(r, e)
# two un-named groups.
r = s.str.extractall('(z)(z)')
e = DataFrame(columns=[0, 1])
tm.assert_frame_equal(r, e)
# one named group.
r = s.str.extractall('(?P<first>z)')
e = DataFrame(columns=["first"])
tm.assert_frame_equal(r, e)
# two named groups.
r = s.str.extractall('(?P<first>z)(?P<second>z)')
e = DataFrame(columns=["first", "second"])
tm.assert_frame_equal(r, e)
# one named, one un-named.
r = s.str.extractall('(z)(?P<second>z)')
e = DataFrame(columns=[0, "second"])
tm.assert_frame_equal(r, e)
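# Even with zero matches, extractall keeps one column per capture group,
# labelled by group name when available and positionally otherwise, so
# the column layout stays predictable for downstream code.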
def test_extractall_stringindex(self):
s = Series(["a1a2", "b1", "c1"], name='xxx')
res = s.str.extractall(r"[ab](?P<digit>\d)")
exp_idx = MultiIndex.from_tuples([(0, 0), (0, 1), (1, 0)],
names=[None, 'match'])
exp = DataFrame({'digit': ["1", "2", "1"]}, index=exp_idx)
tm.assert_frame_equal(res, exp)
# an Index should give the same result as the default index without a name,
# i.e. index.name does not affect the result
for idx in [Index(["a1a2", "b1", "c1"]),
Index(["a1a2", "b1", "c1"], name='xxx')]:
res = idx.str.extractall(r"[ab](?P<digit>\d)")
tm.assert_frame_equal(res, exp)
s = Series(["a1a2", "b1", "c1"], name='s_name',
index=Index(["XX", "yy", "zz"], name='idx_name'))
res = s.str.extractall(r"[ab](?P<digit>\d)")
exp_idx = MultiIndex.from_tuples([("XX", 0), ("XX", 1), ("yy", 0)],
names=["idx_name", 'match'])
exp = DataFrame({'digit': ["1", "2", "1"]}, index=exp_idx)
tm.assert_frame_equal(res, exp)
def test_extractall_errors(self):
# It does not make sense to use extractall with a regex that has
# no capture groups: extractall returns a DataFrame with one
# column per capture group.
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
with tm.assert_raises_regex(ValueError, "no capture groups"):
s.str.extractall(r'[a-z]')
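# str.extract has the same requirement: a pattern without any capture
# group raises ValueError, since there is nothing to place in the
# result columns.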
def test_extract_index_one_two_groups(self):
s = Series(['a3', 'b3', 'd4c2'], index=["A3", "B3", "D4"],
name='series_name')
r = s.index.str.extract(r'([A-Z])', expand=True)
e = DataFrame(['A', "B", "D"])
tm.assert_frame_equal(r, e)
# Prior to v0.18.0, index.str.extract(regex with one group)
# returned Index. With more than one group, extract raised an
# error (GH9980). Now extract always returns DataFrame.
r = s.index.str.extract(
r'(?P<letter>[A-Z])(?P<digit>[0-9])', expand=True)
e_list = [
("A", "3"),
("B", "3"),
("D", "4"),
]
e = DataFrame(e_list, columns=["letter", "digit"])
tm.assert_frame_equal(r, e)
def test_extractall_same_as_extract(self):
s = Series(['a3', 'b3', 'c2'], name='series_name')
pattern_two_noname = r'([a-z])([0-9])'
extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
has_multi_index = s.str.extractall(pattern_two_noname)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_noname, no_multi_index)
pattern_two_named = r'(?P<letter>[a-z])(?P<digit>[0-9])'
extract_two_named = s.str.extract(pattern_two_named, expand=True)
has_multi_index = s.str.extractall(pattern_two_named)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_named, no_multi_index)
pattern_one_named = r'(?P<group_name>[a-z])'
extract_one_named = s.str.extract(pattern_one_named, expand=True)
has_multi_index = s.str.extractall(pattern_one_named)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_named, no_multi_index)
pattern_one_noname = r'([a-z])'
extract_one_noname = s.str.extract(pattern_one_noname, expand=True)
has_multi_index = s.str.extractall(pattern_one_noname)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_noname, no_multi_index)
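# This equivalence is the intended relationship between the two methods:
# extract keeps only the first match per subject, which is exactly what
# selecting level "match" == 0 from the extractall result does.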
def test_extractall_same_as_extract_subject_index(self):
# same as above tests, but s has an MultiIndex.
i = MultiIndex.from_tuples([
("A", "first"),
("B", "second"),
("C", "third"),
], names=("capital", "ordinal"))
s = Series(['a3', 'b3', 'c2'], i, name='series_name')
pattern_two_noname = r'([a-z])([0-9])'
extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
has_match_index = s.str.extractall(pattern_two_noname)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_noname, no_match_index)
pattern_two_named = r'(?P<letter>[a-z])(?P<digit>[0-9])'
extract_two_named = s.str.extract(pattern_two_named, expand=True)
has_match_index = s.str.extractall(pattern_two_named)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_named, no_match_index)
pattern_one_named = r'(?P<group_name>[a-z])'
extract_one_named = s.str.extract(pattern_one_named, expand=True)
has_match_index = s.str.extractall(pattern_one_named)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_named, no_match_index)
pattern_one_noname = r'([a-z])'
extract_one_noname = s.str.extract(pattern_one_noname, expand=True)
has_match_index = s.str.extractall(pattern_one_noname)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_noname, no_match_index)
def test_empty_str_methods(self):
empty_str = empty = Series(dtype=object)
empty_int = Series(dtype=int)
empty_bool = Series(dtype=bool)
empty_bytes = Series(dtype=object)
# GH7241
# (extract) on empty series
tm.assert_series_equal(empty_str, empty.str.cat(empty))
assert '' == empty.str.cat()
tm.assert_series_equal(empty_str, empty.str.title())
tm.assert_series_equal(empty_int, empty.str.count('a'))
tm.assert_series_equal(empty_bool, empty.str.contains('a'))
tm.assert_series_equal(empty_bool, empty.str.startswith('a'))
tm.assert_series_equal(empty_bool, empty.str.endswith('a'))
tm.assert_series_equal(empty_str, empty.str.lower())
tm.assert_series_equal(empty_str, empty.str.upper())
tm.assert_series_equal(empty_str, empty.str.replace('a', 'b'))
tm.assert_series_equal(empty_str, empty.str.repeat(3))
tm.assert_series_equal(empty_bool, empty.str.match('^a'))
tm.assert_frame_equal(
DataFrame(columns=[0], dtype=str),
empty.str.extract('()', expand=True))
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=str),
empty.str.extract('()()', expand=True))
tm.assert_series_equal(
empty_str,
empty.str.extract('()', expand=False))
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=str),
empty.str.extract('()()', expand=False))
tm.assert_frame_equal(DataFrame(dtype=str), empty.str.get_dummies())
tm.assert_series_equal(empty_str, empty_str.str.join(''))
tm.assert_series_equal(empty_int, empty.str.len())
tm.assert_series_equal(empty_str, empty_str.str.findall('a'))
tm.assert_series_equal(empty_int, empty.str.find('a'))
tm.assert_series_equal(empty_int, empty.str.rfind('a'))
tm.assert_series_equal(empty_str, empty.str.pad(42))
tm.assert_series_equal(empty_str, empty.str.center(42))
tm.assert_series_equal(empty_str, empty.str.split('a'))
tm.assert_series_equal(empty_str, empty.str.rsplit('a'))
tm.assert_series_equal(empty_str,
empty.str.partition('a', expand=False))
tm.assert_series_equal(empty_str,
empty.str.rpartition('a', expand=False))
tm.assert_series_equal(empty_str, empty.str.slice(stop=1))
tm.assert_series_equal(empty_str, empty.str.slice(step=1))
tm.assert_series_equal(empty_str, empty.str.strip())
tm.assert_series_equal(empty_str, empty.str.lstrip())
tm.assert_series_equal(empty_str, empty.str.rstrip())
tm.assert_series_equal(empty_str, empty.str.wrap(42))
tm.assert_series_equal(empty_str, empty.str.get(0))
tm.assert_series_equal(empty_str, empty_bytes.str.decode('ascii'))
tm.assert_series_equal(empty_bytes, empty.str.encode('ascii'))
tm.assert_series_equal(empty_str, empty.str.isalnum())
tm.assert_series_equal(empty_str, empty.str.isalpha())
tm.assert_series_equal(empty_str, empty.str.isdigit())
tm.assert_series_equal(empty_str, empty.str.isspace())
tm.assert_series_equal(empty_str, empty.str.islower())
tm.assert_series_equal(empty_str, empty.str.isupper())
tm.assert_series_equal(empty_str, empty.str.istitle())
tm.assert_series_equal(empty_str, empty.str.isnumeric())
tm.assert_series_equal(empty_str, empty.str.isdecimal())
tm.assert_series_equal(empty_str, empty.str.capitalize())
tm.assert_series_equal(empty_str, empty.str.swapcase())
tm.assert_series_equal(empty_str, empty.str.normalize('NFC'))
if compat.PY3:
table = str.maketrans('a', 'b')
else:
import string
table = string.maketrans('a', 'b')
tm.assert_series_equal(empty_str, empty.str.translate(table))
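# The point of the empty_* fixtures above is that each accessor keeps a
# consistent return type even for zero-length input, so callers need no
# special-casing for empty Series.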
def test_empty_str_methods_to_frame(self):
empty = Series(dtype=str)
empty_df = DataFrame([])
tm.assert_frame_equal(empty_df, empty.str.partition('a'))
tm.assert_frame_equal(empty_df, empty.str.rpartition('a'))
def test_ismethods(self):
values = ['A', 'b', 'Xy', '4', '3A', '', 'TT', '55', '-', ' ']
str_s = Series(values)
alnum_e = [True, True, True, True, True, False, True, True, False,
False]
alpha_e = [True, True, True, False, False, False, True, False, False,
False]
digit_e = [False, False, False, True, False, False, False, True, False,
False]
# TODO: unused
num_e = [False, False, False, True, False, False, # noqa
False, True, False, False]
space_e = [False, False, False, False, False, False, False, False,
False, True]
lower_e = [False, True, False, False, False, False, False, False,
False, False]
upper_e = [True, False, False, False, True, False, True, False, False,
False]
title_e = [True, False, True, False, True, False, False, False, False,
False]
tm.assert_series_equal(str_s.str.isalnum(), Series(alnum_e))
tm.assert_series_equal(str_s.str.isalpha(), Series(alpha_e))
tm.assert_series_equal(str_s.str.isdigit(), Series(digit_e))
tm.assert_series_equal(str_s.str.isspace(), Series(space_e))
tm.assert_series_equal(str_s.str.islower(), Series(lower_e))
"""
Some exceptions are found and ignored:
"1aw8" in "ref": "PYR" is listed as part of chain B,
but it is not part of the protein and is not listed on
the RCSB site; it seems to be due to a modification of the
serine at position 1 where a pyruvic acid is added.
22 entries have NaN values for chain (typically large assemblies)
e.g. 5d8b, 5j4d, 6b4v, 6tz5
"""
from datetime import datetime
import string
import numpy as np
import pandas as pd
import asym_io
import asym_utils as utils
def load_mmcif(pdb_id, reformat=True):
return asym_io.load_mmcif(pdb_id, reformat=reformat)
def extract_contents_summary(mmcif):
try:
desc_poly = pd.DataFrame(mmcif['_entity_poly'])
except:
desc_poly = pd.DataFrame([mmcif['_entity_poly']])
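# '_entity_poly' is presumably a single dict when the entry has only one
# polymer entity and a list of dicts otherwise; wrapping the dict in a
# list lets pd.DataFrame build a one-row table instead of raising.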
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime, timedelta
import functools
import itertools
import numpy as np
import numpy.ma as ma
import numpy.ma.mrecords as mrecords
from numpy.random import randn
import pytest
from pandas.compat import (
PY3, PY36, OrderedDict, is_platform_little_endian, lmap, long, lrange,
lzip, range, zip)
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import is_integer_dtype
import pandas as pd
from pandas import (
Categorical, DataFrame, Index, MultiIndex, Series, Timedelta, Timestamp,
compat, date_range, isna)
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
MIXED_FLOAT_DTYPES = ['float16', 'float32', 'float64']
MIXED_INT_DTYPES = ['uint8', 'uint16', 'uint32', 'uint64', 'int8', 'int16',
'int32', 'int64']
class TestDataFrameConstructors(TestData):
def test_constructor(self):
df = DataFrame()
assert len(df.index) == 0
df = DataFrame(data={})
assert len(df.index) == 0
def test_constructor_mixed(self):
index, data = tm.getMixedTypeDict()
# TODO(wesm), incomplete test?
indexed_frame = DataFrame(data, index=index) # noqa
unindexed_frame = DataFrame(data) # noqa
assert self.mixed_frame['foo'].dtype == np.object_
def test_constructor_cast_failure(self):
foo = DataFrame({'a': ['a', 'b', 'c']}, dtype=np.float64)
assert foo['a'].dtype == object
# GH 3010, constructing with odd arrays
df = DataFrame(np.ones((4, 2)))
# this is ok
df['foo'] = np.ones((4, 2)).tolist()
# this is not ok
pytest.raises(ValueError, df.__setitem__, tuple(['test']),
np.ones((4, 2)))
# this is ok
df['foo2'] = np.ones((4, 2)).tolist()
def test_constructor_dtype_copy(self):
orig_df = DataFrame({
'col1': [1.],
'col2': [2.],
'col3': [3.]})
new_df = pd.DataFrame(orig_df, dtype=float, copy=True)
new_df['col1'] = 200.
assert orig_df['col1'][0] == 1.
def test_constructor_dtype_nocast_view(self):
df = DataFrame([[1, 2]])
should_be_view = DataFrame(df, dtype=df[0].dtype)
should_be_view[0][0] = 99
assert df.values[0, 0] == 99
should_be_view = DataFrame(df.values, dtype=df[0].dtype)
should_be_view[0][0] = 97
assert df.values[0, 0] == 97
def test_constructor_dtype_list_data(self):
df = DataFrame([[1, '2'],
[None, 'a']], dtype=object)
assert df.loc[1, 0] is None
assert df.loc[0, 1] == '2'
def test_constructor_list_frames(self):
# see gh-3243
result = DataFrame([DataFrame([])])
assert result.shape == (1, 0)
result = DataFrame([DataFrame(dict(A=lrange(5)))])
assert isinstance(result.iloc[0, 0], DataFrame)
def test_constructor_mixed_dtypes(self):
def _make_mixed_dtypes_df(typ, ad=None):
if typ == 'int':
dtypes = MIXED_INT_DTYPES
arrays = [np.array(np.random.rand(10), dtype=d)
for d in dtypes]
elif typ == 'float':
dtypes = MIXED_FLOAT_DTYPES
arrays = [np.array(np.random.randint(
10, size=10), dtype=d) for d in dtypes]
zipper = lzip(dtypes, arrays)
for d, a in zipper:
assert(a.dtype == d)
if ad is None:
ad = dict()
ad.update({d: a for d, a in zipper})
return DataFrame(ad)
def _check_mixed_dtypes(df, dtypes=None):
if dtypes is None:
dtypes = MIXED_FLOAT_DTYPES + MIXED_INT_DTYPES
for d in dtypes:
if d in df:
assert(df.dtypes[d] == d)
# mixed floating and integer types coexist in the same frame
df = _make_mixed_dtypes_df('float')
_check_mixed_dtypes(df)
# add lots of types
df = _make_mixed_dtypes_df('float', dict(A=1, B='foo', C='bar'))
_check_mixed_dtypes(df)
# GH 622
df = _make_mixed_dtypes_df('int')
_check_mixed_dtypes(df)
def test_constructor_complex_dtypes(self):
# GH10952
a = np.random.rand(10).astype(np.complex64)
b = np.random.rand(10).astype(np.complex128)
df = DataFrame({'a': a, 'b': b})
assert a.dtype == df.a.dtype
assert b.dtype == df.b.dtype
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
df = DataFrame({'A': ['x', None]}, dtype=string_dtype)
result = df.isna()
expected = DataFrame({"A": [False, True]})
tm.assert_frame_equal(result, expected)
assert df.iloc[1, 0] is None
df = DataFrame({'A': ['x', np.nan]}, dtype=string_dtype)
assert np.isnan(df.iloc[1, 0])
def test_constructor_rec(self):
rec = self.frame.to_records(index=False)
if PY3:
# unicode error under PY2
rec.dtype.names = list(rec.dtype.names)[::-1]
index = self.frame.index
df = DataFrame(rec)
tm.assert_index_equal(df.columns, pd.Index(rec.dtype.names))
df2 = DataFrame(rec, index=index)
tm.assert_index_equal(df2.columns, pd.Index(rec.dtype.names))
tm.assert_index_equal(df2.index, index)
rng = np.arange(len(rec))[::-1]
df3 = DataFrame(rec, index=rng, columns=['C', 'B'])
expected = DataFrame(rec, index=rng).reindex(columns=['C', 'B'])
tm.assert_frame_equal(df3, expected)
def test_constructor_bool(self):
df = DataFrame({0: np.ones(10, dtype=bool),
1: np.zeros(10, dtype=bool)})
assert df.values.dtype == np.bool_
def test_constructor_overflow_int64(self):
# see gh-14881
values = np.array([2 ** 64 - i for i in range(1, 10)],
dtype=np.uint64)
result = DataFrame({'a': values})
assert result['a'].dtype == np.uint64
# see gh-2355
data_scores = [(6311132704823138710, 273), (2685045978526272070, 23),
(8921811264899370420, 45),
(long(17019687244989530680), 270),
(long(9930107427299601010), 273)]
dtype = [('uid', 'u8'), ('score', 'u8')]
data = np.zeros((len(data_scores),), dtype=dtype)
data[:] = data_scores
df_crawls = DataFrame(data)
assert df_crawls['uid'].dtype == np.uint64
@pytest.mark.parametrize("values", [np.array([2**64], dtype=object),
np.array([2**65]), [2**64 + 1],
np.array([-2**63 - 4], dtype=object),
np.array([-2**64 - 1]), [-2**65 - 2]])
def test_constructor_int_overflow(self, values):
# see gh-18584
value = values[0]
result = DataFrame(values)
assert result[0].dtype == object
assert result[0][0] == value
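# Values that do not fit in int64 or uint64 are stored in an object-dtype
# column with their exact value preserved, rather than being silently
# wrapped around or truncated.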
def test_constructor_ordereddict(self):
import random
nitems = 100
nums = lrange(nitems)
random.shuffle(nums)
expected = ['A%d' % i for i in nums]
df = DataFrame(OrderedDict(zip(expected, [[0]] * nitems)))
assert expected == list(df.columns)
def test_constructor_dict(self):
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2})
# col2 is padded with NaN
assert len(self.ts1) == 30
assert len(self.ts2) == 25
tm.assert_series_equal(self.ts1, frame['col1'], check_names=False)
exp = pd.Series(np.concatenate([[np.nan] * 5, self.ts2.values]),
index=self.ts1.index, name='col2')
tm.assert_series_equal(exp, frame['col2'])
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2},
columns=['col2', 'col3', 'col4'])
assert len(frame) == len(self.ts2)
assert 'col1' not in frame
assert isna(frame['col3']).all()
# Corner cases
assert len(DataFrame({})) == 0
# mix dict and array, wrong size - no spec for which error should raise
# first
with pytest.raises(ValueError):
DataFrame({'A': {'a': 'a', 'b': 'b'}, 'B': ['a', 'b', 'c']})
# Length-one dict micro-optimization
frame = DataFrame({'A': {'1': 1, '2': 2}})
tm.assert_index_equal(frame.index, pd.Index(['1', '2']))
# empty dict plus index
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx)
assert frame.index is idx
# empty with index and columns
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx, columns=idx)
assert frame.index is idx
assert frame.columns is idx
assert len(frame._series) == 3
# with dict of empty list and Series
frame = DataFrame({'A': [], 'B': []}, columns=['A', 'B'])
tm.assert_index_equal(frame.index, Index([], dtype=np.int64))
# GH 14381
# Dict with None value
frame_none = DataFrame(dict(a=None), index=[0])
frame_none_list = DataFrame(dict(a=[None]), index=[0])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none.get_value(0, 'a') is None
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none_list.get_value(0, 'a') is None
tm.assert_frame_equal(frame_none, frame_none_list)
# GH10856
# dict with scalar values should raise error, even if columns passed
msg = 'If using all scalar values, you must pass an index'
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7})
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7}, columns=['a'])
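# Supplying an index is what makes a scalar-only dict valid; e.g.
# DataFrame({'a': 0.7}, index=[0]) broadcasts the scalar along the index.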
@pytest.mark.parametrize("scalar", [2, np.nan, None, 'D'])
def test_constructor_invalid_items_unused(self, scalar):
# No error if invalid (scalar) value is in fact not used:
result = DataFrame({'a': scalar}, columns=['b'])
expected = DataFrame(columns=['b'])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [2, np.nan, None, float('nan')])
def test_constructor_dict_nan_key(self, value):
# GH 18455
cols = [1, value, 3]
idx = ['a', value]
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = DataFrame(data).sort_values(1).sort_values('a', axis=1)
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values('a', axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [np.nan, None, float('nan')])
def test_constructor_dict_nan_tuple_key(self, value):
# GH 18455
cols = Index([(11, 21), (value, 22), (13, value)])
idx = Index([('a', value), (value, 2)])
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = (DataFrame(data)
.sort_values((11, 21))
.sort_values(('a', value), axis=1))
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values(('a', value), axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.skipif(not PY36, reason='Insertion order for Python>=3.6')
def test_constructor_dict_order_insertion(self):
# GH19018
# initialization ordering: by insertion order if python>= 3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ba'))
tm.assert_frame_equal(frame, expected)
@pytest.mark.skipif(PY36, reason='order by value for Python<3.6')
def test_constructor_dict_order_by_values(self):
# GH19018
# initialization ordering: by value if python<3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ab'))
tm.assert_frame_equal(frame, expected)
def test_constructor_multi_index(self):
# GH 4078
# construction error with mi and all-nan frame
tuples = [(2, 3), (3, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
tuples = [(3, 3), (2, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
def test_constructor_error_msgs(self):
msg = "Empty data passed with indices specified."
# passing an empty array with columns specified.
with pytest.raises(ValueError, match=msg):
DataFrame(np.empty(0), columns=list('abc'))
msg = "Mixing dicts with non-Series may lead to ambiguous ordering."
# mix dict and array, wrong size
with pytest.raises(ValueError, match=msg):
DataFrame({'A': {'a': 'a', 'b': 'b'},
'B': ['a', 'b', 'c']})
# wrong size ndarray, GH 3105
msg = r"Shape of passed values is \(3, 4\), indices imply \(3, 3\)"
with pytest.raises(ValueError, match=msg):
DataFrame(np.arange(12).reshape((4, 3)),
columns=['foo', 'bar', 'baz'],
index=pd.date_range('2000-01-01', periods=3))
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(np.zeros((3, 3, 3)), columns=['A', 'B', 'C'], index=[1])
# wrong size axis labels
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(3, 1\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B', 'C'], index=[1])
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(2, 2\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B'], index=[1, 2])
msg = ("If using all scalar "
"values, you must pass "
"an index")
with pytest.raises(ValueError, match=msg):
DataFrame({'a': False, 'b': True})
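# Note that in this pandas version the "Shape of passed values" message
# reports the transposed (ncols, nrows) shape of the internal block,
# which is why a (4, 3) input appears as (3, 4) in the regexes above.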
def test_constructor_with_embedded_frames(self):
# embedded data frames
df1 = DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]})
df2 = DataFrame([df1, df1 + 10])
df2.dtypes
str(df2)
result = df2.loc[0, 0]
tm.assert_frame_equal(result, df1)
result = df2.loc[1, 0]
tm.assert_frame_equal(result, df1 + 10)
def test_constructor_subclass_dict(self):
# Test for passing dict subclass to constructor
data = {'col1': tm.TestSubDict((x, 10.0 * x) for x in range(10)),
'col2': tm.TestSubDict((x, 20.0 * x) for x in range(10))}
df = DataFrame(data)
refdf = DataFrame({col: dict(compat.iteritems(val))
for col, val in compat.iteritems(data)})
tm.assert_frame_equal(refdf, df)
data = tm.TestSubDict(compat.iteritems(data))
df = DataFrame(data)
tm.assert_frame_equal(refdf, df)
# try with defaultdict
from collections import defaultdict
data = {}
self.frame['B'][:10] = np.nan
for k, v in compat.iteritems(self.frame):
dct = defaultdict(dict)
dct.update(v.to_dict())
data[k] = dct
frame = DataFrame(data)
tm.assert_frame_equal(self.frame.sort_index(), frame)
def test_constructor_dict_block(self):
expected = np.array([[4., 3., 2., 1.]])
df = DataFrame({'d': [4.], 'c': [3.], 'b': [2.], 'a': [1.]},
columns=['d', 'c', 'b', 'a'])
tm.assert_numpy_array_equal(df.values, expected)
def test_constructor_dict_cast(self):
# cast float tests
test_data = {
'A': {'1': 1, '2': 2},
'B': {'1': '1', '2': '2', '3': '3'},
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 3
assert frame['B'].dtype == np.float64
assert frame['A'].dtype == np.float64
frame = DataFrame(test_data)
assert len(frame) == 3
assert frame['B'].dtype == np.object_
assert frame['A'].dtype == np.float64
# can't cast to float
test_data = {
'A': dict(zip(range(20), tm.makeStringIndex(20))),
'B': dict(zip(range(15), randn(15)))
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 20
assert frame['A'].dtype == np.object_
assert frame['B'].dtype == np.float64
def test_constructor_dict_dont_upcast(self):
d = {'Col1': {'Row1': 'A String', 'Row2': np.nan}}
df = DataFrame(d)
assert isinstance(df['Col1']['Row2'], float)
dm = DataFrame([[1, 2], ['a', 'b']], index=[1, 2], columns=[1, 2])
assert isinstance(dm[1][1], int)
def test_constructor_dict_of_tuples(self):
# GH #1491
data = {'a': (1, 2, 3), 'b': (4, 5, 6)}
result = DataFrame(data)
expected = DataFrame({k: list(v) for k, v in compat.iteritems(data)})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_dict_multiindex(self):
def check(result, expected):
return tm.assert_frame_equal(result, expected, check_dtype=True,
check_index_type=True,
check_column_type=True,
check_names=True)
d = {('a', 'a'): {('i', 'i'): 0, ('i', 'j'): 1, ('j', 'i'): 2},
('b', 'a'): {('i', 'i'): 6, ('i', 'j'): 5, ('j', 'i'): 4},
('b', 'c'): {('i', 'i'): 7, ('i', 'j'): 8, ('j', 'i'): 9}}
_d = sorted(d.items())
df = DataFrame(d)
expected = DataFrame(
[x[1] for x in _d],
index=MultiIndex.from_tuples([x[0] for x in _d])).T
expected.index = MultiIndex.from_tuples(expected.index)
check(df, expected)
d['z'] = {'y': 123., ('i', 'i'): 111, ('i', 'j'): 111, ('j', 'i'): 111}
_d.insert(0, ('z', d['z']))
expected = DataFrame(
[x[1] for x in _d],
index=Index([x[0] for x in _d], tupleize_cols=False)).T
expected.index = Index(expected.index, tupleize_cols=False)
df = DataFrame(d)
df = df.reindex(columns=expected.columns, index=expected.index)
check(df, expected)
def test_constructor_dict_datetime64_index(self):
# GH 10160
dates_as_str = ['1984-02-19', '1988-11-06', '1989-12-03', '1990-03-15']
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(dates_as_str)}
data_datetime64 = create_data(np.datetime64)
data_datetime = create_data(lambda x: datetime.strptime(x, '%Y-%m-%d'))
data_Timestamp = create_data(Timestamp)
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timestamp(dt) for dt in dates_as_str])
result_datetime64 = DataFrame(data_datetime64)
result_datetime = DataFrame(data_datetime)
result_Timestamp = DataFrame(data_Timestamp)
tm.assert_frame_equal(result_datetime64, expected)
tm.assert_frame_equal(result_datetime, expected)
tm.assert_frame_equal(result_Timestamp, expected)
def test_constructor_dict_timedelta64_index(self):
# GH 10160
td_as_int = [1, 2, 3, 4]
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(td_as_int)}
data_timedelta64 = create_data(lambda x: np.timedelta64(x, 'D'))
data_timedelta = create_data(lambda x: timedelta(days=x))
data_Timedelta = create_data(lambda x: Timedelta(x, 'D'))
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timedelta(td, 'D') for td in td_as_int])
result_timedelta64 = DataFrame(data_timedelta64)
result_timedelta = DataFrame(data_timedelta)
result_Timedelta = DataFrame(data_Timedelta)
tm.assert_frame_equal(result_timedelta64, expected)
tm.assert_frame_equal(result_timedelta, expected)
tm.assert_frame_equal(result_Timedelta, expected)
def test_constructor_period(self):
# PeriodIndex
a = pd.PeriodIndex(['2012-01', 'NaT', '2012-04'], freq='M')
b = pd.PeriodIndex(['2012-02-01', '2012-03-01', 'NaT'], freq='D')
df = pd.DataFrame({'a': a, 'b': b})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
# list of periods
df = pd.DataFrame({'a': a.astype(object).tolist(),
'b': b.astype(object).tolist()})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
def test_nested_dict_frame_constructor(self):
rng = pd.period_range('1/1/2000', periods=5)
df = DataFrame(randn(10, 5), columns=rng)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(col, {})[row] = df.get_value(row, col)
result = DataFrame(data, columns=rng)
tm.assert_frame_equal(result, df)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(row, {})[col] = df.get_value(row, col)
result = DataFrame(data, index=rng).T
tm.assert_frame_equal(result, df)
def _check_basic_constructor(self, empty):
# mat: a 2-D matrix with shape (2, 3) used as input; `empty` is a factory
# that creates arrays of the requested shape (e.g. np.ones or ma.masked_all)
mat = empty((2, 3), dtype=float)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
# 1-D input
frame = DataFrame(empty((3,)), columns=['A'], index=[1, 2, 3])
assert len(frame.index) == 3
assert len(frame.columns) == 1
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# wrong size axis labels
msg = r'Shape of passed values is \(3, 2\), indices imply \(3, 1\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B', 'C'], index=[1])
msg = r'Shape of passed values is \(3, 2\), indices imply \(2, 2\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B'], index=[1, 2])
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(empty((3, 3, 3)), columns=['A', 'B', 'C'],
index=[1])
# automatic labeling
frame = DataFrame(mat)
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, index=[1, 2])
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, columns=['A', 'B', 'C'])
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
# 0-length axis
frame = DataFrame(empty((0, 3)))
assert len(frame.index) == 0
frame = DataFrame(empty((3, 0)))
assert len(frame.columns) == 0
def test_constructor_ndarray(self):
self._check_basic_constructor(np.ones)
frame = DataFrame(['foo', 'bar'], index=[0, 1], columns=['A'])
assert len(frame) == 2
def test_constructor_maskedarray(self):
self._check_basic_constructor(ma.masked_all)
# Check non-masked values
mat = ma.masked_all((2, 3), dtype=float)
mat[0, 0] = 1.0
mat[1, 2] = 2.0
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert 1.0 == frame['A'][1]
assert 2.0 == frame['C'][2]
# what is this even checking??
mat = ma.masked_all((2, 3), dtype=float)
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert np.all(~np.asarray(frame == frame))
def test_constructor_maskedarray_nonfloat(self):
# masked int promoted to float
mat = ma.masked_all((2, 3), dtype=int)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.float64)
assert frame.values.dtype == np.float64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'][1]
assert 2 == frame['C'][2]
# masked np.datetime64 stays (use NaT as null)
mat = ma.masked_all((2, 3), dtype='M8[ns]')
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert isna(frame).values.all()
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'].view('i8')[1]
assert 2 == frame['C'].view('i8')[2]
# masked bool promoted to object
mat = ma.masked_all((2, 3), dtype=bool)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=object)
assert frame.values.dtype == object
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = True
mat2[1, 2] = False
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert frame['A'][1] is True
assert frame['C'][2] is False
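# Taken together, these cases document the promotion rules for masked
# input: masked ints become floats with NaN, masked datetime64 stays
# datetime64 with NaT, and masked booleans are promoted to object.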
def test_constructor_mrecarray(self):
# Ensure mrecarray produces frame identical to dict of masked arrays
# from GH3479
assert_fr_equal = functools.partial(tm.assert_frame_equal,
check_index_type=True,
check_column_type=True,
check_frame_type=True)
arrays = [
('float', np.array([1.5, 2.0])),
('int', np.array([1, 2])),
('str', np.array(['abc', 'def'])),
]
for name, arr in arrays[:]:
arrays.append(('masked1_' + name,
np.ma.masked_array(arr, mask=[False, True])))
arrays.append(('masked_all', np.ma.masked_all((2,))))
arrays.append(('masked_none',
np.ma.masked_array([1.0, 2.5], mask=False)))
# call assert_frame_equal for all selections of 3 arrays
for comb in itertools.combinations(arrays, 3):
names, data = zip(*comb)
mrecs = mrecords.fromarrays(data, names=names)
# fill the comb
comb = {k: (v.filled() if hasattr(v, 'filled') else v)
for k, v in comb}
expected = DataFrame(comb, columns=names)
result = DataFrame(mrecs)
assert_fr_equal(result, expected)
# specify columns
expected = DataFrame(comb, columns=names[::-1])
result = DataFrame(mrecs, columns=names[::-1])
assert_fr_equal(result, expected)
# specify index
expected = DataFrame(comb, columns=names, index=[1, 2])
result = DataFrame(mrecs, index=[1, 2])
assert_fr_equal(result, expected)
def test_constructor_corner_shape(self):
df = DataFrame(index=[])
assert df.values.shape == (0, 0)
@pytest.mark.parametrize("data, index, columns, dtype, expected", [
(None, lrange(10), ['a', 'b'], object, np.object_),
(None, None, ['a', 'b'], 'int64', np.dtype('int64')),
(None, lrange(10), ['a', 'b'], int, np.dtype('float64')),
({}, None, ['foo', 'bar'], None, np.object_),
({'b': 1}, lrange(10), list('abc'), int, np.dtype('float64'))
])
def test_constructor_dtype(self, data, index, columns, dtype, expected):
df = DataFrame(data, index, columns, dtype)
assert df.values.dtype == expected
def test_constructor_scalar_inference(self):
data = {'int': 1, 'bool': True,
'float': 3., 'complex': 4j, 'object': 'foo'}
df = DataFrame(data, index=np.arange(10))
assert df['int'].dtype == np.int64
assert df['bool'].dtype == np.bool_
assert df['float'].dtype == np.float64
assert df['complex'].dtype == np.complex128
assert df['object'].dtype == np.object_
def test_constructor_arrays_and_scalars(self):
df = DataFrame({'a': randn(10), 'b': True})
exp = DataFrame({'a': df['a'].values, 'b': [True] * 10})
tm.assert_frame_equal(df, exp)
with pytest.raises(ValueError, match='must pass an index'):
DataFrame({'a': False, 'b': True})
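# A lone scalar is broadcast to the length of the array columns it is
# combined with; only an all-scalar dict needs an explicit index, as the
# raises-check above confirms.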
def test_constructor_DataFrame(self):
df = DataFrame(self.frame)
tm.assert_frame_equal(df, self.frame)
df_casted = DataFrame(self.frame, dtype=np.int64)
assert df_casted.values.dtype == np.int64
def test_constructor_more(self):
# used to be in test_matrix.py
arr = randn(10)
dm = DataFrame(arr, columns=['A'], index=np.arange(10))
assert dm.values.ndim == 2
arr = randn(0)
dm = DataFrame(arr)
assert dm.values.ndim == 2
assert dm.values.ndim == 2
# no data specified
dm = DataFrame(columns=['A', 'B'], index=np.arange(10))
assert dm.values.shape == (10, 2)
dm = DataFrame(columns=['A', 'B'])
assert dm.values.shape == (0, 2)
dm = DataFrame(index=np.arange(10))
assert dm.values.shape == (10, 0)
# can't cast
mat = np.array(['foo', 'bar'], dtype=object).reshape(2, 1)
with pytest.raises(ValueError, match='cast'):
DataFrame(mat, index=[0, 1], columns=[0], dtype=float)
dm = DataFrame(DataFrame(self.frame._series))
tm.assert_frame_equal(dm, self.frame)
# int cast
dm = DataFrame({'A': np.ones(10, dtype=int),
'B': np.ones(10, dtype=np.float64)},
index=np.arange(10))
assert len(dm.columns) == 2
assert dm.values.dtype == np.float64
def test_constructor_empty_list(self):
df = DataFrame([], index=[])
expected = DataFrame(index=[])
tm.assert_frame_equal(df, expected)
# GH 9939
df = DataFrame([], columns=['A', 'B'])
expected = DataFrame({}, columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
# Empty generator: list(empty_gen()) == []
def empty_gen():
return
yield
df = DataFrame(empty_gen(), columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
def test_constructor_list_of_lists(self):
# GH #484
df = DataFrame(data=[[1, 'a'], [2, 'b']], columns=["num", "str"])
assert is_integer_dtype(df['num'])
assert df['str'].dtype == np.object_
# GH 4851
# list of 0-dim ndarrays
expected = DataFrame({0: np.arange(10)})
data = [np.array(x) for x in range(10)]
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
def test_constructor_sequence_like(self):
# GH 3783
# collections.Sequence-like
class DummyContainer(compat.Sequence):
def __init__(self, lst):
self._lst = lst
def __getitem__(self, n):
return self._lst.__getitem__(n)
def __len__(self):
return self._lst.__len__()
lst_containers = [DummyContainer([1, 'a']), DummyContainer([2, 'b'])]
columns = ["num", "str"]
result = DataFrame(lst_containers, columns=columns)
expected = DataFrame([[1, 'a'], [2, 'b']], columns=columns)
tm.assert_frame_equal(result, expected, check_dtype=False)
# GH 4297
# support Array
import array
result = DataFrame({'A': array.array('i', range(10))})
expected = DataFrame({'A': list(range(10))})
tm.assert_frame_equal(result, expected, check_dtype=False)
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([array.array('i', range(10)),
array.array('i', range(10))])
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_iterable(self):
# GH 21987
class Iter():
def __iter__(self):
for i in range(10):
yield [1, 2, 3]
expected = DataFrame([[1, 2, 3]] * 10)
result = DataFrame(Iter())
tm.assert_frame_equal(result, expected)
def test_constructor_iterator(self):
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([range(10), range(10)])
tm.assert_frame_equal(result, expected)
def test_constructor_generator(self):
# related #2305
gen1 = (i for i in range(10))
gen2 = (i for i in range(10))
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([gen1, gen2])
tm.assert_frame_equal(result, expected)
gen = ([i, 'a'] for i in range(10))
result = DataFrame(gen)
expected = DataFrame({0: range(10), 1: 'a'})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_list_of_dicts(self):
data = [OrderedDict([['a', 1.5], ['b', 3], ['c', 4], ['d', 6]]),
OrderedDict([['a', 1.5], ['b', 3], ['d', 6]]),
OrderedDict([['a', 1.5], ['d', 6]]),
OrderedDict(),
OrderedDict([['a', 1.5], ['b', 3], ['c', 4]]),
OrderedDict([['b', 3], ['c', 4], ['d', 6]])]
result = DataFrame(data)
expected = DataFrame.from_dict(dict(zip(range(len(data)), data)),
orient='index')
tm.assert_frame_equal(result, expected.reindex(result.index))
result = DataFrame([{}])
expected = DataFrame(index=[0])
tm.assert_frame_equal(result, expected)
def test_constructor_ordered_dict_preserve_order(self):
# see gh-13304
expected = DataFrame([[2, 1]], columns=['b', 'a'])
data = OrderedDict()
data['b'] = [2]
data['a'] = [1]
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
data = OrderedDict()
data['b'] = 2
data['a'] = 1
result = DataFrame([data])
tm.assert_frame_equal(result, expected)
def test_constructor_ordered_dict_conflicting_orders(self):
# the first dict element sets the ordering for the DataFrame,
# even if there are conflicting orders from subsequent ones
row_one = OrderedDict()
row_one['b'] = 2
row_one['a'] = 1
row_two = OrderedDict()
row_two['a'] = 1
row_two['b'] = 2
row_three = {'b': 2, 'a': 1}
expected = DataFrame([[2, 1], [2, 1]], columns=['b', 'a'])
result = DataFrame([row_one, row_two])
tm.assert_frame_equal(result, expected)
expected = DataFrame([[2, 1], [2, 1], [2, 1]], columns=['b', 'a'])
result = DataFrame([row_one, row_two, row_three])
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_series(self):
data = [OrderedDict([['a', 1.5], ['b', 3.0], ['c', 4.0]]),
OrderedDict([['a', 1.5], ['b', 3.0], ['c', 6.0]])]
sdict = OrderedDict(zip(['x', 'y'], data))
idx = Index(['a', 'b', 'c'])
# all named
data2 = [Series([1.5, 3, 4], idx, dtype='O', name='x'),
Series([1.5, 3, 6], idx, name='y')]
result = DataFrame(data2)
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected)
# some unnamed
data2 = [Series([1.5, 3, 4], idx, dtype='O', name='x'),
Series([1.5, 3, 6], idx)]
result = DataFrame(data2)
sdict = OrderedDict(zip(['x', 'Unnamed 0'], data))
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result.sort_index(), expected)
# none named
data = [OrderedDict([['a', 1.5], ['b', 3], ['c', 4], ['d', 6]]),
OrderedDict([['a', 1.5], ['b', 3], ['d', 6]]),
OrderedDict([['a', 1.5], ['d', 6]]),
OrderedDict(),
OrderedDict([['a', 1.5], ['b', 3], ['c', 4]]),
OrderedDict([['b', 3], ['c', 4], ['d', 6]])]
data = [Series(d) for d in data]
result = DataFrame(data)
sdict = OrderedDict(zip(range(len(data)), data))
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected.reindex(result.index))
result2 = DataFrame(data, index=np.arange(6))
tm.assert_frame_equal(result, result2)
result = DataFrame([Series({})])
expected = DataFrame(index=[0])
tm.assert_frame_equal(result, expected)
data = [OrderedDict([['a', 1.5], ['b', 3.0], ['c', 4.0]]),
OrderedDict([['a', 1.5], ['b', 3.0], ['c', 6.0]])]
sdict = OrderedDict(zip(range(len(data)), data))
idx = Index(['a', 'b', 'c'])
data2 = [Series([1.5, 3, 4], idx, dtype='O'),
Series([1.5, 3, 6], idx)]
result = DataFrame(data2)
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_series_aligned_index(self):
series = [pd.Series(i, index=['b', 'a', 'c'], name=str(i))
for i in range(3)]
result = pd.DataFrame(series)
expected = pd.DataFrame({'b': [0, 1, 2],
'a': [0, 1, 2],
'c': [0, 1, 2]},
columns=['b', 'a', 'c'],
index=['0', '1', '2'])
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_derived_dicts(self):
class CustomDict(dict):
pass
d = {'a': 1.5, 'b': 3}
data_custom = [CustomDict(d)]
data = [d]
result_custom = DataFrame(data_custom)
result = DataFrame(data)
tm.assert_frame_equal(result, result_custom)
def test_constructor_ragged(self):
data = {'A': randn(10),
'B': randn(8)}
with pytest.raises(ValueError, match='arrays must all be same length'):
DataFrame(data)
def test_constructor_scalar(self):
idx = Index(lrange(3))
df = DataFrame({"a": 0}, index=idx)
expected = DataFrame({"a": [0, 0, 0]}, index=idx)
tm.assert_frame_equal(df, expected, check_dtype=False)
def test_constructor_Series_copy_bug(self):
df = DataFrame(self.frame['A'], index=self.frame.index, columns=['A'])
df.copy()
def test_constructor_mixed_dict_and_Series(self):
data = {}
data['A'] = {'foo': 1, 'bar': 2, 'baz': 3}
data['B'] = Series([4, 3, 2, 1], index=['bar', 'qux', 'baz', 'foo'])
result = DataFrame(data)
assert result.index.is_monotonic
# ordering ambiguous, raise exception
with pytest.raises(ValueError, match='ambiguous ordering'):
DataFrame({'A': ['a', 'b'], 'B': {'a': 'a', 'b': 'b'}})
# this is OK though
result = DataFrame({'A': ['a', 'b'],
'B': Series(['a', 'b'], index=['a', 'b'])})
expected = DataFrame({'A': ['a', 'b'], 'B': ['a', 'b']},
index=['a', 'b'])
tm.assert_frame_equal(result, expected)
def test_constructor_tuples(self):
result = DataFrame({'A': [(1, 2), (3, 4)]})
expected = DataFrame({'A': Series([(1, 2), (3, 4)])})
tm.assert_frame_equal(result, expected)
def test_constructor_namedtuples(self):
# GH11181
from collections import namedtuple
named_tuple = namedtuple("Pandas", list('ab'))
tuples = [named_tuple(1, 3), named_tuple(2, 4)]
expected = DataFrame({'a': [1, 2], 'b': [3, 4]})
result = DataFrame(tuples)
tm.assert_frame_equal(result, expected)
# with columns
expected = DataFrame({'y': [1, 2], 'z': [3, 4]})
result = DataFrame(tuples, columns=['y', 'z'])
tm.assert_frame_equal(result, expected)
def test_constructor_orient(self):
data_dict = self.mixed_frame.T._series
recons = DataFrame.from_dict(data_dict, orient='index')
expected = self.mixed_frame.sort_index()
tm.assert_frame_equal(recons, expected)
# dict of sequence
a = {'hi': [32, 3, 3],
'there': [3, 5, 3]}
rs = DataFrame.from_dict(a, orient='index')
xp = DataFrame.from_dict(a).T.reindex(list(a.keys()))
tm.assert_frame_equal(rs, xp)
def test_from_dict_columns_parameter(self):
# GH 18529
# Test new columns parameter for from_dict that was added to make
# from_items(..., orient='index', columns=[...]) easier to replicate
result = DataFrame.from_dict(OrderedDict([('A', [1, 2]),
('B', [4, 5])]),
orient='index', columns=['one', 'two'])
expected = DataFrame([[1, 2], [4, 5]], index=['A', 'B'],
columns=['one', 'two'])
tm.assert_frame_equal(result, expected)
msg = "cannot use columns parameter with orient='columns'"
with pytest.raises(ValueError, match=msg):
DataFrame.from_dict(dict([('A', [1, 2]), ('B', [4, 5])]),
orient='columns', columns=['one', 'two'])
with pytest.raises(ValueError, match=msg):
DataFrame.from_dict(dict([('A', [1, 2]), ('B', [4, 5])]),
columns=['one', 'two'])
def test_constructor_Series_named(self):
a = Series([1, 2, 3], index=['a', 'b', 'c'], name='x')
df = DataFrame(a)
assert df.columns[0] == 'x'
tm.assert_index_equal(df.index, a.index)
# ndarray like
arr = np.random.randn(10)
s = Series(arr, name='x')
df = DataFrame(s)
expected = DataFrame(dict(x=s))
tm.assert_frame_equal(df, expected)
s = Series(arr, index=range(3, 13))
df = DataFrame(s)
expected = DataFrame({0: s})
tm.assert_frame_equal(df, expected)
pytest.raises(ValueError, DataFrame, s, columns=[1, 2])
# #2234
a = Series([], name='x')
df = DataFrame(a)
assert df.columns[0] == 'x'
# series with name and w/o
s1 = Series(arr, name='x')
df = DataFrame([s1, arr]).T
expected = DataFrame({'x': s1, 'Unnamed 0': arr},
columns=['x', 'Unnamed 0'])
tm.assert_frame_equal(df, expected)
# this is a bit non-intuitive here; the series collapse down to arrays
df = DataFrame([arr, s1]).T
expected = DataFrame({1: s1, 0: arr}, columns=[0, 1])
tm.assert_frame_equal(df, expected)
def test_constructor_Series_named_and_columns(self):
# GH 9232 validation
s0 = Series(range(5), name=0)
s1 = Series(range(5), name=1)
# matching name and column gives standard frame
tm.assert_frame_equal(pd.DataFrame(s0, columns=[0]),
s0.to_frame())
tm.assert_frame_equal(pd.DataFrame(s1, columns=[1]),
s1.to_frame())
# non-matching produces empty frame
assert pd.DataFrame(s0, columns=[1]).empty
assert pd.DataFrame(s1, columns=[0]).empty
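# Columns that do not match the Series name select nothing, so the
# result is an empty frame rather than a column of missing values.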
def test_constructor_Series_differently_indexed(self):
# name
s1 = Series([1, 2, 3], index=['a', 'b', 'c'], name='x')
# no name
s2 = Series([1, 2, 3], index=['a', 'b', 'c'])
other_index = Index(['a', 'b'])
df1 = DataFrame(s1, index=other_index)
exp1 = DataFrame(s1.reindex(other_index))
assert df1.columns[0] == 'x'
tm.assert_frame_equal(df1, exp1)
df2 = DataFrame(s2, index=other_index)
exp2 = DataFrame(s2.reindex(other_index))
assert df2.columns[0] == 0
tm.assert_index_equal(df2.index, other_index)
tm.assert_frame_equal(df2, exp2)
def test_constructor_manager_resize(self):
index = list(self.frame.index[:5])
columns = list(self.frame.columns[:3])
result = DataFrame(self.frame._data, index=index,
columns=columns)
tm.assert_index_equal(result.index, Index(index))
tm.assert_index_equal(result.columns, Index(columns))
def test_constructor_from_items(self):
items = [(c, self.frame[c]) for c in self.frame.columns]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(items)
tm.assert_frame_equal(recons, self.frame)
# pass some columns
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(items, columns=['C', 'B', 'A'])
tm.assert_frame_equal(recons, self.frame.loc[:, ['C', 'B', 'A']])
# orient='index'
row_items = [(idx, self.mixed_frame.xs(idx))
for idx in self.mixed_frame.index]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(row_items,
columns=self.mixed_frame.columns,
orient='index')
tm.assert_frame_equal(recons, self.mixed_frame)
assert recons['A'].dtype == np.float64
msg = "Must pass columns with orient='index'"
with pytest.raises(TypeError, match=msg):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items(row_items, orient='index')
# orient='index', but thar be tuples
arr = construct_1d_object_array_from_listlike(
[('bar', 'baz')] * len(self.mixed_frame))
self.mixed_frame['foo'] = arr
row_items = [(idx, list(self.mixed_frame.xs(idx)))
for idx in self.mixed_frame.index]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(row_items,
columns=self.mixed_frame.columns,
orient='index')
tm.assert_frame_equal(recons, self.mixed_frame)
assert isinstance(recons['foo'][0], tuple)
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
rs = DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])],
orient='index',
columns=['one', 'two', 'three'])
xp = DataFrame([[1, 2, 3], [4, 5, 6]], index=['A', 'B'],
columns=['one', 'two', 'three'])
tm.assert_frame_equal(rs, xp)
def test_constructor_from_items_scalars(self):
# GH 17312
msg = (r'The value in each \(key, value\) '
'pair must be an array, Series, or dict')
with pytest.raises(ValueError, match=msg):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items([('A', 1), ('B', 4)])
msg = (r'The value in each \(key, value\) '
'pair must be an array, Series, or dict')
with pytest.raises(ValueError, match=msg):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items([('A', 1), ('B', 2)], columns=['col1'],
orient='index')
def test_from_items_deprecation(self):
# GH 17320
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])],
columns=['col1', 'col2', 'col3'],
orient='index')
def test_constructor_mix_series_nonseries(self):
df = DataFrame({'A': self.frame['A'],
'B': list(self.frame['B'])}, columns=['A', 'B'])
tm.assert_frame_equal(df, self.frame.loc[:, ['A', 'B']])
msg = 'does not match index length'
with pytest.raises(ValueError, match=msg):
DataFrame({'A': self.frame['A'], 'B': list(self.frame['B'])[:-2]})
def test_constructor_miscast_na_int_dtype(self):
df = DataFrame([[np.nan, 1], [1, 0]], dtype=np.int64)
expected = DataFrame([[np.nan, 1], [1, 0]])
tm.assert_frame_equal(df, expected)
def test_constructor_column_duplicates(self):
# it works! #2079
df = DataFrame([[8, 5]], columns=['a', 'a'])
edf = DataFrame([[8, 5]])
edf.columns = ['a', 'a']
tm.assert_frame_equal(df, edf)
idf = DataFrame.from_records([(8, 5)],
columns=['a', 'a'])
tm.assert_frame_equal(idf, edf)
pytest.raises(ValueError, DataFrame.from_dict,
OrderedDict([('b', 8), ('a', 5), ('a', 6)]))
def test_constructor_empty_with_string_dtype(self):
# GH 9428
expected = DataFrame(index=[0, 1], columns=[0, 1], dtype=object)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype=str)
tm.assert_frame_equal(df, expected)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype=np.str_)
tm.assert_frame_equal(df, expected)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype=np.unicode_)
tm.assert_frame_equal(df, expected)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype='U5')
tm.assert_frame_equal(df, expected)
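# Requesting a str or unicode dtype for an empty frame falls back to
# object dtype, which is why each variant compares equal to the
# object-dtype expected frame.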
def test_constructor_single_value(self):
# expecting single value upcasting here
df = DataFrame(0., index=[1, 2, 3], columns=['a', 'b', 'c'])
tm.assert_frame_equal(df,
DataFrame(np.zeros(df.shape).astype('float64'),
df.index, df.columns))
df = DataFrame(0, index=[1, 2, 3], columns=['a', 'b', 'c'])
tm.assert_frame_equal(df, DataFrame(np.zeros(df.shape).astype('int64'),
df.index, df.columns))
df = DataFrame('a', index=[1, 2], columns=['a', 'c'])
tm.assert_frame_equal(df, DataFrame(np.array([['a', 'a'], ['a', 'a']],
dtype=object),
index=[1, 2], columns=['a', 'c']))
pytest.raises(ValueError, DataFrame, 'a', [1, 2])
pytest.raises(ValueError, DataFrame, 'a', columns=['a', 'c'])
msg = 'incompatible data and dtype'
with pytest.raises(TypeError, match=msg):
DataFrame('a', [1, 2], ['a', 'c'], float)
def test_constructor_with_datetimes(self):
intname = np.dtype(np.int_).name
floatname = np.dtype(np.float_).name
datetime64name = np.dtype('M8[ns]').name
objectname = np.dtype(np.object_).name
# single item
df = DataFrame({'A': 1, 'B': 'foo', 'C': 'bar',
'D': Timestamp("20010101"),
'E': datetime(2001, 1, 2, 0, 0)},
index=np.arange(10))
result = df.get_dtype_counts()
expected = Series({'int64': 1, datetime64name: 2, objectname: 2})
        result = result.sort_index()
        expected = expected.sort_index()
tm.assert_series_equal(result, expected)
# check with ndarray construction ndim==0 (e.g. we are passing a ndim 0
# ndarray with a dtype specified)
df = DataFrame({'a': 1., 'b': 2, 'c': 'foo',
floatname: np.array(1., dtype=floatname),
intname: np.array(1, dtype=intname)},
index=np.arange(10))
result = df.get_dtype_counts()
expected = {objectname: 1}
if intname == 'int64':
expected['int64'] = 2
else:
expected['int64'] = 1
expected[intname] = 1
if floatname == 'float64':
expected['float64'] = 2
else:
expected['float64'] = 1
expected[floatname] = 1
result = result.sort_index()
expected = Series(expected).sort_index()
tm.assert_series_equal(result, expected)
# check with ndarray construction ndim>0
df = DataFrame({'a': 1., 'b': 2, 'c': 'foo',
floatname: np.array([1.] * 10, dtype=floatname),
intname: np.array([1] * 10, dtype=intname)},
index=np.arange(10))
result = df.get_dtype_counts()
result = result.sort_index()
tm.assert_series_equal(result, expected)
# GH 2809
ind = date_range(start="2000-01-01", freq="D", periods=10)
datetimes = [ts.to_pydatetime() for ts in ind]
datetime_s = Series(datetimes)
assert datetime_s.dtype == 'M8[ns]'
df = DataFrame({'datetime_s': datetime_s})
result = df.get_dtype_counts()
expected = Series({datetime64name: 1})
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
# GH 2810
ind = date_range(start="2000-01-01", freq="D", periods=10)
datetimes = [ts.to_pydatetime() for ts in ind]
dates = [ts.date() for ts in ind]
df = DataFrame({'datetimes': datetimes, 'dates': dates})
result = df.get_dtype_counts()
expected = Series({datetime64name: 1, objectname: 1})
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
# GH 7594
# don't coerce tz-aware
import pytz
tz = pytz.timezone('US/Eastern')
dt = tz.localize(datetime(2012, 1, 1))
df = DataFrame({'End Date': dt}, index=[0])
assert df.iat[0, 0] == dt
tm.assert_series_equal(df.dtypes, Series(
{'End Date': 'datetime64[ns, US/Eastern]'}))
df = DataFrame([{'End Date': dt}])
assert df.iat[0, 0] == dt
tm.assert_series_equal(df.dtypes, Series(
{'End Date': 'datetime64[ns, US/Eastern]'}))
# tz-aware (UTC and other tz's)
# GH 8411
dr = date_range('20130101', periods=3)
df = DataFrame({'value': dr})
assert df.iat[0, 0].tz is None
dr = date_range('20130101', periods=3, tz='UTC')
df = DataFrame({'value': dr})
assert str(df.iat[0, 0].tz) == 'UTC'
dr = date_range('20130101', periods=3, tz='US/Eastern')
df = DataFrame({'value': dr})
assert str(df.iat[0, 0].tz) == 'US/Eastern'
# GH 7822
        # preserve an index with a tz on dict construction
i = date_range('1/1/2011', periods=5, freq='10s', tz='US/Eastern')
expected = DataFrame(
{'a': i.to_series(keep_tz=True).reset_index(drop=True)})
df = DataFrame()
df['a'] = i
tm.assert_frame_equal(df, expected)
df = DataFrame({'a': i})
tm.assert_frame_equal(df, expected)
# multiples
i_no_tz = date_range('1/1/2011', periods=5, freq='10s')
df = DataFrame({'a': i, 'b': i_no_tz})
expected = DataFrame({'a': i.to_series(keep_tz=True)
.reset_index(drop=True), 'b': i_no_tz})
tm.assert_frame_equal(df, expected)
def test_constructor_datetimes_with_nulls(self):
# gh-15869
for arr in [np.array([None, None, None, None,
datetime.now(), None]),
np.array([None, None, datetime.now(), None])]:
result = DataFrame(arr).get_dtype_counts()
expected = Series({'datetime64[ns]': 1})
tm.assert_series_equal(result, expected)
def test_constructor_for_list_with_dtypes(self):
# TODO(wesm): unused
intname = np.dtype(np.int_).name # noqa
floatname = np.dtype(np.float_).name # noqa
datetime64name = np.dtype('M8[ns]').name
objectname = np.dtype(np.object_).name
# test list of lists/ndarrays
df = DataFrame([np.arange(5) for x in range(5)])
result = df.get_dtype_counts()
expected = Series({'int64': 5})
df = DataFrame([np.array(np.arange(5), dtype='int32')
for x in range(5)])
result = df.get_dtype_counts()
expected = Series({'int32': 5})
        # overflow issue? (we always expect int64 upcasting here)
df = DataFrame({'a': [2 ** 31, 2 ** 31 + 1]})
result = df.get_dtype_counts()
expected = Series({'int64': 1})
tm.assert_series_equal(result, expected)
# GH #2751 (construction with no index specified), make sure we cast to
# platform values
df = DataFrame([1, 2])
result = df.get_dtype_counts()
expected = Series({'int64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame([1., 2.])
result = df.get_dtype_counts()
expected = Series({'float64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame({'a': [1, 2]})
result = df.get_dtype_counts()
expected = Series({'int64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame({'a': [1., 2.]})
result = df.get_dtype_counts()
expected = Series({'float64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame({'a': 1}, index=lrange(3))
result = df.get_dtype_counts()
expected = Series({'int64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame({'a': 1.}, index=lrange(3))
result = df.get_dtype_counts()
expected =
|
Series({'float64': 1})
|
pandas.Series
|
import torch
import numpy as np
from torch.nn.utils.rnn import pad_sequence
import pickle
import pandas as pd
import random
import re
from tqdm import tqdm
_RE_COMBINE_WHITESPACE = re.compile(r"\s+")
def make_single_whitespace(text):
return _RE_COMBINE_WHITESPACE.sub(" ", text).strip()
def remove_punc(txt):
return re.sub('[^A-Za-z0-9]+', ' ', str(txt))
# Load dataset names
df = pd.read_csv(r'C:\projects\personal\kaggle\kaggle_coleridge_initiative\data\data_set_26897.csv')
us_dataset_names = list(df.title.values)
us_dataset_names = [make_single_whitespace(remove_punc(n)).lower() for n in us_dataset_names]
def replace_target(x, lst):
if x.TARGET.iloc[0] == '0':
# if not a dataset name, do not augment
lst.append(x)
else:
random_name_tokens = random.choice(us_dataset_names).split(' ')
new_x = pd.DataFrame()
# Replace tokens
new_x['TOKEN'] = random_name_tokens
new_x['TARGET'] = '1'
lst.append(new_x)
def load_doc_df(doc_id, augment_chance = 0.0):
    doc_path = rf'C:\projects\personal\kaggle\kaggle_coleridge_initiative\data\processed_data/{doc_id}.csv'
doc_df = pd.read_csv(doc_path)
doc_df['TARGET'] = doc_df['TARGET'].astype('str')
doc_df['TOKEN'] = doc_df['TOKEN'].astype('str')
if augment_chance > 0:
if random.uniform(0, 1) < augment_chance:
df_pieces = []
# Do augmentation
# Replace target tokens
gb = doc_df.groupby((doc_df.TARGET.shift() != doc_df.TARGET).cumsum())
for name, group in gb:
replace_target(group, df_pieces)
doc_df = pd.concat(df_pieces, ignore_index= True, axis = 0)
# Convert to string
tokens = list(doc_df.TOKEN.values)
tokens = [t.lower() for t in tokens]
labels = list(doc_df.TARGET.values)
return tokens, labels
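# Illustration (not used by the pipeline): the groupby key applied above,
# (s.shift() != s).cumsum(), gives every run of consecutive identical TARGET
# values its own integer label, so each contiguous span of '1' tokens (one
# dataset-name mention) becomes a single group that replace_target can swap
# for a randomly chosen official dataset name. The demo tokens below are
# invented purely for illustration.
_demo = pd.DataFrame({'TOKEN': ['the', 'national', 'education', 'study', 'shows'],
                      'TARGET': ['0', '1', '1', '1', '0']})
_demo_groups = [g.TOKEN.tolist()
                for _, g in _demo.groupby((_demo.TARGET.shift() != _demo.TARGET).cumsum())]
# _demo_groups == [['the'], ['national', 'education', 'study'], ['shows']]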
def replace_target(x, lst):
if x.TARGET.iloc[0] == '0':
# if not a dataset name, do not augment
lst.append(x)
else:
random_name_tokens = random.choice(us_dataset_names).split(' ')
new_x =
|
pd.DataFrame()
|
pandas.DataFrame
|
import copy
import time
import calendar
import netCDF4
import numpy
from numpy import savez_compressed
from numpy import load
import pandas
import tensorflow
from tensorflow.keras.utils import to_categorical
import scipy.ndimage
import matplotlib.pyplot as pyplot
import seaborn as sns
import os.path
# Variable names.
NETCDF_X = 'x'
NETCDF_Y = 'y'
MUR_LATITUDE = 'lat'
MUR_LONGITUDE = 'lon'
NETCDF_LATITUDE = 'latitude'
NETCDF_LONGITUDE = 'longitude'
NETCDF_TIME = 'time'
NETCDF_UGRD_10m = 'UGRD_10maboveground'
#NETCDF_UGRD_1000mb= 'UGRD_1000mb'
NETCDF_UGRD_975mb = 'UGRD_975mb'
NETCDF_UGRD_950mb = 'UGRD_950mb'
NETCDF_UGRD_925mb = 'UGRD_925mb'
NETCDF_UGRD_900mb = 'UGRD_900mb'
NETCDF_UGRD_875mb = 'UGRD_875mb'
NETCDF_UGRD_850mb = 'UGRD_850mb'
NETCDF_UGRD_825mb = 'UGRD_825mb'
NETCDF_UGRD_800mb = 'UGRD_800mb'
NETCDF_UGRD_775mb = 'UGRD_775mb'
NETCDF_UGRD_750mb = 'UGRD_750mb'
NETCDF_UGRD_725mb = 'UGRD_725mb'
NETCDF_UGRD_700mb = 'UGRD_700mb'
NETCDF_VGRD_10m = 'VGRD_10maboveground'
#NETCDF_VGRD_1000mb= 'VGRD_1000mb'
NETCDF_VGRD_975mb = 'VGRD_975mb'
NETCDF_VGRD_950mb = 'VGRD_950mb'
NETCDF_VGRD_925mb = 'VGRD_925mb'
NETCDF_VGRD_900mb = 'VGRD_900mb'
NETCDF_VGRD_875mb = 'VGRD_875mb'
NETCDF_VGRD_850mb = 'VGRD_850mb'
NETCDF_VGRD_825mb = 'VGRD_825mb'
NETCDF_VGRD_800mb = 'VGRD_800mb'
NETCDF_VGRD_775mb = 'VGRD_775mb'
NETCDF_VGRD_750mb = 'VGRD_750mb'
NETCDF_VGRD_725mb = 'VGRD_725mb'
NETCDF_VGRD_700mb = 'VGRD_700mb'
#NETCDF_VVEL_1000mb= 'VVEL_1000mb'
NETCDF_VVEL_975mb = 'VVEL_975mb'
NETCDF_VVEL_950mb = 'VVEL_950mb'
NETCDF_VVEL_925mb = 'VVEL_925mb'
NETCDF_VVEL_900mb = 'VVEL_900mb'
NETCDF_VVEL_875mb = 'VVEL_875mb'
NETCDF_VVEL_850mb = 'VVEL_850mb'
NETCDF_VVEL_825mb = 'VVEL_825mb'
NETCDF_VVEL_800mb = 'VVEL_800mb'
NETCDF_VVEL_775mb = 'VVEL_775mb'
NETCDF_VVEL_750mb = 'VVEL_750mb'
NETCDF_VVEL_725mb = 'VVEL_725mb'
NETCDF_VVEL_700mb = 'VVEL_700mb'
#NETCDF_TKE_1000mb = 'TKE_1000mb'
NETCDF_TKE_975mb = 'TKE_975mb'
NETCDF_TKE_950mb = 'TKE_950mb'
NETCDF_TKE_925mb = 'TKE_925mb'
NETCDF_TKE_900mb = 'TKE_900mb'
NETCDF_TKE_875mb = 'TKE_875mb'
NETCDF_TKE_850mb = 'TKE_850mb'
NETCDF_TKE_825mb = 'TKE_825mb'
NETCDF_TKE_800mb = 'TKE_800mb'
NETCDF_TKE_775mb = 'TKE_775mb'
NETCDF_TKE_750mb = 'TKE_750mb'
NETCDF_TKE_725mb = 'TKE_725mb'
NETCDF_TKE_700mb = 'TKE_700mb'
NETCDF_TMP_SFC = 'TMP_surface'
NETCDF_TMP_2m = 'TMP_2maboveground'
#NETCDF_TMP_1000mb= 'TMP_1000mb'
NETCDF_TMP_975mb = 'TMP_975mb'
NETCDF_TMP_950mb = 'TMP_950mb'
NETCDF_TMP_925mb = 'TMP_925mb'
NETCDF_TMP_900mb = 'TMP_900mb'
NETCDF_TMP_875mb = 'TMP_875mb'
NETCDF_TMP_850mb = 'TMP_850mb'
NETCDF_TMP_825mb = 'TMP_825mb'
NETCDF_TMP_800mb = 'TMP_800mb'
NETCDF_TMP_775mb = 'TMP_775mb'
NETCDF_TMP_750mb = 'TMP_750mb'
NETCDF_TMP_725mb = 'TMP_725mb'
NETCDF_TMP_700mb = 'TMP_700mb'
#NETCDF_RH_1000mb = 'RH_1000mb'
NETCDF_RH_975mb = 'RH_975mb'
NETCDF_RH_950mb = 'RH_950mb'
NETCDF_RH_925mb = 'RH_925mb'
NETCDF_RH_900mb = 'RH_900mb'
NETCDF_RH_875mb = 'RH_875mb'
NETCDF_RH_850mb = 'RH_850mb'
NETCDF_RH_825mb = 'RH_825mb'
NETCDF_RH_800mb = 'RH_800mb'
NETCDF_RH_775mb = 'RH_775mb'
NETCDF_RH_750mb = 'RH_750mb'
NETCDF_RH_725mb = 'RH_725mb'
NETCDF_RH_700mb = 'RH_700mb'
NETCDF_DPT_2m = 'DPT_2maboveground'
NETCDF_FRICV = 'FRICV_surface'
NETCDF_VIS = 'VIS_surface'
NETCDF_RH_2m = 'RH_2maboveground'
#
NETCDF_Q975 = 'Q_975mb'
NETCDF_Q950 = 'Q_950mb'
NETCDF_Q925 = 'Q_925mb'
NETCDF_Q900 = 'Q_900mb'
NETCDF_Q875 = 'Q_875mb'
NETCDF_Q850 = 'Q_850mb'
NETCDF_Q825 = 'Q_825mb'
NETCDF_Q800 = 'Q_800mb'
NETCDF_Q775 = 'Q_775mb'
NETCDF_Q750 = 'Q_750mb'
NETCDF_Q725 = 'Q_725mb'
NETCDF_Q700 = 'Q_700mb'
NETCDF_Q = 'Q_surface'
#NETCDF_DQDZ1000SFC = 'DQDZ1000SFC'
NETCDF_DQDZ975SFC = 'DQDZ975SFC'
NETCDF_DQDZ950975 = 'DQDZ950975'
NETCDF_DQDZ925950 = 'DQDZ925950'
NETCDF_DQDZ900925 = 'DQDZ900925'
NETCDF_DQDZ875900 = 'DQDZ875900'
NETCDF_DQDZ850875 = 'DQDZ850875'
NETCDF_DQDZ825850 = 'DQDZ825850'
NETCDF_DQDZ800825 = 'DQDZ800825'
NETCDF_DQDZ775800 = 'DQDZ775800'
NETCDF_DQDZ750775 = 'DQDZ750775'
NETCDF_DQDZ725750 = 'DQDZ725750'
NETCDF_DQDZ700725 = 'DQDZ700725'
NETCDF_LCLT = 'LCLT'
NETCDF_DateVal = 'DateVal'
#+++++++++++++++
NETCDF_SST = 'analysed_sst'
NETCDF_PREDICTOR_NAMES = {
'OldOrder': [NETCDF_TMP_2m, NETCDF_TMP_975mb, NETCDF_TMP_950mb, NETCDF_TMP_925mb,
NETCDF_TMP_900mb, NETCDF_TMP_875mb, NETCDF_TMP_850mb, NETCDF_TMP_825mb, NETCDF_TMP_800mb, NETCDF_TMP_775mb, NETCDF_TMP_750mb,
NETCDF_TMP_725mb, NETCDF_TMP_700mb, NETCDF_UGRD_10m, NETCDF_VGRD_10m, NETCDF_FRICV, NETCDF_TKE_975mb,
NETCDF_TKE_950mb, NETCDF_TKE_925mb, NETCDF_TKE_900mb, NETCDF_TKE_875mb, NETCDF_TKE_850mb, NETCDF_TKE_825mb, NETCDF_TKE_800mb,
NETCDF_TKE_775mb, NETCDF_TKE_750mb, NETCDF_TKE_725mb, NETCDF_TKE_700mb, NETCDF_UGRD_975mb, NETCDF_UGRD_950mb,
NETCDF_UGRD_925mb, NETCDF_UGRD_900mb, NETCDF_UGRD_875mb, NETCDF_UGRD_850mb, NETCDF_UGRD_825mb, NETCDF_UGRD_800mb, NETCDF_UGRD_775mb,
NETCDF_UGRD_750mb, NETCDF_UGRD_725mb, NETCDF_UGRD_700mb, NETCDF_VGRD_975mb, NETCDF_VGRD_950mb, NETCDF_VGRD_925mb,
NETCDF_VGRD_900mb, NETCDF_VGRD_875mb, NETCDF_VGRD_850mb, NETCDF_VGRD_825mb, NETCDF_VGRD_800mb, NETCDF_VGRD_775mb, NETCDF_VGRD_750mb,
NETCDF_VGRD_725mb, NETCDF_VGRD_700mb, NETCDF_Q975, NETCDF_Q950, NETCDF_Q925, NETCDF_Q900, NETCDF_Q875, NETCDF_Q850, NETCDF_Q825, NETCDF_Q800,
NETCDF_Q775,NETCDF_Q750, NETCDF_Q725, NETCDF_Q700,
NETCDF_RH_975mb, NETCDF_RH_950mb, NETCDF_RH_925mb,NETCDF_RH_900mb, NETCDF_RH_875mb, NETCDF_RH_850mb, NETCDF_RH_825mb, NETCDF_RH_800mb,
NETCDF_RH_775mb, NETCDF_RH_750mb, NETCDF_RH_725mb, NETCDF_RH_700mb, NETCDF_DPT_2m, NETCDF_Q, NETCDF_RH_2m, NETCDF_LCLT, NETCDF_VIS,
NETCDF_VVEL_975mb, NETCDF_VVEL_950mb, NETCDF_VVEL_925mb, NETCDF_VVEL_900mb, NETCDF_VVEL_875mb, NETCDF_VVEL_850mb, NETCDF_VVEL_825mb,
NETCDF_VVEL_800mb, NETCDF_VVEL_775mb, NETCDF_VVEL_750mb, NETCDF_VVEL_725mb, NETCDF_VVEL_700mb],
'NewOrder': [NETCDF_TMP_2m, NETCDF_TMP_975mb, NETCDF_TMP_950mb, NETCDF_TMP_925mb,
NETCDF_TMP_900mb, NETCDF_TMP_875mb, NETCDF_TMP_850mb, NETCDF_TMP_825mb, NETCDF_TMP_800mb, NETCDF_TMP_775mb, NETCDF_TMP_750mb,
NETCDF_TMP_725mb, NETCDF_TMP_700mb, NETCDF_UGRD_10m, NETCDF_VGRD_10m, NETCDF_FRICV, NETCDF_UGRD_975mb, NETCDF_VGRD_975mb, NETCDF_TKE_975mb,
NETCDF_UGRD_950mb, NETCDF_VGRD_950mb, NETCDF_TKE_950mb, NETCDF_UGRD_925mb, NETCDF_VGRD_925mb, NETCDF_TKE_925mb, NETCDF_UGRD_900mb, NETCDF_VGRD_900mb,
NETCDF_TKE_900mb, NETCDF_UGRD_875mb, NETCDF_VGRD_875mb, NETCDF_TKE_875mb, NETCDF_UGRD_850mb, NETCDF_VGRD_850mb, NETCDF_TKE_850mb, NETCDF_UGRD_825mb,
NETCDF_VGRD_825mb, NETCDF_TKE_825mb, NETCDF_UGRD_800mb, NETCDF_VGRD_800mb, NETCDF_TKE_800mb, NETCDF_UGRD_775mb, NETCDF_VGRD_775mb,
NETCDF_TKE_775mb, NETCDF_UGRD_750mb, NETCDF_VGRD_750mb, NETCDF_TKE_750mb, NETCDF_UGRD_725mb, NETCDF_VGRD_725mb, NETCDF_TKE_725mb,
NETCDF_UGRD_700mb, NETCDF_VGRD_700mb, NETCDF_TKE_700mb, NETCDF_Q975, NETCDF_Q950, NETCDF_Q925, NETCDF_Q900, NETCDF_Q875, NETCDF_Q850,
NETCDF_Q825, NETCDF_Q800, NETCDF_Q775,NETCDF_Q750, NETCDF_Q725, NETCDF_Q700,
NETCDF_RH_975mb, NETCDF_RH_950mb, NETCDF_RH_925mb,NETCDF_RH_900mb, NETCDF_RH_875mb, NETCDF_RH_850mb, NETCDF_RH_825mb, NETCDF_RH_800mb,
NETCDF_RH_775mb, NETCDF_RH_750mb, NETCDF_RH_725mb, NETCDF_RH_700mb, NETCDF_DPT_2m, NETCDF_Q, NETCDF_RH_2m, NETCDF_LCLT, NETCDF_VIS,
NETCDF_VVEL_975mb, NETCDF_VVEL_950mb, NETCDF_VVEL_925mb, NETCDF_VVEL_900mb, NETCDF_VVEL_875mb, NETCDF_VVEL_850mb, NETCDF_VVEL_825mb,
NETCDF_VVEL_800mb, NETCDF_VVEL_775mb, NETCDF_VVEL_750mb, NETCDF_VVEL_725mb, NETCDF_VVEL_700mb],
'Physical_G1':[NETCDF_FRICV, NETCDF_UGRD_10m, NETCDF_UGRD_975mb, NETCDF_UGRD_950mb, NETCDF_UGRD_925mb, NETCDF_UGRD_900mb,
NETCDF_UGRD_875mb, NETCDF_UGRD_850mb, NETCDF_UGRD_825mb, NETCDF_UGRD_800mb, NETCDF_UGRD_775mb, NETCDF_UGRD_750mb,
NETCDF_UGRD_725mb, NETCDF_UGRD_700mb, NETCDF_VGRD_10m, NETCDF_VGRD_975mb, NETCDF_VGRD_950mb, NETCDF_VGRD_925mb,
NETCDF_VGRD_900mb, NETCDF_VGRD_875mb, NETCDF_VGRD_850mb, NETCDF_VGRD_825mb, NETCDF_VGRD_800mb, NETCDF_VGRD_775mb, NETCDF_VGRD_750mb,
NETCDF_VGRD_725mb, NETCDF_VGRD_700mb],
'Physical_G2':[NETCDF_TKE_975mb, NETCDF_TKE_950mb, NETCDF_TKE_925mb, NETCDF_TKE_900mb, NETCDF_TKE_875mb, NETCDF_TKE_850mb, NETCDF_TKE_825mb,
NETCDF_TKE_800mb, NETCDF_TKE_775mb, NETCDF_TKE_750mb, NETCDF_TKE_725mb, NETCDF_TKE_700mb, NETCDF_Q975, NETCDF_Q950, NETCDF_Q925, NETCDF_Q900,
NETCDF_Q875, NETCDF_Q850, NETCDF_Q825, NETCDF_Q800, NETCDF_Q775,NETCDF_Q750, NETCDF_Q725, NETCDF_Q700],
'Physical_G3':[NETCDF_TMP_2m, NETCDF_TMP_975mb, NETCDF_TMP_950mb, NETCDF_TMP_925mb, NETCDF_TMP_900mb, NETCDF_TMP_875mb, NETCDF_TMP_850mb,
NETCDF_TMP_825mb, NETCDF_TMP_800mb, NETCDF_TMP_775mb, NETCDF_TMP_750mb, NETCDF_TMP_725mb, NETCDF_TMP_700mb, NETCDF_DPT_2m, NETCDF_RH_2m,
NETCDF_RH_975mb, NETCDF_RH_950mb, NETCDF_RH_925mb,NETCDF_RH_900mb, NETCDF_RH_875mb, NETCDF_RH_850mb, NETCDF_RH_825mb, NETCDF_RH_800mb,
NETCDF_RH_775mb, NETCDF_RH_750mb, NETCDF_RH_725mb, NETCDF_RH_700mb],
'Physical_G4':[NETCDF_Q, NETCDF_LCLT, NETCDF_VIS,
NETCDF_VVEL_975mb, NETCDF_VVEL_950mb, NETCDF_VVEL_925mb, NETCDF_VVEL_900mb, NETCDF_VVEL_875mb, NETCDF_VVEL_850mb, NETCDF_VVEL_825mb,
NETCDF_VVEL_800mb, NETCDF_VVEL_775mb, NETCDF_VVEL_750mb, NETCDF_VVEL_725mb, NETCDF_VVEL_700mb]
}
NETCDF_TMPDPT = 'TMP-DPT'
NETCDF_TMPSST = 'TMP-SST'
NETCDF_DPTSST = 'DPT-SST'
NETCDF_MUR_NAMES = [NETCDF_SST]
NETCDF_TMP_NAMES = [NETCDF_TMP_SFC, NETCDF_TMP_2m, NETCDF_DPT_2m]
NETCDF_MIXED_NAMES = [NETCDF_SST, NETCDF_TMPDPT, NETCDF_TMPSST, NETCDF_DPTSST]
NETCDF_GEN_NAMES = [NETCDF_TMPDPT, NETCDF_TMPSST, NETCDF_DPTSST]
# Directories.
YEAR_FOG_DIR_NAME = '.'
ALL_FOG_DIR_NAME = '..'
DEFAULT_IMAGE_DIR_NAME = ('/data1/fog-data/fog-maps/')
#6HOURS
DEFAULT_TARGET_DIR_NAME = ('../Dataset/TARGET/')
SAVE_CUBE_DIR = '../Dataset/INPUT/MinMax/HIGH/'
SAVE_FILE_NAMES_DIR = '../Dataset/NAMES/'
SAVE_TARGET_DIR = '../Dataset/TARGET/'
DEFAULT_CUBES_12_DIR_NAME = ('../Dataset/INPUT/12Hours/')
DEFAULT_TARGET_DIR_NAME = ('../Dataset/TARGET/12Hours/')
#12HOURS
DEFAULT_12HOURS_TARGET_DIR = ('../Dataset/12HOURS/TARGET/')
DEFAULT_12HOURS_CUBES_DIR = ('../Dataset/12HOURS/INPUT/')
DEFAULT_12HOURS_NAMES_DIR = ('../Dataset/12HOURS/NAMES/')
#24HOURS
DEFAULT_24HOURS_TARGET_DIR = ('../Dataset/24HOURS/TARGET/')
DEFAULT_24HOURS_CUBES_DIR = ('../Dataset/24HOURS/INPUT/')
DEFAULT_24HOURS_NAMES_DIR = ('../Dataset/24HOURS/NAMES/')
### Default Names and Settings
SEPARATOR_STRING = '\n\n' + '*' * 50 + '\n\n'
MINOR_SEPARATOR_STRING = '\n\n' + '-' * 50 + '\n\n'
FIG_DEFULT_SIZE = (12, 10)
PREDICTOR_NAMES_KEY = 'predictor_names'
PREDICTOR_MATRIX_KEY = 'predictor_matrix'
CUBE_NAMES_KEY = 'cube_name'
SST_MATRIX_KEY = 'sst_matrix'
SST_NAME_KEY = 'sst_name'
# Misc constants.
SEPARATOR_STRING = '\n\n' + '*' * 50 + '\n\n'
MINOR_SEPARATOR_STRING = '\n\n' + '-' * 50 + '\n\n'
DATE_FORMAT = '%Y%m%d'
DATE_FORMAT_REGEX = '[0-9][0-9][0-9][0-9][0-1][0-9][0-3][0-9]'
TIME_CYCLE_FORMAT = '[0-9][0-9][0][0]'
HOUR_PREDICTION_FORMAT = '[0-9][0-9][0-9]'
NUM_VALUES_KEY = 'num_values'
MEAN_VALUE_KEY = 'mean_value'
MEAN_OF_SQUARES_KEY = 'mean_of_squares'
#==========================================================================================#
#================================ preprocessing tools =====================================#
#==========================================================================================#
def time_string_to_unix(time_string, time_format):
"""Converts time from string to Unix format.
Unix format = seconds since 0000 UTC 1 Jan 1970.
:param time_string: Time string.
:param time_format: Format of time string (example: "%Y%m%d" or
"%Y-%m-%d-%H%M%S").
:return: unix_time_sec: Time in Unix format.
"""
return calendar.timegm(time.strptime(time_string, time_format))
def time_unix_to_string(unix_time_sec, time_format):
"""Converts time from Unix format to string.
Unix format = seconds since 0000 UTC 1 Jan 1970.
:param unix_time_sec: Time in Unix format.
:param time_format: Desired format of time string (example: "%Y%m%d" or
"%Y-%m-%d-%H%M%S").
:return: time_string: Time string.
"""
return time.strftime(time_format, time.gmtime(unix_time_sec))
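# Round trip (illustrative): the two converters above are inverses for a given
# format string, e.g.
#   time_unix_to_string(time_string_to_unix('20200101', DATE_FORMAT), DATE_FORMAT)
#   == '20200101'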
def _nc_file_name_to_date(netcdf_file_name):
"""Parses date from name of image (NetCDF) file.
:param netcdf_file_name: Path to input file.
:return: date_string: Date (format "yyyymmdd").
"""
pathless_file_name = os.path.split(netcdf_file_name)[-1]
date_string = pathless_file_name.replace(pathless_file_name[0:5], '').replace(
pathless_file_name[-18:], '')
# Verify.
time_string_to_unix(time_string=date_string, time_format=DATE_FORMAT)
return date_string
def _nc_file_name_to_timecycle(netcdf_file_name):
"""Parses date from name of image (NetCDF) file.
:param netcdf_file_name: Path to input file.
:return: time-cycle prediction.
"""
pathless_file_name = os.path.split(netcdf_file_name)[-1]
timecycle_string = pathless_file_name.replace(pathless_file_name[0:14], '').replace(
pathless_file_name[-13:], '')
# Verify.
#time_string_to_unix(time_string=timecycle_string, time_format=TIME_CYCLE_FORMAT)
return timecycle_string
def _nc_file_name_to_hourprediction(netcdf_file_name):
"""Parses date from name of image (NetCDF) file.
:param netcdf_file_name: Path to input file.
:return: time-cycle prediction.
"""
pathless_file_name = os.path.split(netcdf_file_name)[-1]
hourpredic_string = pathless_file_name.replace(pathless_file_name[0:19], '').replace(
pathless_file_name[-9:], '')
return hourpredic_string
def find_match_name (netcdf_file_name):
date = _nc_file_name_to_date(netcdf_file_name)
timecycle = _nc_file_name_to_timecycle(netcdf_file_name)
hourprediction = _nc_file_name_to_hourprediction(netcdf_file_name)
this_cube_match_name = date + timecycle
return {'cube_match_name' : this_cube_match_name,
'date' : date,
'timecycle' : timecycle,
'hourprediction' : hourprediction}
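# Illustration of the file-name layout these parsers assume (inferred from the
# slicing offsets above and from the 'maps'/'murs' prefix check in
# netcdf_names_check below); the concrete file name is hypothetical.
def _find_match_name_example():
    info = find_match_name('maps_20190101_0000_006_input.nc')
    assert info == {'cube_match_name': '201901010000',
                    'date': '20190101',
                    'timecycle': '0000',
                    'hourprediction': '006'}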
def netcdf_names_check(root_dir, target=None):
    # Read the target data and list every index whose target is NaN, so those days
    # (and their corresponding map data) can be removed:
nan_index = target[target['VIS_Cat'].isnull().values].index.tolist()
Nannames = target['Date'].iloc[nan_index]
names = Nannames.values
NAN = pandas.DataFrame(columns = ['name'])
NAN['name'] = names
    # Walk the map-data directory and drop days whose data are incomplete or whose target is NaN.
    netcef_nams_file_name = []  # names of NAM maps that are complete
    netcef_murs_file_name = []  # names of MUR maps that are complete
for root, dirs, files in os.walk(root_dir):
dirs.sort()
files.sort()
datavalume = len(files)
if datavalume == 149:
namesplit = os.path.split(files[0])[-1]
match_name_1 = namesplit.replace(namesplit[:5], '').replace(namesplit[13:], '')
for f in NAN['name'].isin([int(match_name_1)]):
if f is True:
foldercondition = False
#print(('The Traget for "{0}" day is NAN!').format(match_name_1))
#print('Removed the corresponding map for days with NAN Target!')
#print('=====================================================================')
break
else:
foldercondition = True
if (foldercondition is True):
for name in files:
namesplit = os.path.split(name)[-1]
namOrmur = namesplit.replace(namesplit[4:], '')
if (namOrmur == 'murs'):
name = root +'/'+ name
netcef_murs_file_name.append(name)
netcef_murs_file_name.sort()
elif (namOrmur == 'maps'):
name = root +'/'+ name
netcef_nams_file_name.append(name)
netcef_nams_file_name.sort()
elif datavalume < 149 and datavalume != 0:
if files[0].endswith(".txt"):
print('break')
else:
namesplit = os.path.split(files[0])[-1]
match_name = namesplit.replace(namesplit[:5], '').replace(namesplit[13:], '')
#print(('The expected maps is 149 which there are "{0}" maps for {1} day!').format(datavalume, match_name))
target = target.drop(target[target.Date == int(match_name)].index)
#print('Removed the corresponding target values for days with incomplete data!')
#print('=====================================================================')
target = target.dropna()
target = RenewDf(target)
for d in target['Date']:
if target.loc[target.Date == d, 'Date'].count() < 4:
target = target.drop(target.loc[target.Date == d, 'Date'].index)
target = RenewDf(target)
return [netcef_nams_file_name, netcef_murs_file_name, target]
def RenewDf(df):
newdf = pandas.DataFrame(columns=['Date', 'VIS', 'VIS_Cat'])
dates = df['Date'].values
cat = df['VIS_Cat'].values
vis = df['VIS'].values
newdf['Date'] = dates
newdf['VIS_Cat'] = cat
newdf['VIS'] = vis
return newdf
def copy_mur_name_ntimes(netcdf_file_names, output, n):
for i in range(len(netcdf_file_names)):
name = netcdf_file_names[i]
for j in range(n):
output.append(name)
return output
def map_upsampling(downsampled_cube_file):
upsampled_map = None
upsampled_map = scipy.ndimage.zoom(downsampled_cube_file, 11.75, order=3)
return upsampled_map
def map_downsampling(upsampled_cube_file):
downsampled_map = None
downsampled_map = scipy.ndimage.zoom(upsampled_cube_file, 0.0851, order=3)
return downsampled_map
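# Quick shape check for the zoom factors above (illustrative; the 32 x 32 NAM
# grid size is an assumption inferred from 32 * 11.75 = 376, the SST map size
# handled further down). Because 11.75 and 0.0851 are near-reciprocals,
# map_downsampling approximately undoes map_upsampling.
def _zoom_factor_shape_check():
    assert map_upsampling(numpy.zeros((32, 32))).shape == (376, 376)
    assert map_downsampling(numpy.zeros((376, 376))).shape == (32, 32)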
#===========================================================================================#
#=============== Finding the cubes based on their names ===================================#
#===========================================================================================#
def find_map_name_date(first_date_string, last_date_string, target = None, image_dir_name = DEFAULT_IMAGE_DIR_NAME):
"""Finds image (NetCDF) files in the given date range.
:param first_date_string: First date ("yyyymmdd") in range.
:param last_date_string: Last date ("yyyymmdd") in range.
:param image_dir_name: Name of directory with image (NetCDF) files.
:return: netcdf_file_names: 1-D list of paths to image files.
"""
# check the target and return the desierd target index:
Dates = target['Date'].values
good_indices_target = numpy.where(numpy.logical_and(
Dates >= int(first_date_string),
Dates <= int(last_date_string)
))[0]
input_target = target.take(good_indices_target)
target_1 = RenewDf(input_target)
netcdf_nam_file_names, netcdf_mur_file_names, target_2 = netcdf_names_check(image_dir_name, target_1)
target = RenewDf(target_2)
first_time_unix_sec = time_string_to_unix(
time_string=first_date_string, time_format=DATE_FORMAT)
last_time_unix_sec = time_string_to_unix(
time_string=last_date_string, time_format=DATE_FORMAT)
# NAM Data
file_date_strings = [_nc_file_name_to_date(f) for f in netcdf_nam_file_names]
file_times_unix_sec = numpy.array([
time_string_to_unix(time_string=d, time_format=DATE_FORMAT)
for d in file_date_strings
], dtype=int)
good_indices_nam = numpy.where(numpy.logical_and(
file_times_unix_sec >= first_time_unix_sec,
file_times_unix_sec <= last_time_unix_sec
))[0]
# MUR Data
file_date_strings_mur = [_nc_file_name_to_date(f) for f in netcdf_mur_file_names]
file_times_unix_sec_mur = numpy.array([
time_string_to_unix(time_string=d, time_format=DATE_FORMAT)
for d in file_date_strings_mur
], dtype=int)
good_indices_mur = numpy.where(numpy.logical_and(
file_times_unix_sec_mur >= first_time_unix_sec,
file_times_unix_sec_mur <= last_time_unix_sec
))[0]
return [netcdf_nam_file_names[k] for k in good_indices_nam], [netcdf_mur_file_names[k] for k in good_indices_mur], target
def find_nam_cubes_name_hourpredict(netcdf_file_names, hour_prediction_names = ['000', '006', '012', '024']):
"""Depend on the time prediction this function just select the name of selected maps:
for example in this case, the time prediction 000, 003 and 006 hour has been selected.
"""
file_date_strings = [_nc_file_name_to_hourprediction(f) for f in netcdf_file_names]
file_date_strings = pandas.DataFrame(file_date_strings, columns = ['str'])
good_indices = file_date_strings[
(file_date_strings['str'] == hour_prediction_names[0]) |
(file_date_strings['str'] == hour_prediction_names[1]) |
(file_date_strings['str'] == hour_prediction_names[2]) |
(file_date_strings['str'] == hour_prediction_names[3])]
return [netcdf_file_names[k] for k in list(good_indices.index)]
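# A terser equivalent (sketch, not called by the pipeline): boolean indexing with
# Series.isin avoids hard-coding exactly four lead times and works for any number
# of entries in hour_prediction_names.
def find_nam_cubes_name_hourpredict_isin(netcdf_file_names,
                                         hour_prediction_names=('000', '006', '012', '024')):
    hours = pandas.Series([_nc_file_name_to_hourprediction(f) for f in netcdf_file_names])
    mask = hours.isin(hour_prediction_names)
    return [name for name, keep in zip(netcdf_file_names, mask) if keep]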
#============================================================================
#==================== Reading Nam and MUR maps =======================
#============================================================================
def read_nam_maps(netcdf_file_name, PREDICTOR_NAMES):
"""Reads fog-centered maps from NetCDF file.
E = number of examples (fog objects) in file
M = number of rows in each fog-centered grid
N = number of columns in each fog-centered grid
C = number of channels (predictor variables)
:param netcdf_file_name: Path to input file.
:return: image_dict: Dictionary with the following keys.
image_dict['predictor_names']: length-C list of predictor names.
image_dict['predictor_matrix']: E-by-M-by-N-by-C numpy array of predictor
values.
"""
NETCDF_PREDICTOR_NAMES = PREDICTOR_NAMES
dataset_object = netCDF4.Dataset(netcdf_file_name)
lons = numpy.array(dataset_object.variables[NETCDF_LONGITUDE][:], dtype=float)
lats = numpy.array(dataset_object.variables[NETCDF_LATITUDE][:], dtype=float)
predictor_matrix = None
for this_predictor_name in NETCDF_PREDICTOR_NAMES:
this_predictor_matrix = numpy.array(
dataset_object.variables[this_predictor_name][:], dtype=float
)
this_predictor_matrix = numpy.expand_dims(
this_predictor_matrix, axis=-1)
if predictor_matrix is None:
predictor_matrix = this_predictor_matrix + 0.
else:
predictor_matrix = numpy.concatenate(
(predictor_matrix, this_predictor_matrix), axis=-1
)
return {
PREDICTOR_MATRIX_KEY: predictor_matrix,
PREDICTOR_NAMES_KEY: NETCDF_PREDICTOR_NAMES,
NETCDF_LONGITUDE: lons,
NETCDF_LATITUDE: lats}
def read_mur_map(netcdf_file_name, PREDICTOR_NAMES):
NETCDF_PREDICTOR_NAMES = PREDICTOR_NAMES
dataset_object = netCDF4.Dataset(netcdf_file_name)
lons = numpy.array(dataset_object.variables[MUR_LONGITUDE][:], dtype=float)
lats = numpy.array(dataset_object.variables[MUR_LATITUDE][:], dtype=float)
predictor_matrix = None
for this_predictor_name in NETCDF_PREDICTOR_NAMES:
this_predictor_matrix = numpy.array(
dataset_object.variables[this_predictor_name][:], dtype=float
)
this_predictor_matrix = numpy.expand_dims(
this_predictor_matrix, axis=-1)
if predictor_matrix is None:
predictor_matrix = this_predictor_matrix + 0.
else:
predictor_matrix = numpy.concatenate(
(predictor_matrix, this_predictor_matrix), axis=-1
)
return {
PREDICTOR_MATRIX_KEY: predictor_matrix,
PREDICTOR_NAMES_KEY: NETCDF_PREDICTOR_NAMES,
MUR_LONGITUDE: lons,
MUR_LATITUDE: lats}
def read_many_nam_cube(netcdf_file_names, PREDICTOR_NAMES):
"""Reads storm-centered images from many NetCDF files.
:param netcdf_file_names: 1-D list of paths to input files.
:return: image_dict: See doc for `read_image_file`.
"""
image_dict = None
keys_to_concat = [PREDICTOR_MATRIX_KEY]
for this_file_name in netcdf_file_names:
#print('Reading data from: "{0:s}"...'.format(this_file_name))
this_image_dict = read_nam_maps(this_file_name, PREDICTOR_NAMES)
if image_dict is None:
image_dict = copy.deepcopy(this_image_dict)
continue
for this_key in keys_to_concat:
image_dict[this_key] = numpy.concatenate(
(image_dict[this_key], this_image_dict[this_key]), axis=0
)
return image_dict
#======================================================================================================
#=========================== Concatenate the cubes in order to create Tensor: =========================
#======================================================================================================
# in this section there are two function just to generate three manually features include of
# [TMPsurface -SST, TMPsurface-DPT, DPT -SST]
def highres_features(nam_file_name, nam_feature_name, mur_file_name, mur_feature_name):
"""
    To generate the derived features, first upsample the NAM fields to the resolution of the SST maps,
    then compute the new features and assemble a cube from them.
"""
keys_to_concat = [PREDICTOR_MATRIX_KEY]
this_nam_file_name = nam_file_name
this_nam_dict = read_nam_maps(this_nam_file_name, nam_feature_name)
#print(this_nam_dict[PREDICTOR_MATRIX_KEY].shape)
this_nam_tmpsurf = this_nam_dict[PREDICTOR_MATRIX_KEY][0, :,:,0]
this_nam_tmp2m = this_nam_dict[PREDICTOR_MATRIX_KEY][0, :,:,1]
this_nam_dpt = this_nam_dict[PREDICTOR_MATRIX_KEY][0, :,:,2]
#print('BEFORE UPSAMPLING: ', this_nam_dpt.shape)
up_this_nam_tmpsurf= map_upsampling(this_nam_tmpsurf)
up_this_nam_tmp2m = map_upsampling(this_nam_tmp2m)
up_this_nam_dpt = map_upsampling(this_nam_dpt)
#print('AFTER UPSAMPLING: ', up_this_nam_dpt.shape)
this_mur_file_name = mur_file_name
this_mur_dict = read_mur_map(this_mur_file_name, mur_feature_name)
this_mur_file = this_mur_dict[PREDICTOR_MATRIX_KEY][0, :, :, 0]
#print('MUR SIZE: ', this_mur_file.shape)
    # fill MUR cells flagged as missing (-32768) with the upsampled NAM surface temperature:
for l in range(len(this_mur_file)):
for w in range(len(this_mur_file)):
if this_mur_file[l, w] == -32768:
this_mur_file[l, w] = up_this_nam_tmpsurf[l, w]
NETCDF_SST = this_mur_file
NETCDF_TMPDPT = numpy.subtract(up_this_nam_tmp2m , up_this_nam_dpt)
NETCDF_TMPSST = numpy.subtract(up_this_nam_tmp2m , this_mur_file)
NETCDF_DPTSST = numpy.subtract(up_this_nam_dpt , this_mur_file)
# downsampling:
NETCDF_TMPDPT = map_downsampling(NETCDF_TMPDPT)
NETCDF_TMPSST = map_downsampling(NETCDF_TMPSST)
NETCDF_DPTSST = map_downsampling(NETCDF_DPTSST)
THIS_NETCDF_SST = numpy.expand_dims(NETCDF_SST, axis=-1)
THIS_NETCDF_TMPDPT = numpy.expand_dims(NETCDF_TMPDPT, axis=-1)
THIS_NETCDF_TMPSST = numpy.expand_dims(NETCDF_TMPSST, axis=-1)
THIS_NETCDF_DPTSST = numpy.expand_dims(NETCDF_DPTSST, axis=-1)
THIS_NETCDF_SST_CUBE = numpy.expand_dims(THIS_NETCDF_SST, axis=0)
THIS_NETCDF_MIXED_CUBE = numpy.concatenate((THIS_NETCDF_TMPDPT, THIS_NETCDF_TMPSST, THIS_NETCDF_DPTSST),
axis = -1)
THIS_NETCDF_MIXED_CUBE = numpy.expand_dims(THIS_NETCDF_MIXED_CUBE, axis=0)
return { SST_MATRIX_KEY: THIS_NETCDF_SST_CUBE,
PREDICTOR_MATRIX_KEY: THIS_NETCDF_MIXED_CUBE,
PREDICTOR_NAMES_KEY: NETCDF_MIXED_NAMES}
def SST_Upsampling(lowe_res_sst_cube):
length = lowe_res_sst_cube.shape[0]
high_res_sst_cube = None
for m in range(length):
low_res_map = lowe_res_sst_cube[m, :, : , 0]
this_high_res_map = scipy.ndimage.zoom(low_res_map, 1.021, order=3)
this_high_res_map = numpy.expand_dims(this_high_res_map, axis = 0)
if high_res_sst_cube is None:
high_res_sst_cube = this_high_res_map
else:
high_res_sst_cube = numpy.concatenate([high_res_sst_cube, this_high_res_map], axis = 0)
high_res_sst_cube = numpy.expand_dims(high_res_sst_cube, axis = -1)
return high_res_sst_cube
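# Note on the 1.021 factor (illustrative): scipy.ndimage.zoom rounds the output
# size, so a 376 x 376 SST map becomes 384 x 384 (376 * 1.021 ~= 383.9 -> 384),
# which is presumably the higher-resolution grid written out by SST_384_cubes below.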
def SST_384_cubes(sst_376_cube_names, cubes_dir):
for key in sst_376_cube_names:
print(('Process for {0} just started!').format(key))
low_res_cube_name = sst_376_cube_names[key][0]
low_res_cube_name = cubes_dir + low_res_cube_name
low_res_cube = load_cube(low_res_cube_name)
high_res_cube = SST_Upsampling(low_res_cube)
print('The shape after upsampling: ', high_res_cube.shape)
sst_cube_name = 'NETCDF_SST_CUBE_' + key + '.npz'
sst_cube_path = os.path.join(SAVE_CUBE_DIR, sst_cube_name)
savez_compressed(sst_cube_path, high_res_cube)
def mixed_cubes(nam_file_names, nam_feature_name, mur_file_names, mur_feature_name):
length = len(nam_file_names)
NETCDF_MIXED_CUBES = None
NETCDF_SST_CUBES = None
for i in range(length):
this_nam_file_name = nam_file_names[i]
this_mur_file_name = mur_file_names[i]
this_highres_cube = highres_features(this_nam_file_name, nam_feature_name, this_mur_file_name, mur_feature_name)
if NETCDF_MIXED_CUBES is None:
NETCDF_MIXED_CUBES = this_highres_cube[PREDICTOR_MATRIX_KEY]
NETCDF_SST_CUBES = this_highres_cube[SST_MATRIX_KEY]
else:
NETCDF_MIXED_CUBES = numpy.concatenate((NETCDF_MIXED_CUBES, this_highres_cube[PREDICTOR_MATRIX_KEY]), axis = 0)
NETCDF_SST_CUBES = numpy.concatenate((NETCDF_SST_CUBES, this_highres_cube[SST_MATRIX_KEY]), axis = 0)
return {SST_MATRIX_KEY : NETCDF_SST_CUBES,
PREDICTOR_MATRIX_KEY: NETCDF_MIXED_CUBES,
PREDICTOR_NAMES_KEY: NETCDF_GEN_NAMES,
SST_NAME_KEY: NETCDF_MUR_NAMES
}
#======================================================================================================
#=========================== Concatenate the cubes in order to create Tensor: =========================
#======================================================================================================
def concate_nam_cubes_files(netcdf_file_names, PREDICTOR_NAMES):
"""
    Concatenates the input maps for each day according to the prediction lead time.
    For example, with a 6-hour lead time there are several maps per time cycle (e.g. 000, 003 and 006);
    their names are selected by "find_nam_cubes_name_hourpredict", and this function stacks all of those
    lead-time predictions into one cube by concatenation.
input: netcdf_file_names
output: 3D cube
"""
cube_tensor = None
cubes_dict = {}
cancat_cube = None
match_name = None
cubenumber = 0
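    # Four lead-time maps per day/time cycle are stacked along the channel axis,
    # so a finished cube carries Depth = 4 * len(PREDICTOR_NAMES) channels.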
Depth = 4*len(PREDICTOR_NAMES)
cube_names = netcdf_file_names[1]
for this_file in range(len(cube_names)):
this_cube_name = cube_names[this_file]
this_cube_name_details = find_match_name (this_cube_name)
this_cube_match_name = this_cube_name_details ['cube_match_name']
this_cube_date_name = this_cube_name_details ['date']
this_cube_timecycle_name = this_cube_name_details ['timecycle']
#print('Name this_cube_match_name before if: ', this_cube_match_name)
this_cube_tensor = netcdf_file_names[0][this_file]
#print('Size this_cube_tensor before if: ', this_cube_tensor['predictor_matrix'].shape)
if cube_tensor is None:
cube_tensor = this_cube_tensor
match_name = this_cube_match_name
#print('Name cube_match_name after if: ', cube_match_name)
#print('Size cube_tensor after if: ', cube_tensor['predictor_matrix'].shape)
#print(this_cube_match_name)
elif match_name == this_cube_match_name:
#print(True)
cube_tensor = numpy.concatenate((cube_tensor[PREDICTOR_MATRIX_KEY], this_cube_tensor[PREDICTOR_MATRIX_KEY]), axis=-1)
cube_tensor = {PREDICTOR_MATRIX_KEY: cube_tensor}
#print('New Size of cube: ', cube_tensor['predictor_matrix'].shape)
Depth_cube = cube_tensor[PREDICTOR_MATRIX_KEY].shape[3]
if Depth_cube == Depth:
cube_name = 'cube_' + this_cube_date_name + '_' + this_cube_timecycle_name +'_036' + '_input.nc'
cube_tensor = {PREDICTOR_MATRIX_KEY: cube_tensor[PREDICTOR_MATRIX_KEY],
PREDICTOR_NAMES_KEY: 4 * PREDICTOR_NAMES,
CUBE_NAMES_KEY: cube_name}
cubes_dict[cubenumber] = cube_tensor
cubenumber = cubenumber + 1
else:
match_name = this_cube_match_name
#print('Name cube_match_name after else: ', cube_match_name)
cube_tensor = this_cube_tensor
#print('Size cube_tensor after else: ', cube_tensor['predictor_matrix'].shape)
return cubes_dict
def nam_cubes_dict(netcdf_file_names):
cubes_dict = None
keys_to_concat = [PREDICTOR_MATRIX_KEY]
numerator = len(netcdf_file_names)
for this_file in range(numerator):
cube_tensor = netcdf_file_names[this_file]
        #print('Size cube_tensor before if: ', cube_tensor['predictor_matrix'].shape)
if cubes_dict is None:
cubes_dict = copy.deepcopy(cube_tensor)
else:
cubes_dict = numpy.concatenate(
(cubes_dict[PREDICTOR_MATRIX_KEY], cube_tensor[PREDICTOR_MATRIX_KEY]), axis=0
)
cubes_dict = {PREDICTOR_MATRIX_KEY: cubes_dict}
return cubes_dict
def concate_mixed_cubes_files(normalized_mur_cubes):
"""
    A "mixed cube" holds the three manually generated features: TMPsurface-SST, DPT-SST and TMPsurface-DPT.
    Using the map names, this function builds the cube that stacks those three maps for each of the
    four daily time cycles.
"""
cancat_mur_cube = None
length = len(normalized_mur_cubes)
i = 0
while i < length:
m00 = normalized_mur_cubes[i][PREDICTOR_MATRIX_KEY]
m06 = normalized_mur_cubes[i+1][PREDICTOR_MATRIX_KEY]
m09 = normalized_mur_cubes[i+2][PREDICTOR_MATRIX_KEY]
m12 = normalized_mur_cubes[i+3][PREDICTOR_MATRIX_KEY]
this_cube_timecycle = numpy.concatenate(
(m00, m06, m09, m12), axis=-1)
this_cube_timecycle = numpy.expand_dims(
this_cube_timecycle, axis=0)
if cancat_mur_cube is None:
cancat_mur_cube = copy.deepcopy(this_cube_timecycle)
else:
cancat_mur_cube = numpy.concatenate(
(cancat_mur_cube, this_cube_timecycle), axis=0
)
i+=4
return cancat_mur_cube
def concate_sst_cubes_files(normalized_mur_cubes):
cancat_sst_cube = None
length = len(normalized_mur_cubes)
i = 0
while i < length:
m06 = normalized_mur_cubes[i+2][SST_MATRIX_KEY]
this_cube_timecycle = numpy.expand_dims(
m06, axis=0)
if cancat_sst_cube is None:
cancat_sst_cube = copy.deepcopy(this_cube_timecycle)
else:
cancat_sst_cube = numpy.concatenate(
(cancat_sst_cube, this_cube_timecycle), axis=0
)
i+=4
return cancat_sst_cube
#============================================================================
#==================== Normalization Step =============================
#============================================================================
def _update_normalization_params(intermediate_normalization_dict, new_values):
"""Updates normalization params for one predictor.
:param intermediate_normalization_dict: Dictionary with the following keys.
intermediate_normalization_dict['num_values']: Number of values on which
current estimates are based.
intermediate_normalization_dict['mean_value']: Current estimate for mean.
intermediate_normalization_dict['mean_of_squares']: Current mean of squared
values.
:param new_values: numpy array of new values (will be used to update
`intermediate_normalization_dict`).
:return: intermediate_normalization_dict: Same as input but with updated
values.
"""
if MEAN_VALUE_KEY not in intermediate_normalization_dict:
intermediate_normalization_dict = {
NUM_VALUES_KEY: 0,
MEAN_VALUE_KEY: 0.,
MEAN_OF_SQUARES_KEY: 0.
}
these_means = numpy.array([
intermediate_normalization_dict[MEAN_VALUE_KEY], numpy.mean(new_values)
])
these_weights = numpy.array([
intermediate_normalization_dict[NUM_VALUES_KEY], new_values.size
])
intermediate_normalization_dict[MEAN_VALUE_KEY] = numpy.average(
these_means, weights=these_weights)
these_means = numpy.array([
intermediate_normalization_dict[MEAN_OF_SQUARES_KEY],
numpy.mean(new_values ** 2)
])
intermediate_normalization_dict[MEAN_OF_SQUARES_KEY] = numpy.average(
these_means, weights=these_weights)
intermediate_normalization_dict[NUM_VALUES_KEY] += new_values.size
return intermediate_normalization_dict
def _get_standard_deviation(intermediate_normalization_dict):
"""Computes stdev from intermediate normalization params.
:param intermediate_normalization_dict: See doc for
`_update_normalization_params`.
:return: standard_deviation: Standard deviation.
"""
num_values = float(intermediate_normalization_dict[NUM_VALUES_KEY])
multiplier = num_values / (num_values - 1)
return numpy.sqrt(multiplier * (
intermediate_normalization_dict[MEAN_OF_SQUARES_KEY] -
intermediate_normalization_dict[MEAN_VALUE_KEY] ** 2
))
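# Minimal check of the streaming-statistics helpers above (illustrative, not part
# of the pipeline): feeding the data in two batches must reproduce the plain
# numpy mean and ddof=1 standard deviation of the concatenated values.
def _normalization_params_example():
    batch_a = numpy.array([1.0, 2.0, 3.0])
    batch_b = numpy.array([4.0, 5.0])
    stats = _update_normalization_params({}, batch_a)
    stats = _update_normalization_params(stats, batch_b)
    full = numpy.concatenate([batch_a, batch_b])
    assert numpy.isclose(stats[MEAN_VALUE_KEY], numpy.mean(full))
    assert numpy.isclose(_get_standard_deviation(stats), numpy.std(full, ddof=1))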
def get_nam_normalization_params(netcdf_file_names, PREDICTOR_NAMES):
"""Computes normalization params (mean and stdev) for each predictor.
:param netcdf_file_names: 1-D list of paths to input files.
:return: normalization_dict: See input doc for `normalize_images`.
"""
predictor_names = None
norm_dict_by_predictor = None
for this_file_name in netcdf_file_names:
#print('Reading data from: "{0:s}"...'.format(this_file_name))
this_image_dict = read_nam_maps(this_file_name, PREDICTOR_NAMES)
if predictor_names is None:
predictor_names = this_image_dict[PREDICTOR_NAMES_KEY]
norm_dict_by_predictor = [{}] * len(predictor_names)
for m in range(len(predictor_names)):
norm_dict_by_predictor[m] = _update_normalization_params(
intermediate_normalization_dict=norm_dict_by_predictor[m],
new_values=this_image_dict[PREDICTOR_MATRIX_KEY][..., m]
)
print('\n')
normalization_dict = {}
for m in range(len(predictor_names)):
this_mean = norm_dict_by_predictor[m][MEAN_VALUE_KEY]
this_stdev = _get_standard_deviation(norm_dict_by_predictor[m])
normalization_dict[predictor_names[m]] = numpy.array(
[this_mean, this_stdev]
)
message_string = (
'Mean and standard deviation for "{0:s}" = {1:.4f}, {2:.4f}'
).format(predictor_names[m], this_mean, this_stdev)
print(message_string)
return normalization_dict
def normalize_sst_map(
predictor_matrix, predictor_names, normalization_dict=None):
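    # Note: unlike normalize_nam_maps further down, this function ignores the
    # normalization_dict argument it receives and z-scores each SST map against
    # that map's own mean and ddof=1 standard deviation.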
normalization_dict = {}
this_mean = numpy.mean(predictor_matrix)
this_stdev = numpy.std(predictor_matrix, ddof=1)
normalization_dict = numpy.array(
[this_mean, this_stdev]
)
predictor_matrix = (
(predictor_matrix - this_mean) / float(this_stdev))
return {
SST_MATRIX_KEY: predictor_matrix,
SST_NAME_KEY: predictor_names
}
def get_sst_normalization_params(NETCDF_HIGHRES_CUBES, PREDICTOR_NAMES):
"""Computes normalization params (mean and stdev) for each predictor.
:param netcdf_file_names: 1-D list of paths to input files.
:return: normalization_dict: See input doc for `normalize_images`.
"""
predictor_names = None
norm_dict_by_predictor = None
length = NETCDF_HIGHRES_CUBES[SST_MATRIX_KEY].shape[0]
#print(length)
for i in range(length):
#print('Reading data from: "{0:s}"...'.format(this_file_name))
this_image_dict = NETCDF_HIGHRES_CUBES[SST_MATRIX_KEY][i, :, :, :]
if predictor_names is None:
predictor_names = NETCDF_HIGHRES_CUBES[SST_NAME_KEY]
norm_dict_by_predictor = [{}] * len(predictor_names)
norm_dict_by_predictor = _update_normalization_params(
intermediate_normalization_dict=norm_dict_by_predictor,
new_values = this_image_dict
)
print('\n')
normalization_dict = {}
this_mean = norm_dict_by_predictor[MEAN_VALUE_KEY]
this_stdev = _get_standard_deviation(norm_dict_by_predictor)
normalization_dict = numpy.array([this_mean, this_stdev])
message_string = (
'Mean and standard deviation for "{0:s}" = {1:.4f}, {2:.4f}'
).format('SST', this_mean, this_stdev)
print(message_string)
return normalization_dict
## this function work when we are using 4 high resolution maps cube!!!
def get_mixed_normalization_params(NETCDF_HIGHRES_CUBES, PREDICTOR_NAMES):
predictor_names = None
norm_dict_by_predictor = None
length = NETCDF_HIGHRES_CUBES[PREDICTOR_MATRIX_KEY].shape[0]
#print(length)
for i in range(length):
#print('Reading data from: "{0:s}"...'.format(this_file_name))
this_image_dict = NETCDF_HIGHRES_CUBES[PREDICTOR_MATRIX_KEY][i, :, :, :]
#print(this_image_dict.shape)
if predictor_names is None:
predictor_names = NETCDF_HIGHRES_CUBES[PREDICTOR_NAMES_KEY]
norm_dict_by_predictor = [{}] * len(predictor_names)
#print(len(predictor_names))
for m in range(len(predictor_names)):
norm_dict_by_predictor[m] = _update_normalization_params(
intermediate_normalization_dict = norm_dict_by_predictor[m],
new_values=this_image_dict[..., m]
)
normalization_dict = {}
for m in range(len(predictor_names)):
this_mean = norm_dict_by_predictor[m][MEAN_VALUE_KEY]
this_stdev = _get_standard_deviation(norm_dict_by_predictor[m])
normalization_dict[predictor_names[m]] = numpy.array(
[this_mean, this_stdev]
)
message_string = (
'Mean and standard deviation for "{0:s}" = {1:.4f}, {2:.4f}'
).format(predictor_names[m], this_mean, this_stdev)
        print(message_string)
    return normalization_dict
def normalize_nam_maps(
predictor_matrix, predictor_names, normalization_dict=None):
"""Normalizes images to z-scores.
E = number of examples (storm objects) in file
M = number of rows in each storm-centered grid
N = number of columns in each storm-centered grid
C = number of channels (predictor variables)
:param predictor_matrix: E-by-M-by-N-by-C numpy array of predictor values.
:param predictor_names: length-C list of predictor names.
:param normalization_dict: Dictionary. Each key is the name of a predictor
value, and the corresponding value is a length-2 numpy array with
[mean, standard deviation]. If `normalization_dict is None`, mean and
standard deviation will be computed for each predictor.
:return: predictor_matrix: Normalized version of input.
:return: normalization_dict: See doc for input variable. If input was None,
this will be a newly created dictionary. Otherwise, this will be the
same dictionary passed as input.
"""
num_predictors = len(predictor_names)
if normalization_dict is None:
normalization_dict = {}
for m in range(num_predictors):
this_mean = numpy.mean(predictor_matrix[..., m])
this_stdev = numpy.std(predictor_matrix[..., m], ddof=1)
normalization_dict[predictor_names[m]] = numpy.array(
[this_mean, this_stdev]
)
for m in range(num_predictors):
this_mean = normalization_dict[predictor_names[m]][0]
this_stdev = normalization_dict[predictor_names[m]][1]
predictor_matrix[..., m] = (
(predictor_matrix[..., m] - this_mean) / float(this_stdev)
)
return {
PREDICTOR_MATRIX_KEY: predictor_matrix,
PREDICTOR_NAMES_KEY: predictor_names
}
def denormalize_nam_maps(predictor_matrix, predictor_names, normalization_dict):
"""Denormalizes images from z-scores back to original scales.
:param predictor_matrix: See doc for `normalize_images`.
:param predictor_names: Same.
:param normalization_dict: Same.
:return: predictor_matrix: Denormalized version of input.
"""
num_predictors = len(predictor_names)
for m in range(num_predictors):
this_mean = normalization_dict[predictor_names[m]][0]
this_stdev = normalization_dict[predictor_names[m]][1]
predictor_matrix[..., m] = (
this_mean + this_stdev * predictor_matrix[..., m]
)
return predictor_matrix
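# Round-trip sketch (illustrative): with the same statistics dictionary,
# denormalize_nam_maps inverts normalize_nam_maps. Note that normalize_nam_maps
# does not return the dictionary it computes internally, so callers that need it
# afterwards should build one first (e.g. via get_nam_normalization_params) and
# pass it in explicitly, as normalize_many_cubes below does.
def _normalize_roundtrip_example():
    names = ['demo_predictor']
    matrix = numpy.random.rand(2, 4, 4, 1)
    original = matrix.copy()
    stats = {names[0]: numpy.array([numpy.mean(matrix[..., 0]),
                                    numpy.std(matrix[..., 0], ddof=1)])}
    normalized = normalize_nam_maps(matrix, names, stats)[PREDICTOR_MATRIX_KEY]
    recovered = denormalize_nam_maps(normalized, names, stats)
    assert numpy.allclose(recovered, original)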
def normalize_many_cubes(netcdf_file_names, normalization_dict, predictor_names):
normmalized_cubes_dict = {}
for m in range(len(netcdf_file_names)):
this_cube = read_nam_maps(netcdf_file_names[m], predictor_names)
normmalized_cubes_dict[m] = normalize_nam_maps(
predictor_matrix = this_cube[PREDICTOR_MATRIX_KEY],
predictor_names = this_cube[PREDICTOR_NAMES_KEY],
normalization_dict = normalization_dict)
message_string = (
'The normalization of ' + 'netcdf_file_names is done!')
print(message_string)
return normmalized_cubes_dict, netcdf_file_names
def normalize_mixed_cubes(NETCDF_HIGHRES_CUBES, normalization_dict):
normmalized_cubes_dict = {}
length = NETCDF_HIGHRES_CUBES[PREDICTOR_MATRIX_KEY].shape[0]
for m in range(length):
this_cube = NETCDF_HIGHRES_CUBES[PREDICTOR_MATRIX_KEY][m, :, :, :]
normmalized_cubes_dict[m] = normalize_nam_maps(
predictor_matrix = this_cube,
predictor_names = NETCDF_HIGHRES_CUBES[PREDICTOR_NAMES_KEY],
normalization_dict = normalization_dict)
message_string = (
'The normalization of ' + 'netcdf_file_names is done!')
print(message_string)
return normmalized_cubes_dict
def normalize_sst_cubes(NETCDF_HIGHRES_CUBES, normalization_dict):
normmalized_cubes_dict = {}
length = NETCDF_HIGHRES_CUBES[SST_MATRIX_KEY].shape[0]
for m in range(length):
this_cube = NETCDF_HIGHRES_CUBES[SST_MATRIX_KEY][m, :, :, :]
normmalized_cubes_dict[m] = normalize_sst_map(
predictor_matrix = this_cube,
predictor_names = NETCDF_HIGHRES_CUBES[SST_NAME_KEY],
normalization_dict = normalization_dict)
message_string = (
'The normalization of ' + 'netcdf_file_names is done!')
print(message_string)
return normmalized_cubes_dict
#===============================================================================
#============================ Target preparation ===============================
#===============================================================================
def plot_visibility_cases(data, year, margin):
vis = data['VIS_Cat'].value_counts()
nan = data['VIS_Cat'].isna().sum()
values = vis.values
fvalues = numpy.insert(values, 0, nan)
names = ["VIS_nan", "VIS > 4mi", "1mi< VIS<= 4mi", "VIS =<1mi"]
df = pandas.DataFrame(columns = ["VIS-Cat", "VIS_Count"])
df["VIS-Cat"] = names
df["VIS_Count"] = fvalues
# plot the count
fig, ax = pyplot.subplots(figsize = FIG_DEFULT_SIZE)
ax = sns.barplot(x = "VIS-Cat", y="VIS_Count", data=df,
palette="Blues_d")
xlocs, xlabs = pyplot.xticks()
pyplot.xlabel('Visibility class')
pyplot.ylabel('The number of cases')
txt = ('The number of visibility cases for {0}').format(year)
pyplot.title(txt)
for i, v in enumerate(df["VIS_Count"]):
pyplot.text(xlocs[i] , v + margin, str(v),
fontsize=12, color='red',
horizontalalignment='center', verticalalignment='center')
pyplot.show()
def reading_csv_target_file(csv_file_name):
data = pandas.read_csv(csv_file_name, header=0, sep=',')
for i in range(len(data)):
namesplit = os.path.split(data['Name'][i])[-1]
year = namesplit.replace(namesplit[4:], '')
month = namesplit.replace(namesplit[0:4], '').replace(namesplit[6:], '')
day = namesplit.replace(namesplit[0:6], '').replace(namesplit[8:], '')
timecycle = namesplit.replace(namesplit[0:9], '')
        data.loc[i, 'Year'] = year
        data.loc[i, 'Month'] = month
        data.loc[i, 'Day'] = day
        data.loc[i, 'TimeCycle'] = timecycle
return data
class targets():
def __init__(self, targets_file_names, training_years, validation_years, testing_years, DEFAULT_TARGET_DIR_NAME, priority_calss):
self.targets_file_names = targets_file_names
self.DEFAULT_TARGET_DIR_NAME = DEFAULT_TARGET_DIR_NAME
self.training_years = training_years
self.validation_years = validation_years
self.testing_years = testing_years
self.priority_calss = priority_calss
def multiclass_target(self):
training_targets = pandas.DataFrame()
validation_targets = pandas.DataFrame()
testing_targets = pandas.DataFrame()
TRAIN_FRAMES = []
for i in self.training_years:
year_name = self.targets_file_names[i]
file_name = self.DEFAULT_TARGET_DIR_NAME + year_name
year_data = pandas.read_csv(file_name, header=0, sep=',')
year_data = year_data['VIS_Cat']
TRAIN_FRAMES.append(year_data)
training_targets = pandas.concat(TRAIN_FRAMES)
#print(training_targets.shape)
categorical_training_targets = to_categorical(training_targets)
#print(categorical_training_targets.shape)
VALID_FRAMES = []
for j in self.validation_years:
year_name = self.targets_file_names[j]
file_name = self.DEFAULT_TARGET_DIR_NAME + year_name
year_data = pandas.read_csv(file_name, header=0, sep=',')
year_data = year_data['VIS_Cat']
VALID_FRAMES.append(year_data)
validation_targets = pandas.concat(VALID_FRAMES)
#print(validation_targets.shape)
categorical_validation_targets = to_categorical(validation_targets)
#print(categorical_validation_targets.shape)
TEST_FRAMES = []
for k in self.testing_years:
year_name = self.targets_file_names[k]
file_name = self.DEFAULT_TARGET_DIR_NAME + year_name
year_data = pandas.read_csv(file_name, header=0, sep=',')
year_data = year_data['VIS_Cat']
TEST_FRAMES.append(year_data)
testing_targets = pandas.concat(TEST_FRAMES)
#print(testing_targets.shape)
categorical_testing_targets = to_categorical(testing_targets)
#print(categorical_testing_targets.shape)
return [training_targets, categorical_training_targets,
validation_targets, categorical_validation_targets,
testing_targets, categorical_testing_targets]
def binary_target(self):
training_targets = pandas.DataFrame()
validation_targets = pandas.DataFrame()
testing_targets =
|
pandas.DataFrame()
|
pandas.DataFrame
|
"""
Tests that work on both the Python and C engines but do not have a
specific classification into the other test modules.
"""
import codecs
import csv
from datetime import datetime
from io import StringIO
import os
import platform
from tempfile import TemporaryFile
from urllib.error import URLError
import numpy as np
import pytest
from pandas._libs.tslib import Timestamp
from pandas.errors import DtypeWarning, EmptyDataError, ParserError
from pandas import DataFrame, Index, MultiIndex, Series, compat, concat
import pandas._testing as tm
from pandas.io.parsers import CParserWrapper, TextFileReader, TextParser
def test_override_set_noconvert_columns():
# see gh-17351
#
# Usecols needs to be sorted in _set_noconvert_columns based
# on the test_usecols_with_parse_dates test from test_usecols.py
class MyTextFileReader(TextFileReader):
def __init__(self):
self._currow = 0
self.squeeze = False
class MyCParserWrapper(CParserWrapper):
def _set_noconvert_columns(self):
if self.usecols_dtype == "integer":
# self.usecols is a set, which is documented as unordered
# but in practice, a CPython set of integers is sorted.
# In other implementations this assumption does not hold.
# The following code simulates a different order, which
# before GH 17351 would cause the wrong columns to be
# converted via the parse_dates parameter
self.usecols = list(self.usecols)
self.usecols.reverse()
return CParserWrapper._set_noconvert_columns(self)
data = """a,b,c,d,e
0,1,20140101,0900,4
0,1,20140102,1000,4"""
parse_dates = [[1, 2]]
cols = {
"a": [0, 0],
"c_d": [Timestamp("2014-01-01 09:00:00"), Timestamp("2014-01-02 10:00:00")],
}
expected = DataFrame(cols, columns=["c_d", "a"])
parser = MyTextFileReader()
parser.options = {
"usecols": [0, 2, 3],
"parse_dates": parse_dates,
"delimiter": ",",
}
parser._engine = MyCParserWrapper(StringIO(data), **parser.options)
result = parser.read()
tm.assert_frame_equal(result, expected)
def test_empty_decimal_marker(all_parsers):
data = """A|B|C
1|2,334|5
10|13|10.
"""
# Parsers support only length-1 decimals
msg = "Only length-1 decimal markers supported"
parser = all_parsers
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), decimal="")
def test_bad_stream_exception(all_parsers, csv_dir_path):
# see gh-13652
#
# This test validates that both the Python engine and C engine will
# raise UnicodeDecodeError instead of C engine raising ParserError
# and swallowing the exception that caused read to fail.
path = os.path.join(csv_dir_path, "sauron.SHIFT_JIS.csv")
codec = codecs.lookup("utf-8")
utf8 = codecs.lookup("utf-8")
parser = all_parsers
msg = "'utf-8' codec can't decode byte"
# Stream must be binary UTF8.
with open(path, "rb") as handle, codecs.StreamRecoder(
handle, utf8.encode, utf8.decode, codec.streamreader, codec.streamwriter
) as stream:
with pytest.raises(UnicodeDecodeError, match=msg):
parser.read_csv(stream)
def test_read_csv_local(all_parsers, csv1):
prefix = "file:///" if compat.is_platform_windows() else "file://"
parser = all_parsers
fname = prefix + str(os.path.abspath(csv1))
result = parser.read_csv(fname, index_col=0, parse_dates=True)
expected = DataFrame(
[
[0.980269, 3.685731, -0.364216805298, -1.159738],
[1.047916, -0.041232, -0.16181208307, 0.212549],
[0.498581, 0.731168, -0.537677223318, 1.346270],
[1.120202, 1.567621, 0.00364077397681, 0.675253],
[-0.487094, 0.571455, -1.6116394093, 0.103469],
[0.836649, 0.246462, 0.588542635376, 1.062782],
[-0.157161, 1.340307, 1.1957779562, -1.097007],
],
columns=["A", "B", "C", "D"],
index=Index(
[
datetime(2000, 1, 3),
datetime(2000, 1, 4),
datetime(2000, 1, 5),
datetime(2000, 1, 6),
datetime(2000, 1, 7),
datetime(2000, 1, 10),
datetime(2000, 1, 11),
],
name="index",
),
)
tm.assert_frame_equal(result, expected)
def test_1000_sep(all_parsers):
parser = all_parsers
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({"A": [1, 10], "B": [2334, 13], "C": [5, 10.0]})
result = parser.read_csv(StringIO(data), sep="|", thousands=",")
tm.assert_frame_equal(result, expected)
def test_squeeze(all_parsers):
data = """\
a,1
b,2
c,3
"""
parser = all_parsers
index = Index(["a", "b", "c"], name=0)
expected = Series([1, 2, 3], name=1, index=index)
result = parser.read_csv(StringIO(data), index_col=0, header=None, squeeze=True)
tm.assert_series_equal(result, expected)
# see gh-8217
#
# Series should not be a view.
assert not result._is_view
def test_malformed(all_parsers):
# see gh-6607
parser = all_parsers
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
msg = "Expected 3 fields in line 4, saw 5"
with pytest.raises(ParserError, match=msg):
parser.read_csv(StringIO(data), header=1, comment="#")
@pytest.mark.parametrize("nrows", [5, 3, None])
def test_malformed_chunks(all_parsers, nrows):
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
parser = all_parsers
msg = "Expected 3 fields in line 6, saw 5"
reader = parser.read_csv(
StringIO(data), header=1, comment="#", iterator=True, chunksize=1, skiprows=[2]
)
with pytest.raises(ParserError, match=msg):
reader.read(nrows)
def test_unnamed_columns(all_parsers):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
parser = all_parsers
expected = DataFrame(
[[1, 2, 3, 4, 5], [6, 7, 8, 9, 10], [11, 12, 13, 14, 15]],
dtype=np.int64,
columns=["A", "B", "C", "Unnamed: 3", "Unnamed: 4"],
)
result = parser.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
def test_csv_mixed_type(all_parsers):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
parser = all_parsers
expected = DataFrame({"A": ["a", "b", "c"], "B": [1, 3, 4], "C": [2, 4, 5]})
result = parser.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
def test_read_csv_low_memory_no_rows_with_index(all_parsers):
# see gh-21141
parser = all_parsers
if not parser.low_memory:
pytest.skip("This is a low-memory specific test")
data = """A,B,C
1,1,1,2
2,2,3,4
3,3,4,5
"""
result = parser.read_csv(StringIO(data), low_memory=True, index_col=0, nrows=0)
expected = DataFrame(columns=["A", "B", "C"])
tm.assert_frame_equal(result, expected)
def test_read_csv_dataframe(all_parsers, csv1):
parser = all_parsers
result = parser.read_csv(csv1, index_col=0, parse_dates=True)
expected = DataFrame(
[
[0.980269, 3.685731, -0.364216805298, -1.159738],
[1.047916, -0.041232, -0.16181208307, 0.212549],
[0.498581, 0.731168, -0.537677223318, 1.346270],
[1.120202, 1.567621, 0.00364077397681, 0.675253],
[-0.487094, 0.571455, -1.6116394093, 0.103469],
[0.836649, 0.246462, 0.588542635376, 1.062782],
[-0.157161, 1.340307, 1.1957779562, -1.097007],
],
columns=["A", "B", "C", "D"],
index=Index(
[
datetime(2000, 1, 3),
datetime(2000, 1, 4),
datetime(2000, 1, 5),
datetime(2000, 1, 6),
datetime(2000, 1, 7),
datetime(2000, 1, 10),
datetime(2000, 1, 11),
],
name="index",
),
)
tm.assert_frame_equal(result, expected)
def test_read_csv_no_index_name(all_parsers, csv_dir_path):
parser = all_parsers
csv2 = os.path.join(csv_dir_path, "test2.csv")
result = parser.read_csv(csv2, index_col=0, parse_dates=True)
expected = DataFrame(
[
[0.980269, 3.685731, -0.364216805298, -1.159738, "foo"],
[1.047916, -0.041232, -0.16181208307, 0.212549, "bar"],
[0.498581, 0.731168, -0.537677223318, 1.346270, "baz"],
[1.120202, 1.567621, 0.00364077397681, 0.675253, "qux"],
[-0.487094, 0.571455, -1.6116394093, 0.103469, "foo2"],
],
columns=["A", "B", "C", "D", "E"],
index=Index(
[
datetime(2000, 1, 3),
datetime(2000, 1, 4),
datetime(2000, 1, 5),
datetime(2000, 1, 6),
datetime(2000, 1, 7),
]
),
)
tm.assert_frame_equal(result, expected)
def test_read_csv_wrong_num_columns(all_parsers):
# Too few columns.
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
parser = all_parsers
msg = "Expected 6 fields in line 3, saw 7"
with pytest.raises(ParserError, match=msg):
parser.read_csv(StringIO(data))
def test_read_duplicate_index_explicit(all_parsers):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
parser = all_parsers
result = parser.read_csv(StringIO(data), index_col=0)
expected = DataFrame(
[
[2, 3, 4, 5],
[7, 8, 9, 10],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
],
columns=["A", "B", "C", "D"],
index=Index(["foo", "bar", "baz", "qux", "foo", "bar"], name="index"),
)
tm.assert_frame_equal(result, expected)
def test_read_duplicate_index_implicit(all_parsers):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
parser = all_parsers
result = parser.read_csv(StringIO(data))
expected = DataFrame(
[
[2, 3, 4, 5],
[7, 8, 9, 10],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
],
columns=["A", "B", "C", "D"],
index=Index(["foo", "bar", "baz", "qux", "foo", "bar"]),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"data,kwargs,expected",
[
(
"A,B\nTrue,1\nFalse,2\nTrue,3",
dict(),
DataFrame([[True, 1], [False, 2], [True, 3]], columns=["A", "B"]),
),
(
"A,B\nYES,1\nno,2\nyes,3\nNo,3\nYes,3",
dict(true_values=["yes", "Yes", "YES"], false_values=["no", "NO", "No"]),
DataFrame(
[[True, 1], [False, 2], [True, 3], [False, 3], [True, 3]],
columns=["A", "B"],
),
),
(
"A,B\nTRUE,1\nFALSE,2\nTRUE,3",
dict(),
DataFrame([[True, 1], [False, 2], [True, 3]], columns=["A", "B"]),
),
(
"A,B\nfoo,bar\nbar,foo",
dict(true_values=["foo"], false_values=["bar"]),
DataFrame([[True, False], [False, True]], columns=["A", "B"]),
),
],
)
def test_parse_bool(all_parsers, data, kwargs, expected):
parser = all_parsers
result = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(result, expected)
def test_int_conversion(all_parsers):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
parser = all_parsers
result = parser.read_csv(StringIO(data))
expected = DataFrame([[1.0, 1], [2.0, 2], [3.0, 3]], columns=["A", "B"])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("nrows", [3, 3.0])
def test_read_nrows(all_parsers, nrows):
# see gh-10476
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
expected = DataFrame(
[["foo", 2, 3, 4, 5], ["bar", 7, 8, 9, 10], ["baz", 12, 13, 14, 15]],
columns=["index", "A", "B", "C", "D"],
)
parser = all_parsers
result = parser.read_csv(StringIO(data), nrows=nrows)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("nrows", [1.2, "foo", -1])
def test_read_nrows_bad(all_parsers, nrows):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
msg = r"'nrows' must be an integer >=0"
parser = all_parsers
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), nrows=nrows)
@pytest.mark.parametrize("index_col", [0, "index"])
def test_read_chunksize_with_index(all_parsers, index_col):
parser = all_parsers
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
reader = parser.read_csv(StringIO(data), index_col=0, chunksize=2)
expected = DataFrame(
[
["foo", 2, 3, 4, 5],
["bar", 7, 8, 9, 10],
["baz", 12, 13, 14, 15],
["qux", 12, 13, 14, 15],
["foo2", 12, 13, 14, 15],
["bar2", 12, 13, 14, 15],
],
columns=["index", "A", "B", "C", "D"],
)
expected = expected.set_index("index")
chunks = list(reader)
tm.assert_frame_equal(chunks[0], expected[:2])
tm.assert_frame_equal(chunks[1], expected[2:4])
tm.assert_frame_equal(chunks[2], expected[4:])
@pytest.mark.parametrize("chunksize", [1.3, "foo", 0])
def test_read_chunksize_bad(all_parsers, chunksize):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
msg = r"'chunksize' must be an integer >=1"
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), chunksize=chunksize)
@pytest.mark.parametrize("chunksize", [2, 8])
def test_read_chunksize_and_nrows(all_parsers, chunksize):
# see gh-15755
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
kwargs = dict(index_col=0, nrows=5)
reader = parser.read_csv(StringIO(data), chunksize=chunksize, **kwargs)
expected = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(concat(reader), expected)
def test_read_chunksize_and_nrows_changing_size(all_parsers):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
kwargs = dict(index_col=0, nrows=5)
reader = parser.read_csv(StringIO(data), chunksize=8, **kwargs)
expected = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(reader.get_chunk(size=2), expected.iloc[:2])
tm.assert_frame_equal(reader.get_chunk(size=4), expected.iloc[2:5])
with pytest.raises(StopIteration, match=""):
reader.get_chunk(size=3)
def test_get_chunk_passed_chunksize(all_parsers):
parser = all_parsers
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
reader = parser.read_csv(StringIO(data), chunksize=2)
result = reader.get_chunk()
expected = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["A", "B", "C"])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("kwargs", [dict(), dict(index_col=0)])
def test_read_chunksize_compat(all_parsers, kwargs):
# see gh-12185
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
reader = parser.read_csv(StringIO(data), chunksize=2, **kwargs)
result = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(concat(reader), result)
def test_read_chunksize_jagged_names(all_parsers):
# see gh-23509
parser = all_parsers
data = "\n".join(["0"] * 7 + [",".join(["0"] * 10)])
expected = DataFrame([[0] + [np.nan] * 9] * 7 + [[0] * 10])
reader = parser.read_csv(StringIO(data), names=range(10), chunksize=4)
result = concat(reader)
tm.assert_frame_equal(result, expected)
def test_read_data_list(all_parsers):
parser = all_parsers
kwargs = dict(index_col=0)
data = "A,B,C\nfoo,1,2,3\nbar,4,5,6"
data_list = [["A", "B", "C"], ["foo", "1", "2", "3"], ["bar", "4", "5", "6"]]
expected = parser.read_csv(StringIO(data), **kwargs)
parser = TextParser(data_list, chunksize=2, **kwargs)
result = parser.read()
tm.assert_frame_equal(result, expected)
def test_iterator(all_parsers):
# see gh-6607
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
kwargs = dict(index_col=0)
expected = parser.read_csv(StringIO(data), **kwargs)
reader = parser.read_csv(StringIO(data), iterator=True, **kwargs)
first_chunk = reader.read(3)
tm.assert_frame_equal(first_chunk, expected[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, expected[3:])
def test_iterator2(all_parsers):
parser = all_parsers
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = parser.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
index=["foo", "bar", "baz"],
columns=["A", "B", "C"],
)
tm.assert_frame_equal(result[0], expected)
def test_reader_list(all_parsers):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
kwargs = dict(index_col=0)
lines = list(csv.reader(StringIO(data)))
reader = TextParser(lines, chunksize=2, **kwargs)
expected = parser.read_csv(StringIO(data), **kwargs)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], expected[:2])
tm.assert_frame_equal(chunks[1], expected[2:4])
tm.assert_frame_equal(chunks[2], expected[4:])
def test_reader_list_skiprows(all_parsers):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
kwargs = dict(index_col=0)
lines = list(csv.reader(StringIO(data)))
reader = TextParser(lines, chunksize=2, skiprows=[1], **kwargs)
expected = parser.read_csv(StringIO(data), **kwargs)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], expected[1:3])
def test_iterator_stop_on_chunksize(all_parsers):
# gh-3967: stopping iteration when chunksize is specified
parser = all_parsers
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = parser.read_csv(StringIO(data), chunksize=1)
result = list(reader)
assert len(result) == 3
expected = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
index=["foo", "bar", "baz"],
columns=["A", "B", "C"],
)
tm.assert_frame_equal(concat(result), expected)
@pytest.mark.parametrize(
"kwargs", [dict(iterator=True, chunksize=1), dict(iterator=True), dict(chunksize=1)]
)
def test_iterator_skipfooter_errors(all_parsers, kwargs):
msg = "'skipfooter' not supported for 'iteration'"
parser = all_parsers
data = "a\n1\n2"
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), skipfooter=1, **kwargs)
def test_nrows_skipfooter_errors(all_parsers):
msg = "'skipfooter' not supported with 'nrows'"
data = "a\n1\n2\n3\n4\n5\n6"
parser = all_parsers
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), skipfooter=1, nrows=5)
@pytest.mark.parametrize(
"data,kwargs,expected",
[
(
"""foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
""",
dict(index_col=0, names=["index", "A", "B", "C", "D"]),
DataFrame(
[
[2, 3, 4, 5],
[7, 8, 9, 10],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
],
index=Index(["foo", "bar", "baz", "qux", "foo2", "bar2"], name="index"),
columns=["A", "B", "C", "D"],
),
),
(
"""foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
""",
dict(index_col=[0, 1], names=["index1", "index2", "A", "B", "C", "D"]),
DataFrame(
[
[2, 3, 4, 5],
[7, 8, 9, 10],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
],
index=MultiIndex.from_tuples(
[
("foo", "one"),
("foo", "two"),
("foo", "three"),
("bar", "one"),
("bar", "two"),
],
names=["index1", "index2"],
),
columns=["A", "B", "C", "D"],
),
),
],
)
def test_pass_names_with_index(all_parsers, data, kwargs, expected):
parser = all_parsers
result = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("index_col", [[0, 1], [1, 0]])
def test_multi_index_no_level_names(all_parsers, index_col):
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
headless_data = "\n".join(data.split("\n")[1:])
names = ["A", "B", "C", "D"]
parser = all_parsers
result = parser.read_csv(
StringIO(headless_data), index_col=index_col, header=None, names=names
)
expected = parser.read_csv(StringIO(data), index_col=index_col)
# No index names in headless data.
expected.index.names = [None] * 2
tm.assert_frame_equal(result, expected)
def test_multi_index_no_level_names_implicit(all_parsers):
parser = all_parsers
data = """A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
result = parser.read_csv(StringIO(data))
expected = DataFrame(
[
[2, 3, 4, 5],
[7, 8, 9, 10],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
],
columns=["A", "B", "C", "D"],
index=MultiIndex.from_tuples(
[
("foo", "one"),
("foo", "two"),
("foo", "three"),
("bar", "one"),
("bar", "two"),
]
),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"data,expected,header",
[
("a,b", DataFrame(columns=["a", "b"]), [0]),
(
"a,b\nc,d",
DataFrame(columns=MultiIndex.from_tuples([("a", "c"), ("b", "d")])),
[0, 1],
),
],
)
@pytest.mark.parametrize("round_trip", [True, False])
def test_multi_index_blank_df(all_parsers, data, expected, header, round_trip):
# see gh-14545
parser = all_parsers
data = expected.to_csv(index=False) if round_trip else data
result = parser.read_csv(StringIO(data), header=header)
tm.assert_frame_equal(result, expected)
def test_no_unnamed_index(all_parsers):
parser = all_parsers
data = """ id c0 c1 c2
0 1 0 a b
1 2 0 c d
2 2 2 e f
"""
result = parser.read_csv(StringIO(data), sep=" ")
expected = DataFrame(
[[0, 1, 0, "a", "b"], [1, 2, 0, "c", "d"], [2, 2, 2, "e", "f"]],
columns=["Unnamed: 0", "id", "c0", "c1", "c2"],
)
tm.assert_frame_equal(result, expected)
def test_read_csv_parse_simple_list(all_parsers):
parser = all_parsers
data = """foo
bar baz
qux foo
foo
bar"""
result = parser.read_csv(StringIO(data), header=None)
expected = DataFrame(["foo", "bar baz", "qux foo", "foo", "bar"])
tm.assert_frame_equal(result, expected)
@tm.network
def test_url(all_parsers, csv_dir_path):
# TODO: FTP testing
parser = all_parsers
kwargs = dict(sep="\t")
url = (
"https://raw.github.com/pandas-dev/pandas/master/"
"pandas/tests/io/parser/data/salaries.csv"
)
url_result = parser.read_csv(url, **kwargs)
local_path = os.path.join(csv_dir_path, "salaries.csv")
local_result = parser.read_csv(local_path, **kwargs)
tm.assert_frame_equal(url_result, local_result)
@pytest.mark.slow
def test_local_file(all_parsers, csv_dir_path):
parser = all_parsers
kwargs = dict(sep="\t")
local_path = os.path.join(csv_dir_path, "salaries.csv")
local_result = parser.read_csv(local_path, **kwargs)
url = "file://localhost/" + local_path
try:
url_result = parser.read_csv(url, **kwargs)
tm.assert_frame_equal(url_result, local_result)
except URLError:
# Fails on some systems.
pytest.skip("Failing on: " + " ".join(platform.uname()))
def test_path_path_lib(all_parsers):
parser = all_parsers
df = tm.makeDataFrame()
result = tm.round_trip_pathlib(df.to_csv, lambda p: parser.read_csv(p, index_col=0))
tm.assert_frame_equal(df, result)
def test_path_local_path(all_parsers):
parser = all_parsers
df = tm.makeDataFrame()
result = tm.round_trip_localpath(
df.to_csv, lambda p: parser.read_csv(p, index_col=0)
)
tm.assert_frame_equal(df, result)
def test_nonexistent_path(all_parsers):
# gh-2428: pls no segfault
# gh-14086: raise more helpful FileNotFoundError
# GH#29233 "File foo" instead of "File b'foo'"
parser = all_parsers
path = "{}.csv".format(tm.rands(10))
msg = f"File {path} does not exist" if parser.engine == "c" else r"\[Errno 2\]"
with pytest.raises(FileNotFoundError, match=msg) as e:
parser.read_csv(path)
filename = e.value.filename
assert path == filename
def test_missing_trailing_delimiters(all_parsers):
parser = all_parsers
data = """A,B,C,D
1,2,3,4
1,3,3,
1,4,5"""
result = parser.read_csv(StringIO(data))
expected = DataFrame(
[[1, 2, 3, 4], [1, 3, 3, np.nan], [1, 4, 5, np.nan]],
columns=["A", "B", "C", "D"],
)
tm.assert_frame_equal(result, expected)
def test_skip_initial_space(all_parsers):
data = (
'"09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, '
"1.00361, 1.12551, 330.65659, 0355626618.16711, 73.48821, "
"314.11625, 1917.09447, 179.71425, 80.000, 240.000, -350, "
"70.06056, 344.98370, 1, 1, -0.689265, -0.692787, "
"0.212036, 14.7674, 41.605, -9999.0, -9999.0, "
"-9999.0, -9999.0, -9999.0, -9999.0, 000, 012, 128"
)
parser = all_parsers
result = parser.read_csv(
StringIO(data),
names=list(range(33)),
header=None,
na_values=["-9999.0"],
skipinitialspace=True,
)
expected = DataFrame(
[
[
"09-Apr-2012",
"01:10:18.300",
2456026.548822908,
12849,
1.00361,
1.12551,
330.65659,
355626618.16711,
73.48821,
314.11625,
1917.09447,
179.71425,
80.0,
240.0,
-350,
70.06056,
344.9837,
1,
1,
-0.689265,
-0.692787,
0.212036,
14.7674,
41.605,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
0,
12,
128,
]
]
)
tm.assert_frame_equal(result, expected)
def test_trailing_delimiters(all_parsers):
# see gh-2442
data = """A,B,C
1,2,3,
4,5,6,
7,8,9,"""
parser = all_parsers
result = parser.read_csv(StringIO(data), index_col=False)
expected = DataFrame({"A": [1, 4, 7], "B": [2, 5, 8], "C": [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_escapechar(all_parsers):
# https://stackoverflow.com/questions/13824840/feature-request-for-
# pandas-read-csv
data = '''SEARCH_TERM,ACTUAL_URL
"bra tv bord","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals serie","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"''' # noqa
parser = all_parsers
result = parser.read_csv(
StringIO(data), escapechar="\\", quotechar='"', encoding="utf-8"
)
assert result["SEARCH_TERM"][2] == 'SLAGBORD, "Bergslagen", IKEA:s 1700-tals serie'
tm.assert_index_equal(result.columns, Index(["SEARCH_TERM", "ACTUAL_URL"]))
def test_int64_min_issues(all_parsers):
# see gh-2599
parser = all_parsers
data = "A,B\n0,0\n0,"
result = parser.read_csv(StringIO(data))
expected = DataFrame({"A": [0, 0], "B": [0, np.nan]})
tm.assert_frame_equal(result, expected)
def test_parse_integers_above_fp_precision(all_parsers):
data = """Numbers
17007000002000191
17007000002000191
17007000002000191
17007000002000191
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000194"""
parser = all_parsers
result = parser.read_csv(StringIO(data))
expected = DataFrame(
{
"Numbers": [
17007000002000191,
17007000002000191,
17007000002000191,
17007000002000191,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000194,
]
}
)
tm.assert_frame_equal(result, expected)
def test_chunks_have_consistent_numerical_type(all_parsers):
parser = all_parsers
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ["1.0", "2.0"] + integers)
# Coercions should work without warnings.
with tm.assert_produces_warning(None):
result = parser.read_csv(StringIO(data))
assert type(result.a[0]) is np.float64
    assert result.a.dtype == float
def test_warn_if_chunks_have_mismatched_type(all_parsers):
warning_type = None
parser = all_parsers
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ["a", "b"] + integers)
# see gh-3866: if chunks are different types and can't
# be coerced using numerical types, then issue warning.
if parser.engine == "c" and parser.low_memory:
warning_type = DtypeWarning
with tm.assert_produces_warning(warning_type):
df = parser.read_csv(StringIO(data))
    assert df.a.dtype == object
@pytest.mark.parametrize("sep", [" ", r"\s+"])
def test_integer_overflow_bug(all_parsers, sep):
# see gh-2601
data = "65248E10 11\n55555E55 22\n"
parser = all_parsers
result = parser.read_csv(StringIO(data), header=None, sep=sep)
expected = DataFrame([[6.5248e14, 11], [5.5555e59, 22]])
tm.assert_frame_equal(result, expected)
def test_catch_too_many_names(all_parsers):
# see gh-5156
data = """\
1,2,3
4,,6
7,8,9
10,11,12\n"""
parser = all_parsers
msg = (
"Too many columns specified: expected 4 and found 3"
if parser.engine == "c"
else "Number of passed names did not match "
"number of header fields in the file"
)
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), header=0, names=["a", "b", "c", "d"])
def test_ignore_leading_whitespace(all_parsers):
# see gh-3374, gh-6607
parser = all_parsers
data = " a b c\n 1 2 3\n 4 5 6\n 7 8 9"
result = parser.read_csv(StringIO(data), sep=r"\s+")
expected = DataFrame({"a": [1, 4, 7], "b": [2, 5, 8], "c": [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_chunk_begins_with_newline_whitespace(all_parsers):
# see gh-10022
parser = all_parsers
data = "\n hello\nworld\n"
result = parser.read_csv(StringIO(data), header=None)
expected = DataFrame([" hello", "world"])
tm.assert_frame_equal(result, expected)
def test_empty_with_index(all_parsers):
# see gh-10184
data = "x,y"
parser = all_parsers
result = parser.read_csv(StringIO(data), index_col=0)
expected = DataFrame(columns=["y"], index=Index([], name="x"))
tm.assert_frame_equal(result, expected)
def test_empty_with_multi_index(all_parsers):
# see gh-10467
data = "x,y,z"
parser = all_parsers
result = parser.read_csv(StringIO(data), index_col=["x", "y"])
expected = DataFrame(
columns=["z"], index=MultiIndex.from_arrays([[]] * 2, names=["x", "y"])
)
tm.assert_frame_equal(result, expected)
def test_empty_with_reversed_multi_index(all_parsers):
data = "x,y,z"
parser = all_parsers
result = parser.read_csv(StringIO(data), index_col=[1, 0])
expected = DataFrame(
columns=["z"], index=MultiIndex.from_arrays([[]] * 2, names=["y", "x"])
)
tm.assert_frame_equal(result, expected)
def test_float_parser(all_parsers):
# see gh-9565
parser = all_parsers
data = "45e-1,4.5,45.,inf,-inf"
result = parser.read_csv(StringIO(data), header=None)
expected = DataFrame([[float(s) for s in data.split(",")]])
tm.assert_frame_equal(result, expected)
def test_scientific_no_exponent(all_parsers):
# see gh-12215
df = DataFrame.from_dict({"w": ["2e"], "x": ["3E"], "y": ["42e"], "z": ["632E"]})
data = df.to_csv(index=False)
parser = all_parsers
for precision in parser.float_precision_choices:
df_roundtrip = parser.read_csv(StringIO(data), float_precision=precision)
tm.assert_frame_equal(df_roundtrip, df)
@pytest.mark.parametrize("conv", [None, np.int64, np.uint64])
def test_int64_overflow(all_parsers, conv):
data = """ID
00013007854817840016671868
00013007854817840016749251
00013007854817840016754630
00013007854817840016781876
00013007854817840017028824
00013007854817840017963235
00013007854817840018860166"""
parser = all_parsers
if conv is None:
# 13007854817840016671868 > UINT64_MAX, so this
# will overflow and return object as the dtype.
result = parser.read_csv(StringIO(data))
expected = DataFrame(
[
"00013007854817840016671868",
"00013007854817840016749251",
"00013007854817840016754630",
"00013007854817840016781876",
"00013007854817840017028824",
"00013007854817840017963235",
"00013007854817840018860166",
],
columns=["ID"],
)
tm.assert_frame_equal(result, expected)
else:
# 13007854817840016671868 > UINT64_MAX, so attempts
# to cast to either int64 or uint64 will result in
# an OverflowError being raised.
msg = (
"(Python int too large to convert to C long)|"
"(long too big to convert)|"
"(int too big to convert)"
)
with pytest.raises(OverflowError, match=msg):
parser.read_csv(StringIO(data), converters={"ID": conv})
@pytest.mark.parametrize(
"val", [np.iinfo(np.uint64).max, np.iinfo(np.int64).max, np.iinfo(np.int64).min]
)
def test_int64_uint64_range(all_parsers, val):
# These numbers fall right inside the int64-uint64
# range, so they should be parsed as string.
parser = all_parsers
result = parser.read_csv(StringIO(str(val)), header=None)
expected = DataFrame([val])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"val", [np.iinfo(np.uint64).max + 1, np.iinfo(np.int64).min - 1]
)
def test_outside_int64_uint64_range(all_parsers, val):
# These numbers fall just outside the int64-uint64
# range, so they should be parsed as string.
parser = all_parsers
result = parser.read_csv(StringIO(str(val)), header=None)
expected = DataFrame([str(val)])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("exp_data", [[str(-1), str(2 ** 63)], [str(2 ** 63), str(-1)]])
def test_numeric_range_too_wide(all_parsers, exp_data):
# No numerical dtype can hold both negative and uint64
# values, so they should be cast as string.
parser = all_parsers
data = "\n".join(exp_data)
expected = DataFrame(exp_data)
result = parser.read_csv(StringIO(data), header=None)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("iterator", [True, False])
def test_empty_with_nrows_chunksize(all_parsers, iterator):
# see gh-9535
parser = all_parsers
expected = DataFrame(columns=["foo", "bar"])
nrows = 10
data = StringIO("foo,bar\n")
if iterator:
result = next(iter(parser.read_csv(data, chunksize=nrows)))
else:
result = parser.read_csv(data, nrows=nrows)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"data,kwargs,expected,msg",
[
# gh-10728: WHITESPACE_LINE
(
"a,b,c\n4,5,6\n ",
dict(),
DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),
None,
),
# gh-10548: EAT_LINE_COMMENT
(
"a,b,c\n4,5,6\n#comment",
dict(comment="#"),
DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),
None,
),
# EAT_CRNL_NOP
(
"a,b,c\n4,5,6\n\r",
dict(),
DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),
None,
),
# EAT_COMMENT
(
"a,b,c\n4,5,6#comment",
dict(comment="#"),
DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),
None,
),
# SKIP_LINE
(
"a,b,c\n4,5,6\nskipme",
dict(skiprows=[2]),
DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),
None,
),
# EAT_LINE_COMMENT
(
"a,b,c\n4,5,6\n#comment",
dict(comment="#", skip_blank_lines=False),
DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),
None,
),
# IN_FIELD
(
"a,b,c\n4,5,6\n ",
dict(skip_blank_lines=False),
DataFrame([["4", 5, 6], [" ", None, None]], columns=["a", "b", "c"]),
None,
),
# EAT_CRNL
(
"a,b,c\n4,5,6\n\r",
dict(skip_blank_lines=False),
DataFrame([[4, 5, 6], [None, None, None]], columns=["a", "b", "c"]),
None,
),
# ESCAPED_CHAR
(
"a,b,c\n4,5,6\n\\",
dict(escapechar="\\"),
None,
"(EOF following escape character)|(unexpected end of data)",
),
# ESCAPE_IN_QUOTED_FIELD
(
'a,b,c\n4,5,6\n"\\',
dict(escapechar="\\"),
None,
"(EOF inside string starting at row 2)|(unexpected end of data)",
),
# IN_QUOTED_FIELD
(
'a,b,c\n4,5,6\n"',
dict(escapechar="\\"),
None,
"(EOF inside string starting at row 2)|(unexpected end of data)",
),
],
ids=[
"whitespace-line",
"eat-line-comment",
"eat-crnl-nop",
"eat-comment",
"skip-line",
"eat-line-comment",
"in-field",
"eat-crnl",
"escaped-char",
"escape-in-quoted-field",
"in-quoted-field",
],
)
def test_eof_states(all_parsers, data, kwargs, expected, msg):
# see gh-10728, gh-10548
parser = all_parsers
if expected is None:
with pytest.raises(ParserError, match=msg):
parser.read_csv(StringIO(data), **kwargs)
else:
result = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("usecols", [None, [0, 1], ["a", "b"]])
def test_uneven_lines_with_usecols(all_parsers, usecols):
# see gh-12203
parser = all_parsers
data = r"""a,b,c
0,1,2
3,4,5,6,7
8,9,10"""
if usecols is None:
# Make sure that an error is still raised
# when the "usecols" parameter is not provided.
msg = r"Expected \d+ fields in line \d+, saw \d+"
with pytest.raises(ParserError, match=msg):
parser.read_csv(StringIO(data))
else:
expected = DataFrame({"a": [0, 3, 8], "b": [1, 4, 9]})
result = parser.read_csv(StringIO(data), usecols=usecols)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"data,kwargs,expected",
[
# First, check to see that the response of parser when faced with no
# provided columns raises the correct error, with or without usecols.
("", dict(), None),
("", dict(usecols=["X"]), None),
(
",,",
dict(names=["Dummy", "X", "Dummy_2"], usecols=["X"]),
DataFrame(columns=["X"], index=[0], dtype=np.float64),
),
(
"",
dict(names=["Dummy", "X", "Dummy_2"], usecols=["X"]),
DataFrame(columns=["X"]),
),
],
)
def test_read_empty_with_usecols(all_parsers, data, kwargs, expected):
# see gh-12493
parser = all_parsers
if expected is None:
msg = "No columns to parse from file"
with pytest.raises(EmptyDataError, match=msg):
parser.read_csv(StringIO(data), **kwargs)
else:
result = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"kwargs,expected",
[
# gh-8661, gh-8679: this should ignore six lines, including
# lines with trailing whitespace and blank lines.
(
dict(
header=None,
delim_whitespace=True,
skiprows=[0, 1, 2, 3, 5, 6],
skip_blank_lines=True,
),
DataFrame([[1.0, 2.0, 4.0], [5.1, np.nan, 10.0]]),
),
# gh-8983: test skipping set of rows after a row with trailing spaces.
(
dict(
delim_whitespace=True, skiprows=[1, 2, 3, 5, 6], skip_blank_lines=True
),
DataFrame({"A": [1.0, 5.1], "B": [2.0, np.nan], "C": [4.0, 10]}),
),
],
)
def test_trailing_spaces(all_parsers, kwargs, expected):
data = "A B C \nrandom line with trailing spaces \nskip\n1,2,3\n1,2.,4.\nrandom line with trailing tabs\t\t\t\n \n5.1,NaN,10.0\n" # noqa
parser = all_parsers
result = parser.read_csv(StringIO(data.replace(",", " ")), **kwargs)
tm.assert_frame_equal(result, expected)
def test_raise_on_sep_with_delim_whitespace(all_parsers):
# see gh-6607
data = "a b c\n1 2 3"
parser = all_parsers
with pytest.raises(ValueError, match="you can only specify one"):
parser.read_csv(StringIO(data), sep=r"\s", delim_whitespace=True)
@pytest.mark.parametrize("delim_whitespace", [True, False])
def test_single_char_leading_whitespace(all_parsers, delim_whitespace):
# see gh-9710
parser = all_parsers
data = """\
MyColumn
a
b
a
b\n"""
expected = DataFrame({"MyColumn": list("abab")})
result = parser.read_csv(
StringIO(data), skipinitialspace=True, delim_whitespace=delim_whitespace
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"sep,skip_blank_lines,exp_data",
[
(",", True, [[1.0, 2.0, 4.0], [5.0, np.nan, 10.0], [-70.0, 0.4, 1.0]]),
(r"\s+", True, [[1.0, 2.0, 4.0], [5.0, np.nan, 10.0], [-70.0, 0.4, 1.0]]),
(
",",
False,
[
[1.0, 2.0, 4.0],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[5.0, np.nan, 10.0],
[np.nan, np.nan, np.nan],
[-70.0, 0.4, 1.0],
],
),
],
)
def test_empty_lines(all_parsers, sep, skip_blank_lines, exp_data):
parser = all_parsers
data = """\
A,B,C
1,2.,4.
5.,NaN,10.0
-70,.4,1
"""
if sep == r"\s+":
data = data.replace(",", " ")
result = parser.read_csv(StringIO(data), sep=sep, skip_blank_lines=skip_blank_lines)
expected = DataFrame(exp_data, columns=["A", "B", "C"])
tm.assert_frame_equal(result, expected)
def test_whitespace_lines(all_parsers):
parser = all_parsers
data = """
\t \t\t
\t
A,B,C
\t 1,2.,4.
5.,NaN,10.0
"""
expected = DataFrame([[1, 2.0, 4.0], [5.0, np.nan, 10.0]], columns=["A", "B", "C"])
result = parser.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"data,expected",
[
(
""" A B C D
a 1 2 3 4
b 1 2 3 4
c 1 2 3 4
""",
DataFrame(
[[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]],
columns=["A", "B", "C", "D"],
index=["a", "b", "c"],
),
),
(
" a b c\n1 2 3 \n4 5 6\n 7 8 9",
DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=["a", "b", "c"]),
),
],
)
def test_whitespace_regex_separator(all_parsers, data, expected):
# see gh-6607
parser = all_parsers
result = parser.read_csv(StringIO(data), sep=r"\s+")
tm.assert_frame_equal(result, expected)
def test_verbose_read(all_parsers, capsys):
parser = all_parsers
data = """a,b,c,d
one,1,2,3
one,1,2,3
,1,2,3
one,1,2,3
,1,2,3
,1,2,3
one,1,2,3
two,1,2,3"""
# Engines are verbose in different ways.
parser.read_csv(StringIO(data), verbose=True)
captured = capsys.readouterr()
if parser.engine == "c":
assert "Tokenization took:" in captured.out
assert "Parser memory cleanup took:" in captured.out
else: # Python engine
assert captured.out == "Filled 3 NA values in column a\n"
def test_verbose_read2(all_parsers, capsys):
parser = all_parsers
data = """a,b,c,d
one,1,2,3
two,1,2,3
three,1,2,3
four,1,2,3
five,1,2,3
,1,2,3
seven,1,2,3
eight,1,2,3"""
parser.read_csv(StringIO(data), verbose=True, index_col=0)
captured = capsys.readouterr()
# Engines are verbose in different ways.
if parser.engine == "c":
assert "Tokenization took:" in captured.out
assert "Parser memory cleanup took:" in captured.out
else: # Python engine
assert captured.out == "Filled 1 NA values in column a\n"
def test_iteration_open_handle(all_parsers):
parser = all_parsers
kwargs = dict(squeeze=True, header=None)
with tm.ensure_clean() as path:
with open(path, "w") as f:
f.write("AAA\nBBB\nCCC\nDDD\nEEE\nFFF\nGGG")
with open(path, "r") as f:
for line in f:
if "CCC" in line:
break
result = parser.read_csv(f, **kwargs)
expected = Series(["DDD", "EEE", "FFF", "GGG"], name=0)
tm.assert_series_equal(result, expected)
# @file riverlog_for_gis.py
# @brief riverlog related library, share with DevZone
# @author <EMAIL>
import requests
import json
import os
import pandas as pd
from datetime import timedelta, datetime,date
import time
from pandas.api.types import is_numeric_dtype
def url_get(filename,url,reload=False):
"""
get url to file
"""
#print("filename=%s,url=%s" %(filename,url))
if os.path.isfile(filename) and reload==False:
return
else:
        r = requests.get(url, params={})
        with open(filename, 'wb') as f:
            f.write(r.content)
def load_json_local(filename,file_id):
"""
load json file and transfer to panda
hardcode: handle json with data in 'data'
"""
with open(filename, 'r') as json_file:
data_head = json.load(json_file)
if file_id=="elev-gridData":
data = data_head['data']['data']
else:
data = data_head['data']
if len(data)>0:
cols = data[0].keys()
else:
return None
out = []
for row in data:
item = []
for c in cols:
item.append(row.get(c, {}))
out.append(item)
return pd.DataFrame(out, columns=cols)
def api_to_csv(api_id,pars,reload=False):
"""
get api data and save to csv.
api_id: api_path with '-' as delimiter
pars = [], put parameters as string
"""
api_map={
"rain-dailySum":"https://riverlog.lass-net.org/rain/dailySum?year=%i",
"rain-10minSum":"https://riverlog.lass-net.org/rain/10minSum?date=%s",
"rain-station":"https://riverlog.lass-net.org/rain/station",
"rain-rainData":"https://riverlog.lass-net.org/rain/rainData?date=%s&minLat=%s&maxLat=%s&minLng=%s&maxLng=%s",
"waterLevel-station":"https://riverlog.lass-net.org/waterLevel/station",
"waterLevel-waterLevelData":"https://riverlog.lass-net.org/waterLevel/waterLevelData?date=%s&minLat=%s&maxLat=%s&minLng=%s&maxLng=%s",
"waterLevelDrain-station":"https://riverlog.lass-net.org/waterLevelDrain/station",
"waterLevelDrain-waterLevelDrainData":"https://riverlog.lass-net.org/waterLevelDrain/waterLevelDrainData?date=%s&minLat=%s&maxLat=%s&minLng=%s&maxLng=%s",
"waterLevelAgri-station":"https://riverlog.lass-net.org/waterLevelAgri/station",
"waterLevelAgri-waterLevelAgriData":"https://riverlog.lass-net.org/waterLevelAgri/waterLevelAgriData?date=%s&minLat=%s&maxLat=%s&minLng=%s&maxLng=%s",
"sewer-station":"https://riverlog.lass-net.org/sewer/station",
"sewer-sewerData":"https://riverlog.lass-net.org/sewer/sewerData?date=%s&minLat=%s&maxLat=%s&minLng=%s&maxLng=%s",
"tide-station":"https://riverlog.lass-net.org/tide/station",
"tide-tideData":"https://riverlog.lass-net.org/tide/tideData?date=%s&minLat=%s&maxLat=%s&minLng=%s&maxLng=%s",
"pump-station":"https://riverlog.lass-net.org/pump/station",
"pump-pumpData":"https://riverlog.lass-net.org/pump/pumpData?date=%s&minLat=%s&maxLat=%s&minLng=%s&maxLng=%s",
"reservoir-info":"https://riverlog.lass-net.org/reservoir/info",
"reservoir-reservoirData":"https://riverlog.lass-net.org/reservoir/reservoirData?date=%s",
"flood-station":"https://riverlog.lass-net.org/flood/station",
"flood-floodData":"https://riverlog.lass-net.org/flood/floodData?date=%s",
"alert-alertData":"https://riverlog.lass-net.org/alert/alertData?date=%s",
"alert-alertStatistic":"https://riverlog.lass-net.org/alert/alertStatistic?year=%s",
"alert-typhoonData":"https://riverlog.lass-net.org/alert/typhoonData?date=%s", # date can change to year
"elev-gridData":"https://riverlog.lass-net.org/elev/gridData?level=%s&minLat=%s&maxLat=%s&minLng=%s&maxLng=%s",
"statistic-waterUseAgriculture":"https://riverlog.lass-net.org/statistic/waterUseAgriculture",
"statistic-waterUseCultivation":"https://riverlog.lass-net.org/statistic/waterUseCultivation",
"statistic-waterUseLivestock":"https://riverlog.lass-net.org/statistic/waterUseLivestock",
"statistic-waterUseLiving":"https://riverlog.lass-net.org/statistic/waterUseLiving",
"statistic-waterUseIndustry":"https://riverlog.lass-net.org/statistic/waterUseIndustry",
"statistic-waterUseOverview":"https://riverlog.lass-net.org/statistic/waterUseOverview",
"statistic-monthWaterUse":"https://riverlog.lass-net.org/statistic/monthWaterUse",
"statistic-reservoirUse":"https://riverlog.lass-net.org/statistic/reservoirUse",
"statistic-reservoirSiltation":"https://riverlog.lass-net.org/statistic/reservoirSiltation"
}
url_fmt = api_map[api_id]
if pars:
if len(pars)==1:
url = url_fmt %(pars[0])
filename_prefix = "output/%s_%s" %(api_id,pars[0])
elif api_id=="":
pass
else: #5 parameters
url = url_fmt %(pars[0],pars[1],pars[2],pars[3],pars[4])
filename_prefix = "output/%s_%s" %(api_id,pars[0])
else: #None
url = url_fmt
filename_prefix = "output/%s" %(api_id)
    cont = True
    while cont:
        filename = filename_prefix + ".json"
        url_get(filename, url, reload)
        try:
            # loading can fail on a truncated or partial download; retry after a pause
            df = load_json_local(filename, api_id)
            cont = False
        except Exception:
            print("Exception when process %s, retrying after 60s" % (filename))
            if os.path.isfile(filename):
                os.remove(filename)
            time.sleep(60)
    if df is None:
        print("%s don't have data" % (api_id))
        return None
filename = filename_prefix + ".csv"
print("%s: %s saved, shape = %s" %(api_id,filename, str(df.shape)))
df.to_csv(filename)
return df
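# Hedged usage sketch for api_to_csv (not part of the original flow; the hardcoded
# "output/%s..." paths above assume an existing output/ directory). These calls mirror
# the case_id==0 block further below:
#   api_to_csv("rain-dailySum", [2020])                                    # one parameter
#   api_to_csv("rain-rainData", ["2020-09-01", "23", "24", "121", "122"])  # date + lat/lng bounding box
#   api_to_csv("rain-station", None)                                       # no parameters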
def proc_Sum(file_loc, file_src,file_row,file_geo):
"""
process dailySum from columns to rows, by area , add location geo info
1: merge Sum/Num to Avg
2. drop unused columns
3. columns to rows
4. merge geo info
"""
df = pd.read_csv(file_src)
df['central']=df['centralSum']/df['centralNum']
df['north']=df['northSum']/df['northNum']
df['south']=df['southSum']/df['southNum']
df1=df.drop(['centralSum', 'centralNum','northSum','northNum','southSum','southNum','Unnamed: 0'], axis=1)
df2 = (df1.set_index(["time"])
.stack()
.reset_index(name='Value')
.rename(columns={'level_1':'location'}))
df2.to_csv(file_row)
df_geo = pd.read_csv(file_loc)
df_merge = pd.merge(df2, df_geo, on='location')
df_final = df_merge.sort_values(by='time')
df_final.to_csv(file_geo)
return df_final
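# Shape sketch of the proc_Sum reshape (column names as used above):
#   file_src: time, centralSum, centralNum, northSum, northNum, southSum, southNum, ...
#   file_row: time, location ('central' / 'north' / 'south'), Value (Sum divided by Num)
#   file_geo: file_row joined with file_loc on 'location', sorted by time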
def minSum_range(start_str,end_str):
"""
get 10minSum by date range. merge to 1 CSV
"""
start_date = datetime.strptime(start_str, "%Y-%m-%d")
end_date = datetime.strptime(end_str, "%Y-%m-%d")
first_day = True
date_now = start_date
df_all = None
while True:
if date_now > end_date:
break
month = date_now.month
year = date_now.year
file_datestr = date_now.strftime("%Y-%m-%d")
df = api_to_csv("rain-10minSum",[file_datestr])
#df['datetime']="%s" %(date_now.strftime("%Y/%m/%d"))
if first_day:
df_all = df
first_day = False
else:
df_all = pd.concat([df_all,df])
date_now += timedelta(days=1)
filename = "output/%s_%s_%s.csv" %("rain-10minSum",start_str,end_str)
print("rain-10minSum saved %s, shape = %s" %(filename, str(df_all.shape)))
df_save = df_all.sort_values(by='time')
df_save.to_csv(filename,header=True,float_format="%.2f")
def api_to_csv_range(start_str,end_str,api_id,pars,sort_col_name):
"""
get api by date range. merge to 1 CSV
"""
start_date = datetime.strptime(start_str, "%Y-%m-%d")
end_date = datetime.strptime(end_str, "%Y-%m-%d")
first_day = True
date_now = start_date
df_all = None
while True:
if date_now > end_date:
break
month = date_now.month
year = date_now.year
file_datestr = date_now.strftime("%Y-%m-%d")
if pars is None:
df = api_to_csv(api_id,[file_datestr])
else:
real_pars = [file_datestr]
real_pars.extend(pars)
df = api_to_csv(api_id,real_pars)
#df['datetime']="%s" %(date_now.strftime("%Y/%m/%d"))
if first_day:
df_all = df
first_day = False
else:
df_all = pd.concat([df_all,df])
date_now += timedelta(days=1)
filename = "output/%s_%s_%s.csv" %(api_id,start_str,end_str)
print("%s saved %s, shape = %s" %(api_id,filename, str(df_all.shape)))
df_save = df_all.sort_values(by=sort_col_name)
df_save.to_csv(filename,header=True,float_format="%.2f")
return filename
def date_to_gmt8(date_str):
"""
transfer date string to GMT+8
ex: 2021-05-31T16:00:00.000Z
"""
#date object
date_obj = datetime.strptime(date_str, "%Y-%m-%dT%H:%M:%S.000Z")
#+8 hour
hours_added = timedelta(hours = 8)
date_gmt8 = date_obj + hours_added
#output format
date_ret = date_gmt8.strftime("%Y-%m-%d %H:%M:%S")
return date_ret
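# Example of the +8h conversion above (input format as in the docstring):
#   date_to_gmt8("2021-05-31T16:00:00.000Z")  ->  "2021-06-01 00:00:00"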
def csv_add_gmt8(file_src,col_name,file_dest):
"""
add GMT8 time to CSV by re-format one column
"""
df = pd.read_csv(file_src)
df[col_name + "GMT8"] = df[col_name].apply(date_to_gmt8)
df_save=df.drop(['Unnamed: 0'], axis=1)
df_save.to_csv(file_dest)
return df_save
case_id=2 # 0: first version, 1: reservoir data by date, 2: for notebook debug
if case_id==0:
if 1: #get each api to CSV
api_to_csv("rain-dailySum",[2020])
api_to_csv("rain-10minSum",["2020-09-01"])
api_to_csv("rain-station",None)
api_to_csv("rain-rainData",["2020-09-01","23","24","121","122"])
api_to_csv("waterLevel-station",None)
api_to_csv("waterLevel-waterLevelData",["2020-09-01","23","24","121","122"])
api_to_csv("waterLevelDrain-station",None)
api_to_csv("waterLevelDrain-waterLevelDrainData",["2019-12-03","23","24","120","122"])
api_to_csv("waterLevelAgri-station",None)
api_to_csv("waterLevelAgri-waterLevelAgriData",["2019-12-03","23","24","120","122"])
api_to_csv("sewer-station",None)
api_to_csv("sewer-sewerData",["2019-12-02","24","25","121","122"])
api_to_csv("tide-station",None)
api_to_csv("tide-tideData",["2020-09-01","23","24","121","122"])
api_to_csv("pump-station",None)
api_to_csv("pump-pumpData",["2019-12-03","25","26","121","122"])
api_to_csv("reservoir-info",None)
api_to_csv("reservoir-reservoirData",["2020-09-01"])
api_to_csv("flood-station",None)
api_to_csv("flood-floodData",["2020-09-01"])
api_to_csv("alert-alertData",["2020-09-01"])
api_to_csv("alert-alertStatistic",[2020])
api_to_csv("alert-typhoonData",["2020-09-01"])
api_to_csv("elev-gridData",["7","23","24","120","121"])
api_to_csv("statistic-waterUseAgriculture",None)
api_to_csv("statistic-waterUseCultivation",None)
api_to_csv("statistic-waterUseLivestock",None)
api_to_csv("statistic-waterUseLiving",None)
api_to_csv("statistic-waterUseIndustry",None)
api_to_csv("statistic-waterUseOverview",None)
api_to_csv("statistic-monthWaterUse",None)
api_to_csv("statistic-reservoirUse",None)
api_to_csv("statistic-reservoirSiltation",None)
if 1: #process rain-dailySum,10minSum , predefined 3 area geo definition: areaGeo.csv
api_to_csv("rain-dailySum",[2020])
proc_Sum("areaGeo.csv","output/rain-dailySum_2020.csv","output/rain-dailySum_2020_row.csv","output/rain-dailySum_2020_geo.csv")
minSum_range("2020-10-01","2020-10-05")
proc_Sum("areaGeo.csv","output/rain-10minSum_2020-10-01_2020-10-05.csv","output/rain-10minSum_2020-10-01_2020-10-05_row.csv","output/rain-10minSum_2020-10-01_2020-10-05_geo.csv")
def get_value_by_index(df,keyvalue, target_col):
"""
find df's column(key) = value, return value of target_col
keyvalue: col_name=value
"""
cols = keyvalue.split("=")
if len(cols)!=2:
return ""
keyvalue_key = cols[0]
keyvalue_value = cols[1]
if is_numeric_dtype(df[keyvalue_key]):
keyvalue_value=float(cols[1])
if not target_col in df.columns:
return ""
values = df[df[keyvalue_key]==keyvalue_value][target_col].values.tolist()
if len(values)>0:
value = values[0]
else:
value = ""
return value
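# Hedged example, mirroring the lookup in waterLevel_view() below:
#   get_value_by_index(df_info, "ObservatoryName=河川水位測站-內灣-1300H013", "BasinIdentifier")
# returns the BasinIdentifier of the first matching row, or "" when nothing matches.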
#----- Multi-reservoir storage percentage analysis
def reservoir_load(bag,date_start, date_end, reservoir_list): #[10405,10201,10205]
df_info = api_to_csv("reservoir-info",None)
filename=api_to_csv_range(date_start,date_end,"reservoir-reservoirData",None,"ObservationTime")
dest_name="%s_GMT8.csv" %(filename[:-4])
df=csv_add_gmt8(filename,"ObservationTime", dest_name )
#handle info
df_info=df_info[df_info['Year']==105]
    df_info = df_info.drop_duplicates(subset="id")
df_info["id"] = pd.to_numeric(df_info["id"])
#merge/filter
df2=df.merge(df_info, how='left', left_on='ReservoirIdentifier', right_on='id')
df2=df2.drop_duplicates(subset=["ObservationTime","ReservoirIdentifier"],keep='last')
df2=df2[df2['ReservoirIdentifier'].isin(reservoir_list)] #,20101,20201
#Calculate, Pivot
df2["ObservationTimeGMT8"] = pd.to_datetime(df2['ObservationTimeGMT8'])
df2['percent']=df2['EffectiveWaterStorageCapacity']/df2['EffectiveCapacity']*100
df2=df2[df2['percent']<=100]
df3 = df2.pivot(index='ObservationTimeGMT8', columns='ReservoirName', values='percent')
bag['reservoir-info']=df_info
bag['reservoir-reservoirData']=df2
bag['reservoir_pivot']=df3
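# Hedged usage sketch (reservoir IDs from the comment on the signature above; any dict works as bag):
#   gd = {}
#   reservoir_load(gd, "2021-05-26", "2021-06-12", [10405, 10201, 10205])
#   gd['reservoir_pivot'].head()   # storage percentage per reservoir over time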
def reservoir_plot(bag):
#plot
#%matplotlib notebook
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
myfont = FontProperties(fname=r'/Library/Fonts/Microsoft/SimSun.ttf')
df = bag['reservoir_pivot']
df.plot()
plt.title("多水庫2021庫容比例",fontproperties=myfont)
plt.legend(prop=myfont)
plt.xticks(fontname = 'SimSun',size=8)
plt.yticks(fontname = 'SimSun',size=8)
plt.xlabel('時間',fontproperties=myfont)
plt.ylabel('百分比',fontproperties=myfont)
    plt.show()
#----- Today's flooding
def flood_load(bag,date_str,limit=0):
    #load the supplementary county/township data for the flood stations
df_info_縣市鄉鎮 = pd.read_csv("flood-station_縣市鄉鎮.csv")
#get data, process
df_info=api_to_csv("flood-station",None)
df_info=df_info.merge(df_info_縣市鄉鎮, how='left', left_on='_id', right_on='_id')
df_info
#date_str = date.today() # 2021-06-07
print("Today is %s" %(date_str))
df = api_to_csv("flood-floodData",[date_str])
df["timeGMT8"] = df['time'].apply(date_to_gmt8)
df["timeGMT8"] = pd.to_datetime(df['timeGMT8'])
df=df.merge(df_info_縣市鄉鎮, how='left', left_on='stationID', right_on='_id')
df=df.drop_duplicates(subset=["time","stationName"],keep='last')
df['stationName_city']=df['COUNTYNAME'] + '|' + df['TOWNNAME'] + '|' + df['stationName']
#filter, sort
    df=df[df['value']>=limit] #adjustable flood-depth threshold; raise it when there is a lot of flood data
    df = df.sort_values(by=['timeGMT8'])
bag['flood-station_縣市鄉鎮']=df_info_縣市鄉鎮
bag['flood-station']=df_info
bag['flood-floodData']=df
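# Hedged usage sketch (flood-station_縣市鄉鎮.csv is assumed to sit next to this script):
#   gd = {}
#   flood_load(gd, "2021-06-07", limit=10)   # keep only sensors reporting >= 10 cm
#   flood_list(gd)                           # defined below: print the affected stations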
def flood_plot(bag,date_str):
#%matplotlib notebook
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
df = bag['flood-floodData']
myfont = FontProperties(fname=r'/Library/Fonts/Microsoft/SimSun.ttf')
df2 = df.pivot(index='timeGMT8', columns='stationName_city', values='value')
df2.plot(style='.-')
title = "今日 %s 淹水感測器淹水值" %(date_str)
plt.title(title,fontproperties=myfont)
plt.legend(prop=myfont)
plt.xticks(fontname = 'SimSun',size=8)
plt.yticks(fontname = 'SimSun',size=8)
plt.xlabel('時間',fontproperties=myfont)
plt.ylabel('公分',fontproperties=myfont)
fig = plt.gcf()
fig.set_size_inches(8.5, 4.5)
    plt.show()
#list the flooded stations
def flood_list(bag):
df = bag['flood-floodData']
ary = df['stationName_city'].unique()
for name in ary:
print(name)
#----- Rain gauge stations
#list the Hsinchu County stations
def rain_station_view():
df_info = api_to_csv("rain-station",None)
filter_city = df_info['city']=='新竹縣'
df_info = df_info[filter_city]
return df_info
def rain_load(bag, date_str,limit=0,reload=False):
df_info = api_to_csv("rain-station",None)
#date_str = date.today() # 2021-06-07
print("Today is %s" %(date_str))
df=api_to_csv("rain-rainData",[date_str,"20","26","120","122"],reload)
df["timeGMT8"] = df['time'].apply(date_to_gmt8)
df["timeGMT8"] = pd.to_datetime(df['timeGMT8'])
df=df.merge(df_info, how='left', left_on='stationID', right_on='stationID')
df=df.drop_duplicates(subset=["timeGMT8","stationID"],keep='last')
df['stationName']=df['city'] + '|' + df['town'] + '|' + df['name'] + '|' + df['stationID']
#filter, sort
    df=df[df['now']>=limit] #adjustable rainfall threshold; raise it when there is a lot of data
df=df.sort_values(by=['timeGMT8','stationID'])
bag['rain-station']=df_info
bag['rain-rainData']=df
#pivot of today's rainfall
def rain_plot(bag,date_str, user_df=None):
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
if user_df is None:
df = bag['rain-rainData']
else:
df = user_df
myfont = FontProperties(fname=r'/Library/Fonts/Microsoft/SimSun.ttf')
df2 = df.pivot(index='timeGMT8', columns='stationName', values='now')
df2.plot(style='.-')
title = "今日 %s 雨量站值" %(date_str)
plt.title(title,fontproperties=myfont)
plt.legend(prop=myfont)
plt.xticks(fontname = 'SimSun',size=8)
plt.yticks(fontname = 'SimSun',size=8)
plt.xlabel('時間',fontproperties=myfont)
plt.ylabel('mm',fontproperties=myfont)
fig = plt.gcf()
fig.set_size_inches(8.5, 4.5)
    plt.show()
def rain_hourdiff(bag, time_set,station_city):
    #hourly rainfall (difference between the two time slots)
df_info = bag['rain-station']
df = bag['rain-rainData']
#df_info.head()
if station_city is None:
stations = None
else:
f1=df_info['city'].isin(station_city)
#df_info[f1].values.tolist()
#df_info[f1]['city'].unique()
stations = df_info[f1]['stationID'].tolist()
#print(stations)
#df.head()
#time_set=['2021-06-10 15:00:00','2021-06-10 16:00:00']
f_time=df['timeGMT8'].isin(time_set)
if stations is None:
df_f = df[f_time]
else:
f_station=df['stationID'].isin(stations)
df_f = df[f_station & f_time]
#df[f_station]
#df['city'].unique()
if len(df_f.index)>0:
#print(df_f)
df_f = df_f.drop_duplicates(['stationName','timeGMT8'])
df_pivot = df_f.pivot(index='stationName', columns='timeGMT8', values='now')
print("time_set=%s" %(time_set))
#print(df_pivot)
df_pivot['rain_1hour']=df_pivot[time_set[1]]-df_pivot[time_set[0]]
bag['rain-hourdiff']=df_pivot
return True
else:
print("no data!")
return False
def to_slot_10min(t_src):
#t_now = datetime.now()
#t_added = timedelta(minutes = 10)
#t_slot= t_src - t_added
slot_min=int(int(t_src.minute/10)*10)
#date_str="%i-%i-%i %i:%02i:00" %(t_src.year,t_src.month,t_src.day,t_src.hour,slot_min)
date_str="%i-%02i-%02i %02i:%02i:00" %(t_src.year,t_src.month,t_src.day,t_src.hour,slot_min)
return date_str
def get_2slot(t_src,hour):
#print("t_src=%s" %(t_src))
date_str = to_slot_10min(t_src)
date_obj = datetime.strptime(date_str, "%Y-%m-%d %H:%M:%S")
date_obj2 = date_obj + timedelta(hours = hour)
date_str2 = date_obj2.strftime("%Y-%m-%d %H:%M:%S")
return [date_str,date_str2]
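# Examples for the two slot helpers above (plain datetime arithmetic):
#   to_slot_10min(datetime(2021, 6, 10, 15, 47))  ->  "2021-06-10 15:40:00"
#   get_2slot(datetime(2021, 6, 10, 15, 47), 1)   ->  ["2021-06-10 15:40:00", "2021-06-10 16:40:00"]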
def rain_alarm_hour(bag,station_city,limit):
    rain_load(bag, date.today(), reload=True)
#time_set=['2021-06-10 15:00:00','2021-06-10 16:00:00']
time_now = datetime.now()
time_set = get_2slot(time_now-timedelta(minutes = 90),1)
#print(time_set)
#station_city=['新竹縣','新竹市']
    rain_hourdiff(bag, time_set, station_city)
    df_pivot = bag['rain-hourdiff']
if len(df_pivot.index)>0:
df_pivot=df_pivot[df_pivot['rain_1hour']>limit]
df_pivot=df_pivot.sort_values(by=['rain_1hour'],ascending=False)
print("-----\nMonitor time: %s : %s 雨量站時雨量 > %i mm -----\n" %(time_now.strftime("%Y-%m-%d %H:%M:%S"), station_city,limit))
print(df_pivot)
else:
print("no data!")
def rain_day_max(bag,date_str,station_city):
    rain_load(bag, date_str, reload=True)
#station_city=['新竹縣','新竹市']
    df_info = bag['rain-station']
    df = bag['rain-rainData']
#df_info.head()
f1=df_info['city'].isin(station_city)
#df_info[f1].values.tolist()
#df_info[f1]['city'].unique()
stations = df_info[f1]['stationID'].tolist()
#f_time=df['timeGMT8'].isin(time_set)
f_station=df['stationID'].isin(stations)
df_f = df[f_station]
df_agg=df_f.groupby('stationName').agg({'now': ['max']})
bag['rain_day_max']=df_agg
def rain_load_range(bag,date_start, date_end, limit=0,reload=False):
df_info = api_to_csv("rain-station",None)
#date_str = date.today() # 2021-06-07
#print("Today is %s" %(date_str))
filename=api_to_csv_range(date_start,date_end,"rain-rainData",["20","26","120","122"],"time")
dest_name="%s_GMT8.csv" %(filename[:-4])
df=csv_add_gmt8(filename,"time", dest_name )
#df=api_to_csv_range("rain-rainData",[date_str,"20","26","120","122"],reload)
if 1:
#df["timeGMT8"] = df['time'].apply(date_to_gmt8)
df["timeGMT8"] = pd.to_datetime(df['timeGMT8'])
df=df.merge(df_info, how='left', left_on='stationID', right_on='stationID')
df=df.drop_duplicates(subset=["timeGMT8","stationID"],keep='last')
df['stationName']=df['city'] + '|' + df['town'] + '|' + df['name'] + '|' + df['stationID']
#filter, sort
        df=df[df['now']>=limit] #adjustable rainfall threshold; raise it when there is a lot of data
df=df.sort_values(by=['timeGMT8','stationID'])
bag['rain-station']=df_info
bag['rain-rainData']=df
#----- Observe changes of the Baoshan No. 2 Reservoir (寶山第二水庫) together with the rain gauge stations
def rain_reservoir_multi(date_range,station_list,reservoir_list):
    #data from multiple rain gauge stations
#date_range=['2021-05-26','2021-06-12']
if 1:
#rain_load(gd, date_str,limit=0,reload=True)
rain_load_range(gd,date_range[0],date_range[1],0,True)
#gd['rain-rainData']
df_info = gd['rain-station']
df1= gd['rain-rainData']
#station_list=['C0D550','C1D410','01D100','C0D580']
f1=df1['stationID'].isin(station_list)
df1 = df1[f1]
#print(df1.columns)
#rain_plot(gd,date_str, df[f1])
    #Baoshan No. 2 Reservoir (寶二水庫)
if 1:
reservoir_load(gd,date_range[0],date_range[1],reservoir_list)
#reservoir_plot(gd)
df2=gd['reservoir-reservoirData']
#reservoir_list = [10405]
#print(df2.columns)
if 1:
#%matplotlib notebook
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
myfont = FontProperties(fname=r'/Library/Fonts/Microsoft/SimSun.ttf')
count_all = len(station_list)+len(reservoir_list)
fig, axs = plt.subplots(count_all)
title = "%s-%s 雨量站與水庫庫容" %(date_range[0],date_range[1])
fig.suptitle(title,fontproperties=myfont)
ax_cur=0
for i in range(len(station_list)):
#label = station_list[i]
label = get_value_by_index(df1,"stationID=%s" %(station_list[i]), 'stationName')
axs[ax_cur].set_ylabel(label,fontproperties=myfont) #,loc="top"
#axs[i].plot(x, y)
f1=df1['stationID']==station_list[i]
df1[f1].plot(x='timeGMT8',y='now',ax=axs[ax_cur],label=label,color='red',legend=None) #no need to specify for first axi
ax_cur+=1
for j in range(len(reservoir_list)):
label = reservoir_list[j]
#label = get_value_by_index(df2,"ReservoirIdentifier=%i" %(reservoir_list[j]), 'ReservoirName')
label = get_value_by_index(df2,"id=%i" %(reservoir_list[j]), 'ReservoirName')
axs[ax_cur].set_ylabel(label,fontproperties=myfont) #,loc="top"
#axs[j].plot(x, y)
f2=df2['ReservoirIdentifier']==reservoir_list[j]
#names = df2[f2]['ReservoirName'].tolist()
df2[f2].plot(x='ObservationTimeGMT8',y='percent',ax=axs[ax_cur],label=label,color='blue',legend=None) #no need to specify for first axis
ax_cur+=1
#plt.title(title,fontproperties=myfont)
plt.xticks(fontname = 'SimSun',size=10)
plt.yticks(fontname = 'SimSun',size=10)
fig = plt.gcf()
fig.set_size_inches(12.0, 4*count_all)
plt.show()
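# Illustrative usage sketch (added for clarity, not part of the original script): the station
# and reservoir IDs below are the example values mentioned in the comments above.
def _example_rain_reservoir_multi():
    rain_reservoir_multi(['2021-05-26', '2021-06-12'],
                         ['C0D550', 'C1D410', '01D100', 'C0D580'],
                         [10405])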
#----- Water level stations
# look up water level stations
# commonly used stations: '河川水位測站-內灣-1300H013','河川水位測站-經國橋-1300H017' , '河川水位測站-上坪-1300H014'
def waterLevel_view():
df_info = api_to_csv("waterLevel-station",None) #'BasinIdentifier',ObservatoryName
filter_river = df_info['RiverName']=='上坪溪'
#filter_name = df_info['BasinIdentifier']=='1300H017'
# ID 查名稱
#value=get_value_by_index(df_info,"BasinIdentifier=1140H037", 'ObservatoryName')
value=get_value_by_index(df_info,"ObservatoryName=河川水位測站-內灣-1300H013", 'BasinIdentifier')
print(value)
return df_info[filter_river]
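# Illustrative usage sketch (added for clarity, not part of the original script): list the
# water-level stations on 上坪溪 returned by waterLevel_view().
def _example_waterLevel_view():
    stations = waterLevel_view()
    print(stations[['BasinIdentifier', 'ObservatoryName']])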
# prepare today's data
def waterLevel_load(bag,date_str,reload=False):
df_info = api_to_csv("waterLevel-station",None) #'BasinIdentifier',ObservatoryName
#date_str=date.today() #2021-06-08
#date_str='2021-06-08'
df=api_to_csv("waterLevel-waterLevelData",[date_str,"23","25","120","123"],reload) #'RecordTime', 'StationIdentifier', 'WaterLevel'
df["RecordTimeGMT8"] = df['RecordTime'].apply(date_to_gmt8)
df["RecordTimeGMT8"] = pd.to_datetime(df['RecordTimeGMT8'])
|
"""
Tests that work on both the Python and C engines but do not have a
specific classification into the other test modules.
"""
import codecs
import csv
from datetime import datetime
from io import StringIO
import os
import platform
from tempfile import TemporaryFile
from urllib.error import URLError
import numpy as np
import pytest
from pandas._libs.tslib import Timestamp
from pandas.errors import DtypeWarning, EmptyDataError, ParserError
from pandas import DataFrame, Index, MultiIndex, Series, compat, concat
import pandas._testing as tm
from pandas.io.parsers import CParserWrapper, TextFileReader, TextParser
def test_override_set_noconvert_columns():
# see gh-17351
#
# Usecols needs to be sorted in _set_noconvert_columns based
# on the test_usecols_with_parse_dates test from test_usecols.py
class MyTextFileReader(TextFileReader):
def __init__(self):
self._currow = 0
self.squeeze = False
class MyCParserWrapper(CParserWrapper):
def _set_noconvert_columns(self):
if self.usecols_dtype == "integer":
# self.usecols is a set, which is documented as unordered
# but in practice, a CPython set of integers is sorted.
# In other implementations this assumption does not hold.
# The following code simulates a different order, which
# before GH 17351 would cause the wrong columns to be
# converted via the parse_dates parameter
self.usecols = list(self.usecols)
self.usecols.reverse()
return CParserWrapper._set_noconvert_columns(self)
data = """a,b,c,d,e
0,1,20140101,0900,4
0,1,20140102,1000,4"""
parse_dates = [[1, 2]]
cols = {
"a": [0, 0],
"c_d": [Timestamp("2014-01-01 09:00:00"), Timestamp("2014-01-02 10:00:00")],
}
expected = DataFrame(cols, columns=["c_d", "a"])
parser = MyTextFileReader()
parser.options = {
"usecols": [0, 2, 3],
"parse_dates": parse_dates,
"delimiter": ",",
}
parser._engine = MyCParserWrapper(StringIO(data), **parser.options)
result = parser.read()
tm.assert_frame_equal(result, expected)
def test_empty_decimal_marker(all_parsers):
data = """A|B|C
1|2,334|5
10|13|10.
"""
# Parsers support only length-1 decimals
msg = "Only length-1 decimal markers supported"
parser = all_parsers
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), decimal="")
def test_bad_stream_exception(all_parsers, csv_dir_path):
# see gh-13652
#
# This test validates that both the Python engine and C engine will
# raise UnicodeDecodeError instead of C engine raising ParserError
# and swallowing the exception that caused read to fail.
path = os.path.join(csv_dir_path, "sauron.SHIFT_JIS.csv")
codec = codecs.lookup("utf-8")
utf8 = codecs.lookup("utf-8")
parser = all_parsers
msg = "'utf-8' codec can't decode byte"
# Stream must be binary UTF8.
with open(path, "rb") as handle, codecs.StreamRecoder(
handle, utf8.encode, utf8.decode, codec.streamreader, codec.streamwriter
) as stream:
with pytest.raises(UnicodeDecodeError, match=msg):
parser.read_csv(stream)
def test_read_csv_local(all_parsers, csv1):
prefix = "file:///" if compat.is_platform_windows() else "file://"
parser = all_parsers
fname = prefix + str(os.path.abspath(csv1))
result = parser.read_csv(fname, index_col=0, parse_dates=True)
expected = DataFrame(
[
[0.980269, 3.685731, -0.364216805298, -1.159738],
[1.047916, -0.041232, -0.16181208307, 0.212549],
[0.498581, 0.731168, -0.537677223318, 1.346270],
[1.120202, 1.567621, 0.00364077397681, 0.675253],
[-0.487094, 0.571455, -1.6116394093, 0.103469],
[0.836649, 0.246462, 0.588542635376, 1.062782],
[-0.157161, 1.340307, 1.1957779562, -1.097007],
],
columns=["A", "B", "C", "D"],
index=Index(
[
datetime(2000, 1, 3),
datetime(2000, 1, 4),
datetime(2000, 1, 5),
datetime(2000, 1, 6),
datetime(2000, 1, 7),
datetime(2000, 1, 10),
datetime(2000, 1, 11),
],
name="index",
),
)
tm.assert_frame_equal(result, expected)
def test_1000_sep(all_parsers):
parser = all_parsers
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({"A": [1, 10], "B": [2334, 13], "C": [5, 10.0]})
result = parser.read_csv(StringIO(data), sep="|", thousands=",")
tm.assert_frame_equal(result, expected)
def test_squeeze(all_parsers):
data = """\
a,1
b,2
c,3
"""
parser = all_parsers
index = Index(["a", "b", "c"], name=0)
expected = Series([1, 2, 3], name=1, index=index)
result = parser.read_csv(StringIO(data), index_col=0, header=None, squeeze=True)
tm.assert_series_equal(result, expected)
# see gh-8217
#
# Series should not be a view.
assert not result._is_view
def test_malformed(all_parsers):
# see gh-6607
parser = all_parsers
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
msg = "Expected 3 fields in line 4, saw 5"
with pytest.raises(ParserError, match=msg):
parser.read_csv(StringIO(data), header=1, comment="#")
@pytest.mark.parametrize("nrows", [5, 3, None])
def test_malformed_chunks(all_parsers, nrows):
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
parser = all_parsers
msg = "Expected 3 fields in line 6, saw 5"
reader = parser.read_csv(
StringIO(data), header=1, comment="#", iterator=True, chunksize=1, skiprows=[2]
)
with pytest.raises(ParserError, match=msg):
reader.read(nrows)
def test_unnamed_columns(all_parsers):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
parser = all_parsers
expected = DataFrame(
[[1, 2, 3, 4, 5], [6, 7, 8, 9, 10], [11, 12, 13, 14, 15]],
dtype=np.int64,
columns=["A", "B", "C", "Unnamed: 3", "Unnamed: 4"],
)
result = parser.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
def test_csv_mixed_type(all_parsers):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
parser = all_parsers
expected = DataFrame({"A": ["a", "b", "c"], "B": [1, 3, 4], "C": [2, 4, 5]})
result = parser.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
def test_read_csv_low_memory_no_rows_with_index(all_parsers):
# see gh-21141
parser = all_parsers
if not parser.low_memory:
pytest.skip("This is a low-memory specific test")
data = """A,B,C
1,1,1,2
2,2,3,4
3,3,4,5
"""
result = parser.read_csv(StringIO(data), low_memory=True, index_col=0, nrows=0)
expected = DataFrame(columns=["A", "B", "C"])
tm.assert_frame_equal(result, expected)
def test_read_csv_dataframe(all_parsers, csv1):
parser = all_parsers
result = parser.read_csv(csv1, index_col=0, parse_dates=True)
expected = DataFrame(
[
[0.980269, 3.685731, -0.364216805298, -1.159738],
[1.047916, -0.041232, -0.16181208307, 0.212549],
[0.498581, 0.731168, -0.537677223318, 1.346270],
[1.120202, 1.567621, 0.00364077397681, 0.675253],
[-0.487094, 0.571455, -1.6116394093, 0.103469],
[0.836649, 0.246462, 0.588542635376, 1.062782],
[-0.157161, 1.340307, 1.1957779562, -1.097007],
],
columns=["A", "B", "C", "D"],
index=Index(
[
datetime(2000, 1, 3),
datetime(2000, 1, 4),
datetime(2000, 1, 5),
datetime(2000, 1, 6),
datetime(2000, 1, 7),
datetime(2000, 1, 10),
datetime(2000, 1, 11),
],
name="index",
),
)
tm.assert_frame_equal(result, expected)
def test_read_csv_no_index_name(all_parsers, csv_dir_path):
parser = all_parsers
csv2 = os.path.join(csv_dir_path, "test2.csv")
result = parser.read_csv(csv2, index_col=0, parse_dates=True)
expected = DataFrame(
[
[0.980269, 3.685731, -0.364216805298, -1.159738, "foo"],
[1.047916, -0.041232, -0.16181208307, 0.212549, "bar"],
[0.498581, 0.731168, -0.537677223318, 1.346270, "baz"],
[1.120202, 1.567621, 0.00364077397681, 0.675253, "qux"],
[-0.487094, 0.571455, -1.6116394093, 0.103469, "foo2"],
],
columns=["A", "B", "C", "D", "E"],
index=Index(
[
datetime(2000, 1, 3),
datetime(2000, 1, 4),
datetime(2000, 1, 5),
datetime(2000, 1, 6),
datetime(2000, 1, 7),
]
),
)
tm.assert_frame_equal(result, expected)
def test_read_csv_wrong_num_columns(all_parsers):
# Too few columns.
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
parser = all_parsers
msg = "Expected 6 fields in line 3, saw 7"
with pytest.raises(ParserError, match=msg):
parser.read_csv(StringIO(data))
def test_read_duplicate_index_explicit(all_parsers):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
parser = all_parsers
result = parser.read_csv(StringIO(data), index_col=0)
expected = DataFrame(
[
[2, 3, 4, 5],
[7, 8, 9, 10],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
],
columns=["A", "B", "C", "D"],
index=Index(["foo", "bar", "baz", "qux", "foo", "bar"], name="index"),
)
tm.assert_frame_equal(result, expected)
def test_read_duplicate_index_implicit(all_parsers):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
parser = all_parsers
result = parser.read_csv(StringIO(data))
expected = DataFrame(
[
[2, 3, 4, 5],
[7, 8, 9, 10],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
],
columns=["A", "B", "C", "D"],
index=Index(["foo", "bar", "baz", "qux", "foo", "bar"]),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"data,kwargs,expected",
[
(
"A,B\nTrue,1\nFalse,2\nTrue,3",
dict(),
DataFrame([[True, 1], [False, 2], [True, 3]], columns=["A", "B"]),
),
(
"A,B\nYES,1\nno,2\nyes,3\nNo,3\nYes,3",
dict(true_values=["yes", "Yes", "YES"], false_values=["no", "NO", "No"]),
DataFrame(
[[True, 1], [False, 2], [True, 3], [False, 3], [True, 3]],
columns=["A", "B"],
),
),
(
"A,B\nTRUE,1\nFALSE,2\nTRUE,3",
dict(),
DataFrame([[True, 1], [False, 2], [True, 3]], columns=["A", "B"]),
),
(
"A,B\nfoo,bar\nbar,foo",
dict(true_values=["foo"], false_values=["bar"]),
DataFrame([[True, False], [False, True]], columns=["A", "B"]),
),
],
)
def test_parse_bool(all_parsers, data, kwargs, expected):
parser = all_parsers
result = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(result, expected)
def test_int_conversion(all_parsers):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
parser = all_parsers
result = parser.read_csv(StringIO(data))
expected = DataFrame([[1.0, 1], [2.0, 2], [3.0, 3]], columns=["A", "B"])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("nrows", [3, 3.0])
def test_read_nrows(all_parsers, nrows):
# see gh-10476
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
expected = DataFrame(
[["foo", 2, 3, 4, 5], ["bar", 7, 8, 9, 10], ["baz", 12, 13, 14, 15]],
columns=["index", "A", "B", "C", "D"],
)
parser = all_parsers
result = parser.read_csv(StringIO(data), nrows=nrows)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("nrows", [1.2, "foo", -1])
def test_read_nrows_bad(all_parsers, nrows):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
msg = r"'nrows' must be an integer >=0"
parser = all_parsers
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), nrows=nrows)
@pytest.mark.parametrize("index_col", [0, "index"])
def test_read_chunksize_with_index(all_parsers, index_col):
parser = all_parsers
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
reader = parser.read_csv(StringIO(data), index_col=0, chunksize=2)
expected = DataFrame(
[
["foo", 2, 3, 4, 5],
["bar", 7, 8, 9, 10],
["baz", 12, 13, 14, 15],
["qux", 12, 13, 14, 15],
["foo2", 12, 13, 14, 15],
["bar2", 12, 13, 14, 15],
],
columns=["index", "A", "B", "C", "D"],
)
expected = expected.set_index("index")
chunks = list(reader)
tm.assert_frame_equal(chunks[0], expected[:2])
tm.assert_frame_equal(chunks[1], expected[2:4])
tm.assert_frame_equal(chunks[2], expected[4:])
@pytest.mark.parametrize("chunksize", [1.3, "foo", 0])
def test_read_chunksize_bad(all_parsers, chunksize):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
msg = r"'chunksize' must be an integer >=1"
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), chunksize=chunksize)
@pytest.mark.parametrize("chunksize", [2, 8])
def test_read_chunksize_and_nrows(all_parsers, chunksize):
# see gh-15755
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
kwargs = dict(index_col=0, nrows=5)
reader = parser.read_csv(StringIO(data), chunksize=chunksize, **kwargs)
expected = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(concat(reader), expected)
def test_read_chunksize_and_nrows_changing_size(all_parsers):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
kwargs = dict(index_col=0, nrows=5)
reader = parser.read_csv(StringIO(data), chunksize=8, **kwargs)
expected = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(reader.get_chunk(size=2), expected.iloc[:2])
tm.assert_frame_equal(reader.get_chunk(size=4), expected.iloc[2:5])
with pytest.raises(StopIteration, match=""):
reader.get_chunk(size=3)
def test_get_chunk_passed_chunksize(all_parsers):
parser = all_parsers
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
reader = parser.read_csv(StringIO(data), chunksize=2)
result = reader.get_chunk()
expected = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["A", "B", "C"])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("kwargs", [dict(), dict(index_col=0)])
def test_read_chunksize_compat(all_parsers, kwargs):
# see gh-12185
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
reader = parser.read_csv(StringIO(data), chunksize=2, **kwargs)
result = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(concat(reader), result)
def test_read_chunksize_jagged_names(all_parsers):
# see gh-23509
parser = all_parsers
data = "\n".join(["0"] * 7 + [",".join(["0"] * 10)])
expected = DataFrame([[0] + [np.nan] * 9] * 7 + [[0] * 10])
reader = parser.read_csv(StringIO(data), names=range(10), chunksize=4)
result = concat(reader)
tm.assert_frame_equal(result, expected)
def test_read_data_list(all_parsers):
parser = all_parsers
kwargs = dict(index_col=0)
data = "A,B,C\nfoo,1,2,3\nbar,4,5,6"
data_list = [["A", "B", "C"], ["foo", "1", "2", "3"], ["bar", "4", "5", "6"]]
expected = parser.read_csv(StringIO(data), **kwargs)
parser = TextParser(data_list, chunksize=2, **kwargs)
result = parser.read()
tm.assert_frame_equal(result, expected)
def test_iterator(all_parsers):
# see gh-6607
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
kwargs = dict(index_col=0)
expected = parser.read_csv(StringIO(data), **kwargs)
reader = parser.read_csv(StringIO(data), iterator=True, **kwargs)
first_chunk = reader.read(3)
tm.assert_frame_equal(first_chunk, expected[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, expected[3:])
def test_iterator2(all_parsers):
parser = all_parsers
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = parser.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
index=["foo", "bar", "baz"],
columns=["A", "B", "C"],
)
tm.assert_frame_equal(result[0], expected)
def test_reader_list(all_parsers):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
kwargs = dict(index_col=0)
lines = list(csv.reader(StringIO(data)))
reader = TextParser(lines, chunksize=2, **kwargs)
expected = parser.read_csv(StringIO(data), **kwargs)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], expected[:2])
tm.assert_frame_equal(chunks[1], expected[2:4])
tm.assert_frame_equal(chunks[2], expected[4:])
|
###########################################################
# Encode
###########################################################
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn import preprocessing,model_selection, ensemble
from sklearn.preprocessing import LabelEncoder
import scipy.stats as ss
from sklearn.externals import joblib
from scipy.sparse import csr_matrix
def cat2MedianShiftEncode(train_char, test_char, y, nbag = 10, nfold = 20, minCount = 3):
train_df = train_char.copy()
test_df = test_char.copy()
for c in train_char.columns:
data = train_char[[c]].copy()
data['y'] = y
med_y = np.median(y)
enc_mat = np.zeros((y.shape[0],1))
enc_mat_test = np.zeros((test_char.shape[0],1))
for bag in np.arange(nbag):
kf = model_selection.KFold(n_splits= nfold, shuffle=True, random_state=2017*bag)
for dev_index, val_index in kf.split(range(data['y'].shape[0])):
dev_X, val_X = data.iloc[dev_index,:], data.iloc[val_index,:]
datax = dev_X.groupby(c).agg([len,np.mean,np.std, np.median])
datax.columns = ['_'.join(col).strip() for col in datax.columns.values]
datax = datax.loc[datax.y_len > minCount]
datax[c+'_medshftenc'] = datax['y_median']-med_y
datax.drop(['y_len','y_mean','y_std','y_median'],axis=1,inplace=True)
print(datax.columns)
datatst = test_char[[c]].copy()
val_X = val_X.join(datax,on=[c], how='left').fillna(0)
datatst = datatst.join(datax,on=[c], how='left').fillna(0)
enc_mat[val_index,...] += val_X[list(set(datax.columns)-set([c]))]
enc_mat_test += datatst[list(set(datax.columns)-set([c]))]
enc_mat_test /= (nfold * nbag)
enc_mat /= (nbag)
enc_mat = pd.DataFrame(enc_mat)
enc_mat.columns=[c+'_medshftenc'+str(x) for x in enc_mat.columns]
enc_mat_test = pd.DataFrame(enc_mat_test)
enc_mat_test.columns=[enc_mat.columns]
train_df = pd.concat((enc_mat.reset_index(drop = True),train_df.reset_index(drop = True)), axis=1)
test_df = pd.concat([enc_mat_test.reset_index(drop = True),test_df.reset_index(drop = True)],axis=1)
return train_df, test_df
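# Illustrative usage sketch (added for clarity, not part of the original code): X_train_cat,
# X_test_cat and y_train are placeholder names for the categorical feature frames and the
# numeric target; the encoder returns copies of the inputs with one extra out-of-fold
# encoded column per categorical column.
def _example_cat2MedianShiftEncode(X_train_cat, X_test_cat, y_train):
    train_enc, test_enc = cat2MedianShiftEncode(X_train_cat, X_test_cat, y_train,
                                                nbag=5, nfold=10, minCount=3)
    return train_enc, test_enc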
def cat2MeanShiftEncode(train_char, test_char, y, nbag = 10, nfold = 20, minCount = 3):
train_df = train_char.copy()
test_df = test_char.copy()
for c in train_char.columns:
data = train_char[[c]].copy()
data['y'] = y
mean_y = np.mean(y)
enc_mat = np.zeros((y.shape[0],1))
enc_mat_test = np.zeros((test_char.shape[0],1))
for bag in np.arange(nbag):
kf = model_selection.KFold(n_splits= nfold, shuffle=True, random_state=2017*bag)
for dev_index, val_index in kf.split(range(data['y'].shape[0])):
dev_X, val_X = data.iloc[dev_index,:], data.iloc[val_index,:]
datax = dev_X.groupby(c).agg([len,np.mean,np.std, np.median])
datax.columns = ['_'.join(col).strip() for col in datax.columns.values]
datax = datax.loc[datax.y_len > minCount]
datax[c+'_meanshftenc'] = datax['y_mean'] - mean_y
datax.drop(['y_len','y_mean','y_std','y_median'],axis=1,inplace=True)
print(datax.columns)
datatst = test_char[[c]].copy()
val_X = val_X.join(datax,on=[c], how='left').fillna(0)
datatst = datatst.join(datax,on=[c], how='left').fillna(0)
enc_mat[val_index,...] += val_X[list(set(datax.columns)-set([c]))]
enc_mat_test += datatst[list(set(datax.columns)-set([c]))]
enc_mat_test /= (nfold*nbag)
enc_mat /= (nbag)
enc_mat = pd.DataFrame(enc_mat)
enc_mat.columns=[c+'_meanshftenc'+str(x) for x in enc_mat.columns]
enc_mat_test = pd.DataFrame(enc_mat_test)
enc_mat_test.columns=[enc_mat.columns]
train_df = pd.concat((enc_mat.reset_index(drop = True),train_df.reset_index(drop = True)), axis=1)
test_df = pd.concat([enc_mat_test.reset_index(drop = True),test_df.reset_index(drop = True)],axis=1)
return train_df, test_df
def cat2MeanEncode(train_char, test_char, y, nbag = 10, nfold = 20, minCount = 3):
train_df = train_char.copy()
test_df = test_char.copy()
rn = np.mean(y)
for c in train_char.columns:
data = train_char[[c]].copy()
data['y'] = y
enc_mat = np.zeros((y.shape[0],1))
enc_mat_test = np.zeros((test_char.shape[0],1))
for bag in np.arange(nbag):
kf = model_selection.KFold(n_splits= nfold, shuffle=True, random_state=2017*bag)
for dev_index, val_index in kf.split(range(data['y'].shape[0])):
dev_X, val_X = data.iloc[dev_index,:], data.iloc[val_index,:]
datax = dev_X.groupby(c).agg([len,np.mean,np.std, np.median])
datax.columns = ['_'.join(col).strip() for col in datax.columns.values]
datax = datax.loc[datax.y_len > minCount]
datax[c+'_meanenc'] = datax['y_mean']
datax.drop(['y_len','y_mean','y_std','y_median'],axis=1,inplace=True)
print(datax.columns)
datatst = test_char[[c]].copy()
val_X = val_X.join(datax,on=[c], how='left').fillna(rn)
datatst = datatst.join(datax,on=[c], how='left').fillna(rn)
enc_mat[val_index,...] += val_X[list(set(datax.columns)-set([c]))]
enc_mat_test += datatst[list(set(datax.columns)-set([c]))]
enc_mat_test /= (nfold*nbag)
enc_mat /= (nbag)
enc_mat = pd.DataFrame(enc_mat)
enc_mat.columns=[c+'_meanenc'+str(x) for x in enc_mat.columns]
enc_mat_test = pd.DataFrame(enc_mat_test)
enc_mat_test.columns=[enc_mat.columns]
train_df = pd.concat((enc_mat,train_df), axis=1)
test_df = pd.concat([enc_mat_test,test_df],axis=1)
return train_df, test_df
def cat2MedianEncode(train_char, test_char, y, nbag = 10, nfold = 20, minCount = 3):
train_df = train_char.copy()
test_df = test_char.copy()
rn = np.mean(y)
for c in train_char.columns:
data = train_char[[c]].copy()
data['y'] = y
enc_mat = np.zeros((y.shape[0],1))
enc_mat_test = np.zeros((test_char.shape[0],1))
for bag in np.arange(nbag):
kf = model_selection.KFold(n_splits= nfold, shuffle=True, random_state=2017*bag)
for dev_index, val_index in kf.split(range(data['y'].shape[0])):
dev_X, val_X = data.iloc[dev_index,:], data.iloc[val_index,:]
datax = dev_X.groupby(c).agg([len,np.mean,np.std, np.median])
datax.columns = ['_'.join(col).strip() for col in datax.columns.values]
datax = datax.loc[datax.y_len > minCount]
datax[c+'_medianenc'] = datax['y_median']
datax.drop(['y_len','y_mean','y_std','y_median'],axis=1,inplace=True)
print(datax.columns)
datatst = test_char[[c]].copy()
val_X = val_X.join(datax,on=[c], how='left').fillna(rn)
datatst = datatst.join(datax,on=[c], how='left').fillna(rn)
enc_mat[val_index,...] += val_X[list(set(datax.columns)-set([c]))]
enc_mat_test += datatst[list(set(datax.columns)-set([c]))]
enc_mat_test /= (nfold*nbag)
enc_mat /= (nbag)
enc_mat = pd.DataFrame(enc_mat)
enc_mat.columns=[c+'_medianenc'+str(x) for x in enc_mat.columns]
enc_mat_test = pd.DataFrame(enc_mat_test)
enc_mat_test.columns=[enc_mat.columns]
train_df = pd.concat((enc_mat,train_df), axis=1)
test_df = pd.concat([enc_mat_test,test_df],axis=1)
return train_df, test_df
def countEncode(train_char, test_char, y, nbag = 10, nfold = 20, minCount = 3):
train_df = train_char.copy()
test_df = test_char.copy()
rn = 999
for c in train_char.columns:
data = train_char[[c]].copy()
data['y'] = y
enc_mat = np.zeros((y.shape[0],1))
enc_mat_test = np.zeros((test_char.shape[0],1))
for bag in np.arange(nbag):
kf = model_selection.KFold(n_splits= nfold, shuffle=True, random_state=2017*bag)
for dev_index, val_index in kf.split(range(data['y'].shape[0])):
dev_X, val_X = data.iloc[dev_index,:], data.iloc[val_index,:]
datax = dev_X.groupby(c).agg([len,np.mean,np.std, np.median])
datax.columns = ['_'.join(col).strip() for col in datax.columns.values]
datax = datax.loc[datax.y_len > minCount]
datax[c+'_countenc'] = datax['y_len']
datax.drop(['y_len','y_mean','y_std','y_median'],axis=1,inplace=True)
print(datax.columns)
datatst = test_char[[c]].copy()
val_X = val_X.join(datax,on=[c], how='left').fillna(rn)
datatst = datatst.join(datax,on=[c], how='left').fillna(rn)
enc_mat[val_index,...] += val_X[list(set(datax.columns)-set([c]))]
enc_mat_test += datatst[list(set(datax.columns)-set([c]))]
enc_mat_test /= (nfold * nbag)
enc_mat /= nbag
enc_mat = pd.DataFrame(enc_mat)
enc_mat.columns=[c+'_countenc'+str(x) for x in enc_mat.columns]
enc_mat_test = pd.DataFrame(enc_mat_test)
enc_mat_test.columns=[enc_mat.columns]
train_df = pd.concat((enc_mat,train_df), axis=1)
test_df = pd.concat([enc_mat_test,test_df],axis=1)
return train_df, test_df
def rankCountEncode(train_char, test_char, y, nbag = 10, nfold = 20, minCount = 3):
train_df = train_char.copy()
test_df = test_char.copy()
rn = 999
for c in train_char.columns:
data = train_char[[c]].copy()
data['y'] = y
enc_mat = np.zeros((y.shape[0],1))
enc_mat_test = np.zeros((test_char.shape[0],1))
for bag in np.arange(nbag):
kf = model_selection.KFold(n_splits= nfold, shuffle=True, random_state=2017*bag)
for dev_index, val_index in kf.split(range(data['y'].shape[0])):
dev_X, val_X = data.iloc[dev_index,:], data.iloc[val_index,:]
datax = dev_X.groupby(c).agg([len,np.mean,np.std, np.median])
datax.columns = ['_'.join(col).strip() for col in datax.columns.values]
datax = datax.loc[datax.y_len > minCount]
datax[c+'_rankenc'] = datax['y_len']
datax[c+'_rankenc'] = ss.rankdata(datax[c+'_rankenc'].values)
datax.drop(['y_len','y_mean','y_std','y_median'],axis=1,inplace=True)
print(datax.columns)
datatst = test_char[[c]].copy()
val_X = val_X.join(datax,on=[c], how='left').fillna(rn)
datatst = datatst.join(datax,on=[c], how='left').fillna(rn)
enc_mat[val_index,...] += val_X[list(set(datax.columns)-set([c]))]
enc_mat_test += datatst[list(set(datax.columns)-set([c]))]
enc_mat_test /= (nfold * nbag)
enc_mat /= (nbag)
enc_mat = pd.DataFrame(enc_mat)
|
__version__ = "v1.0"
__copyright__ = "Copyright 2021"
__license__ = "MIT"
__lab__ = "<NAME>s lab"
import time
import pandas as pd
class reader(object):
def __init__(self, ):
pass
def todf(self, seqs, names):
"""
Parameters
----------
seqs
    list of raw read sequences to convert
names
    identifiers corresponding to ``seqs``

Returns
-------
pandas.DataFrame
"""
umi_df_stime = time.time()
df_fastq = pd.DataFrame(seqs, columns=['seq_raw'])
|
#%% [markdown]
# # Matching when including the contralateral connections
#%% [markdown]
# ## Preliminaries
#%%
import datetime
import os
import time
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from giskard.plot import adjplot, matched_stripplot, matrixplot
from numba import jit
from pkg.data import load_maggot_graph, load_matched
from pkg.io import OUT_PATH
from pkg.io import glue as default_glue
from pkg.io import savefig
from pkg.match import BisectedGraphMatchSolver, GraphMatchSolver
from pkg.plot import method_palette, set_theme
from pkg.utils import get_paired_inds, get_paired_subgraphs, get_seeds
from scipy.optimize import linear_sum_assignment
from scipy.stats import wilcoxon
FILENAME = "larva_brain"
DISPLAY_FIGS = True
OUT_PATH = OUT_PATH / FILENAME
def glue(name, var, **kwargs):
default_glue(name, var, FILENAME, **kwargs)
def gluefig(name, fig, **kwargs):
savefig(name, foldername=FILENAME, **kwargs)
glue(name, fig, figure=True)
if not DISPLAY_FIGS:
plt.close()
t0 = time.time()
set_theme()
rng = np.random.default_rng(8888)
#%% [markdown]
# ### Load the data
#%%
left_adj, left_nodes = load_matched("left")
right_adj, right_nodes = load_matched("right")
left_nodes["inds"] = range(len(left_nodes))
right_nodes["inds"] = range(len(right_nodes))
seeds = get_seeds(left_nodes, right_nodes)
all_nodes = pd.concat((left_nodes, right_nodes))
|
from datetime import datetime
import re
import unittest
import nose
from nose.tools import assert_equal
import numpy as np
from pandas.tslib import iNaT
from pandas import Series, DataFrame, date_range, DatetimeIndex, Timestamp
from pandas import compat
from pandas.compat import range, long, lrange, lmap, u
from pandas.core.common import notnull, isnull
import pandas.core.common as com
import pandas.util.testing as tm
import pandas.core.config as cf
_multiprocess_can_split_ = True
def test_mut_exclusive():
msg = "mutually exclusive arguments: '[ab]' and '[ab]'"
with tm.assertRaisesRegexp(TypeError, msg):
com._mut_exclusive(a=1, b=2)
assert com._mut_exclusive(a=1, b=None) == 1
assert com._mut_exclusive(major=None, major_axis=None) is None
def test_is_sequence():
is_seq = com._is_sequence
assert(is_seq((1, 2)))
assert(is_seq([1, 2]))
assert(not is_seq("abcd"))
assert(not is_seq(u("abcd")))
assert(not is_seq(np.int64))
class A(object):
def __getitem__(self):
return 1
assert(not is_seq(A()))
def test_notnull():
assert notnull(1.)
assert not notnull(None)
assert not notnull(np.NaN)
with cf.option_context("mode.use_inf_as_null", False):
assert notnull(np.inf)
assert notnull(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notnull(arr)
assert result.all()
with cf.option_context("mode.use_inf_as_null", True):
assert not notnull(np.inf)
assert not notnull(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notnull(arr)
assert result.sum() == 2
with cf.option_context("mode.use_inf_as_null", False):
float_series = Series(np.random.randn(5))
obj_series = Series(np.random.randn(5), dtype=object)
assert(isinstance(notnull(float_series), Series))
assert(isinstance(notnull(obj_series), Series))
def test_isnull():
assert not isnull(1.)
assert isnull(None)
assert isnull(np.NaN)
assert not isnull(np.inf)
assert not isnull(-np.inf)
float_series = Series(np.random.randn(5))
obj_series = Series(np.random.randn(5), dtype=object)
assert(isinstance(isnull(float_series), Series))
assert(isinstance(isnull(obj_series), Series))
# call on DataFrame
df = DataFrame(np.random.randn(10, 5))
df['foo'] = 'bar'
result = isnull(df)
expected = result.apply(isnull)
tm.assert_frame_equal(result, expected)
def test_isnull_tuples():
result = isnull((1, 2))
exp = np.array([False, False])
assert(np.array_equal(result, exp))
result = isnull([(False,)])
exp = np.array([[False]])
assert(np.array_equal(result, exp))
result = isnull([(1,), (2,)])
exp = np.array([[False], [False]])
assert(np.array_equal(result, exp))
# list of strings / unicode
result = isnull(('foo', 'bar'))
assert(not result.any())
result = isnull((u('foo'), u('bar')))
assert(not result.any())
def test_isnull_lists():
result = isnull([[False]])
exp = np.array([[False]])
assert(np.array_equal(result, exp))
result = isnull([[1], [2]])
exp = np.array([[False], [False]])
assert(np.array_equal(result, exp))
# list of strings / unicode
result = isnull(['foo', 'bar'])
assert(not result.any())
result = isnull([u('foo'), u('bar')])
assert(not result.any())
def test_isnull_datetime():
assert (not isnull(datetime.now()))
assert notnull(datetime.now())
idx = date_range('1/1/1990', periods=20)
assert(notnull(idx).all())
idx = np.asarray(idx)
idx[0] = iNaT
idx = DatetimeIndex(idx)
mask = isnull(idx)
assert(mask[0])
assert(not mask[1:].any())
def test_datetimeindex_from_empty_datetime64_array():
for unit in [ 'ms', 'us', 'ns' ]:
idx = DatetimeIndex(np.array([], dtype='datetime64[%s]' % unit))
assert(len(idx) == 0)
def test_nan_to_nat_conversions():
df = DataFrame(dict({
'A' : np.asarray(lrange(10),dtype='float64'),
'B' : Timestamp('20010101') }))
df.iloc[3:6,:] = np.nan
result = df.loc[4,'B'].value
assert(result == iNaT)
s = df['B'].copy()
s._data = s._data.setitem(tuple([slice(8,9)]),np.nan)
assert(isnull(s[8]))
# numpy < 1.7.0 is wrong
from distutils.version import LooseVersion
if LooseVersion(np.__version__) >= '1.7.0':
assert(s[8].value == np.datetime64('NaT').astype(np.int64))
def test_any_none():
assert(com._any_none(1, 2, 3, None))
assert(not com._any_none(1, 2, 3, 4))
def test_all_not_none():
assert(com._all_not_none(1, 2, 3, 4))
assert(not com._all_not_none(1, 2, 3, None))
assert(not com._all_not_none(None, None, None, None))
def test_repr_binary_type():
import string
letters = string.ascii_letters
btype = compat.binary_type
try:
raw = btype(letters, encoding=cf.get_option('display.encoding'))
except TypeError:
raw = btype(letters)
b = compat.text_type(compat.bytes_to_str(raw))
res = com.pprint_thing(b, quote_strings=True)
assert_equal(res, repr(b))
res = com.pprint_thing(b, quote_strings=False)
assert_equal(res, b)
def test_rands():
r = com.rands(10)
assert(len(r) == 10)
def test_adjoin():
data = [['a', 'b', 'c'],
['dd', 'ee', 'ff'],
['ggg', 'hhh', 'iii']]
expected = 'a dd ggg\nb ee hhh\nc ff iii'
adjoined = com.adjoin(2, *data)
assert(adjoined == expected)
def test_iterpairs():
data = [1, 2, 3, 4]
expected = [(1, 2),
(2, 3),
(3, 4)]
result = list(com.iterpairs(data))
assert(result == expected)
def test_split_ranges():
def _bin(x, width):
"return int(x) as a base2 string of given width"
return ''.join(str((x >> i) & 1) for i in range(width - 1, -1, -1))
def test_locs(mask):
nfalse = sum(np.array(mask) == 0)
remaining = 0
for s, e in com.split_ranges(mask):
remaining += e - s
assert 0 not in mask[s:e]
# make sure the total items covered by the ranges are a complete cover
assert remaining + nfalse == len(mask)
# exhaustively test all possible mask sequences of length 8
ncols = 8
for i in range(2 ** ncols):
cols = lmap(int, list(_bin(i, ncols))) # count up in base2
mask = [cols[i] == 1 for i in range(len(cols))]
test_locs(mask)
# base cases
test_locs([])
test_locs([0])
test_locs([1])
def test_indent():
s = 'a b c\nd e f'
result = com.indent(s, spaces=6)
assert(result == ' a b c\n d e f')
def test_banner():
ban = com.banner('hi')
assert(ban == ('%s\nhi\n%s' % ('=' * 80, '=' * 80)))
def test_map_indices_py():
data = [4, 3, 2, 1]
expected = {4: 0, 3: 1, 2: 2, 1: 3}
result = com.map_indices_py(data)
assert(result == expected)
def test_union():
a = [1, 2, 3]
b = [4, 5, 6]
union = sorted(com.union(a, b))
assert((a + b) == union)
def test_difference():
a = [1, 2, 3]
b = [1, 2, 3, 4, 5, 6]
inter = sorted(com.difference(b, a))
assert([4, 5, 6] == inter)
def test_intersection():
a = [1, 2, 3]
b = [1, 2, 3, 4, 5, 6]
inter = sorted(com.intersection(a, b))
|
from avatarpy.core import Core
from avatarpy.transform import Transform
from avatarpy.animate import Animate
from avatarpy.describe import Describe
from avatarpy.annotation import Annotation
import numpy as np
import pandas as pd
from itertools import combinations
from scipy.stats import zscore
class Avatar(Core):
_nodes={
'nose' :[0,1,2],
'neck' :[3,4,5],
'anus' :[6,7,8],
'chest' :[9,10,11],
'rfoot' :[12,13,14],
'lfoot' :[15,16,17],
'rhand' :[18,19,20],
'lhand' :[21,22,23],
'tip' :[24,25,26],
}
_vectors={
'head' :{'head':'nose', 'tail':'neck'},
'fbody' :{'head':'neck', 'tail':'chest'},
'hbody' :{'head':'chest', 'tail':'anus'},
'tail' :{'head':'tip', 'tail':'anus'},
'rarm' :{'head':'rhand', 'tail':'chest'},
'larm' :{'head':'lhand', 'tail':'chest'},
'rleg' :{'head':'rfoot', 'tail':'anus'},
'lleg' :{'head':'lfoot', 'tail':'anus'},
'nose2anus' :{'head':'anus', 'tail':'nose'},
'lhand2nose':{'head':'nose', 'tail':'lhand'},
'rhand2nose':{'head':'nose', 'tail':'rhand'},
}
_angles={
'head':{'left':'head', 'right':'fbody'},
'body':{'left':'fbody', 'right':'hbody'},
'tail':{'left':'hbody', 'right':'tail'},
'larm':{'left':'larm', 'right':'fbody'},
'rarm':{'left':'rarm', 'right':'fbody'},
'lleg':{'left':'lleg', 'right':'hbody'},
'rleg':{'left':'rleg', 'right':'hbody'},
}
def __init__(self, csv_path, frame_rate=20, ID=None, tags={}, horizontal_correction=True):
self._csv_path = csv_path
self._data = pd.read_csv(csv_path, header=None)
self._frame_rate = frame_rate
if frame_rate: self.data.index/=frame_rate
self._ID=ID if ID else self.csv_path
self._tags = tags
self.set_nodes()
self.set_vectors()
self._transform = Transform(parent=self)
self._animate = Animate(parent=self)
self._describe = Describe(parent=self)
self._annotation = Annotation(parent=self)
# self._human_annotation = HumanAnnotation(parent=self)
# self._heuristic_annotation = HeuristicAnnotation(parent=self)
if horizontal_correction:
self.data = self.transform.level().data
self.set_nodes()
self.set_vectors()
@property
def csv_path(self):
"""Original coordinates file path"""
return self._csv_path
@csv_path.setter
def csv_path(self, v):
self._csv_path = v
@property
def data(self):
"""Raw data of repeated x, y, z coordinates of nodes"""
return self._data
@data.setter
def data(self, v):
self._data = v
@property
def frame_rate(self):
"""Number of frames recorded in 1 second. (a.k.a. data rate, sampling rate)"""
return self._frame_rate
@frame_rate.setter
def frame_rate(self, v):
self._frame_rate = v
@property
def ID(self):
"""User provided ID for avatar instance. (default: csv_path)"""
return self._ID
@ID.setter
def ID(self, v):
self._ID = v
@property
def tags(self):
"""User provided tags for avatar instance. add tags by dict (ex, {'genotype':'wt'})"""
return self._tags
@tags.setter
def tags(self, v):
self._tags = v
def __repr__(self):
return f'Avatar({self.ID})'
@property
def help(self):
"""Prints list of available functions and attributes of avatar"""
functions, attributes = [], []
for item in dir(self.__class__):
if item.startswith('_'):
continue
docstring = getattr(self.__class__, item).__doc__
headline = docstring.split('\n')[0].strip() if docstring else ''
if callable(getattr(self.__class__, item)):
functions.append(f"\t{item}: \n\t\t{headline}")
else:
attributes.append(f"\t{item}: \n\t\t{headline}")
funcs = '\n'.join(functions)
attrs = '\n'.join(attributes)
print(f"""Visit https://github.com/jeakwon/avatarpy\n
Functions:\n{funcs}\nAttributes:\n{attrs}""")
def set_nodes(self):
"""Set node attributes in avatar with predefined node info in `cls._nodes`"""
for name, cols in self._nodes.items():
data = self.get_node(cols)
setattr(self, name, data)
def set_vectors(self):
"""Set vector attributes in avatar with predefined vector info in `cls._nodes`"""
for name, labels in self._vectors.items():
data = self.get_vector(self[labels['head']], self[labels['tail']])
setattr(self, name, data)
@property
def index(self):
"""Index of recording timecourse. If frame rate is provided, unit is second"""
return self.data.index
@property
def nodes(self):
"""Dictionary of all nodes data"""
return {key:self[key] for key in self._nodes.keys()}
@property
def vectors(self):
"""Dictionary of all vectors data"""
return {key:self[key] for key in self._vectors.keys()}
@property
def x(self):
"""x coordinates of nodes and vectors"""
return self.get_axis_data('x')
@property
def y(self):
"""y coordinates of nodes and vectors"""
return self.get_axis_data('y')
@property
def z(self):
"""z coordinates of nodes and vectors"""
return self.get_axis_data('z')
@property
def x_max(self):
"""Maximum x coord value among all nodes"""
return self.x[self.nodes].values.max()
@property
def x_min(self):
"""Minimum x coord value among all nodes"""
return self.x[self.nodes].values.min()
@property
def y_max(self):
"""Maximum y coord value among all nodes"""
return self.y[self.nodes].values.max()
@property
def y_min(self):
"""Minimum y coord value among all nodes"""
return self.y[self.nodes].values.min()
@property
def z_max(self):
"""Maximum z coord value among all nodes"""
return self.z[self.nodes].values.max()
@property
def z_min(self):
"""Minimum z coord value among all nodes"""
return self.z[self.nodes].values.min()
@property
def unit_vector_x(self):
"""Numpy 2d array (N x 3) of x unit vector [1,0,0]"""
return self.get_unit_vector(axis='x')
@property
def unit_vector_y(self):
"""Numpy 2d array (N x 3) of y unit vector [0,1,0]"""
return self.get_unit_vector(axis='y')
@property
def unit_vector_z(self):
"""Numpy 2d array (N x 3) of z unit vector. [0,0,1]"""
return self.get_unit_vector(axis='z')
@property
def node_data(self):
"""Coords data with labels of all nodes. [x, y, z, node]"""
return self.get_node_data(self.nodes.keys())
def get_node_data(self, nodes):
"""Returns coords data with labels of provided nodes names"""
labeled_data = [self[node].assign(node=node) for node in nodes]
return pd.concat(labeled_data).sort_index()
def get_unit_vector(self, axis):
"""Returns numpy 2d array (N x 3) of given axis unit vector"""
if axis=='x': arr = np.array([1,0,0])
elif axis=='y': arr = np.array([0,1,0])
elif axis=='z': arr = np.array([0,0,1])
return np.stack([arr]*len(self.index))
def get_node(self, columns):
"""Returns T-series node x, y, z coords by assigned index of columns"""
return self.data[columns].set_axis(['x', 'y', 'z'], axis=1, inplace=False)
def get_vector(self, head, tail):
"""Returns T-series vector x, y, z coords by assigned index of columns"""
return pd.DataFrame(head.values-tail.values, columns=['x', 'y', 'z']).set_index(self.data.index)
def get_axis_data(self, axis):
"""Returns axis data of all nodes and vectors"""
data_dict = {}
for name, data in self.nodes.items():
data_dict[name]=data[axis].values
for name, data in self.vectors.items():
data_dict[name]=data[axis].values
return pd.DataFrame(data_dict).set_index(self.data.index)
def get_triangular_area_by_nodes(self, node1, node2, node3):
r"""Calculates triangular area by node names"""
coord1, coord2, coord3 = self[node1], self[node2], self[node3]
name = '_'.join([node1, node2, node3])
data = self.get_triangular_area_by_coords(coord1, coord2, coord3)
return pd.Series(data=data, name=name, index=self.data.index)
@property
def area(self):
"""Returns T-series areas from all combination of nodes"""
nodes_combinations = list(combinations(self.nodes.keys(), 3))
return pd.DataFrame([self.get_triangular_area_by_nodes(*c) for c in nodes_combinations]).T
@property
def angle(self):
"""Returns T-series angles between predefined two vectors"""
data_dict = {}
for name, vectors in self._angles.items():
data_dict[name]=self.get_angle(self[vectors['left']], self[vectors['right']])
return pd.DataFrame(data_dict).set_index(self.data.index)
@property
def angle_velocity(self):
"""Returns T-series angles velocity between predefined two vectors"""
return self.angle.diff()*self.frame_rate
@property
def angle_acceleration(self):
"""Returns T-series angles acceleration between predefined two vectors"""
return self.angle_velocity.diff()*self.frame_rate
@property
def vector_length(self):
"""Returns length of all vectors"""
data_dict = {}
for name, vector in self.vectors.items():
data_dict[name]=self.get_distance(vector)
return pd.DataFrame(data_dict).set_index(self.data.index)
@property
def stretch_index(self):
"""Returns stretch_index which is equal to zscore of vector length"""
return self.vector_length.apply(zscore)
@property
def distance(self):
"""Returns inter-frame distances of all coords"""
data_dict = {}
for name, node in self.nodes.items():
data_dict[name]=self.get_distance(node.diff())
return pd.DataFrame(data_dict).set_index(self.data.index)
@property
def velocity(self):
"""Returns moment velocity of all coords"""
return self.distance*self.frame_rate
@property
def acceleration(self):
"""Returns moment acceleration of all coords"""
return self.velocity.diff()*self.frame_rate
@property
def cummulative_distance(self):
"""Returns T-series cumulative distance of all coords"""
return self.distance.cumsum()
@property
def total_distance(self):
"""Returns total explored distance of all coords """
return self.cummulative_distance.max()
def get_vector_coords(self, vector_name):
"""Returns dict of coordinate dataframes for the nodes of the given vector name."""
return dict(
x = self.x[self._vectors[vector_name].values()],
y = self.y[self._vectors[vector_name].values()],
z = self.z[self._vectors[vector_name].values()],
)
def get_projection(self, vector, to):
"""Project 3D coordinates on to axis or plane"""
zero_axis = list(set('xyz')-set(to))
kwargs = {axis:0 for axis in zero_axis}
return vector.assign(**kwargs)
def x_projection(self, node):
"""Project 3D coordinates on to x axis"""
return self.get_projection(vector=self[node], to='x')
def y_projection(self, node):
"""Project 3D coordinates on to y axis"""
return self.get_projection(vector=self[node], to='y')
def z_projection(self, node):
"""Project 3D coordinates on to z axis"""
return self.get_projection(vector=self[node], to='z')
def xy_projection(self, node):
"""Project 3D coordinates on to xy plane"""
return self.get_projection(vector=self[node], to='xy')
def yz_projection(self, node):
"""Project 3D coordinates on to yz plane"""
return self.get_projection(vector=self[node], to='yz')
def xz_projection(self, node):
"""Project 3D coordinates on to xz plane"""
return self.get_projection(vector=self[node], to='xz')
@property
def transform(self):
"""Transform module for coordinate change"""
return self._transform
@property
def animate(self):
"""Animation module for coordinate change"""
return self._animate
@property
def describe(self):
"""Feature Extract module for coordinate change"""
return self._describe
@property
def annotation(self):
"""Annotation module for behavior screening"""
return self._annotation
def corr(self, data, window=None, center=True, **kwargs):
"""Returns rolling correlation with given property of data
:param data: (str|pd.DataFrame)
:param window: (int|None) if provided rolling corr is provided
"""
if isinstance(data, str):
data = self[data]
if window==None:
return self.flatten_pairwise_df(data.corr())
return self.get_rolling_corr(data, window, center, **kwargs)
def xcorr_lag(self, data, flatten=True):
"""Returns cross correlation time lag by pairwise column calculation. See also xcorr_max
"""
if isinstance(data, str):
data = self[data]
df = self.pairwise_apply_func(data, lambda in1, in2: self.xcorr(in1, in2)['lag']/self.frame_rate)
if flatten:
return self.flatten_pairwise_df(df)
return df
def xcorr_max(self, data, flatten=True):
"""Returns cross correlation max value by pairwise column calculation. See also xcorr_lag
"""
if isinstance(data, str):
data = self[data]
df = self.pairwise_apply_func(data, lambda in1, in2: self.xcorr(in1, in2)['max'])
if flatten:
return self.flatten_pairwise_df(df)
return df
def apply(self, func):
return func(self)
def gather(self, features=['x', 'y', 'z', 'aop_x', 'aop_y', 'aop_z', 'aoa_x', 'aoa_y', 'aoa_z', 'velocity', 'acceleration', 'angle', 'angle_velocity', 'angle_acceleration', 'stretch_index', 'area'], multi_index=False):
df = pd.concat({feature:self[feature] for feature in features}, axis=1)
|
from collections import OrderedDict
import requests
import pandas as pd
import plotly.graph_objs as go
# default list of all countries of interest
country_default = OrderedDict(
[
("Canada", "CAN"),
("United States", "USA"),
("Brazil", "BRA"),
("France", "FRA"),
("India", "IND"),
("Italy", "ITA"),
("Germany", "DEU"),
("United Kingdom", "GBR"),
("China", "CHN"),
("Japan", "JPN"),
]
)
# World Bank indicators of interest for pulling data
indicators_default = [
"EG.USE.ELEC.KH.PC", # Electric power consumption (kWh per capita)
"EG.FEC.RNEW.ZS", # Renewable energy consumption (% of total final energy consumption)
"EG.USE.PCAP.KG.OE", # Energy use (kg of oil equivalent per capita)
"EN.ATM.GHGT.KT.CE", # Total greenhouse gas emissions (kt of CO2 equivalent)
]
def return_figures(countries=country_default, indicators=indicators_default):
"""Creates four plotly visualizations using the World Bank API
# Example of the World Bank API endpoint:
# arable land for the United States and Brazil from 1990 to 2015
# http://api.worldbank.org/v2/countries/usa;bra/indicators/AG.LND.ARBL.HA?date=1990:2015&per_page=1000&format=json
Args:
countries (OrderedDict): list of countries for filtering the data
indicators (list): World Bank indicator codes to pull
Returns:
list (dict): list containing the four plotly visualizations
"""
# prepare filter data for World Bank API
# the API uses ISO-3 country codes separated by ;
country_filter = ";".join([c.lower() for c in list(countries.values())])
# stores the data frames with the indicator data of interest
data_frames = []
# url endpoints for the World Bank API
urls = []
# pull data from World Bank API and clean the resulting json
# results stored in data_frames variable
for indicator in indicators:
url = (
"http://api.worldbank.org/v2/country/"
+ country_filter
+ "/indicator/"
+ indicator
+ "?date=1990:2015&per_page=1000&format=json"
)
urls.append(url)
try:
resp = requests.get(url)
data = resp.json()[1]
except:
print(f"could not load data for {indicator}")
for value in data:
value["indicator"] = value["indicator"]["value"]
value["country"] = value["country"]["value"]
data_frames.append(data)
# first chart plots Electric power consumption (kWh per capita) from 1990 to 2015
# in top 10 economies as a line chart
graph_one = []
df_one = pd.DataFrame(data_frames[0])
# filter and sort values for the visualization
# filtering plots the countries in decreasing order by their values
df_one = df_one[
df_one["date"].isin(["1990", "1995", "2000", "2005", "2010", "2014"])
]
df_one.sort_values("date", inplace=True)
# this country list is re-used by all the charts to ensure legends have the same
# order and color
countrylist = df_one.country.unique().tolist()
for country in countrylist:
x_val = df_one[df_one["country"] == country].date.tolist()
y_val = df_one[df_one["country"] == country].value.tolist()
graph_one.append(
go.Scatter(x=x_val, y=y_val, mode="lines+markers", name=country)
)
layout_one = dict(
title="Electric power consumption 1990 to 2015<br>(kWh per capita)",
xaxis=dict(title="Year"),
yaxis=dict(title="KWh (Thousand)"),
)
# second chart plots Renewable energy consumption (% of total final energy consumption)
# from 1990 to 2015 as a line chart
graph_two = []
df_two = pd.DataFrame(data_frames[1])
|
"""
This script contains functions to merge separately crawled datasets
and visualize the data in terms of stats.
"""
import pandas as pd
import numpy as np
import csv
import glob
import seaborn as sns
import matplotlib.pyplot as plt
import os
from pprint import pprint
import requests
import operator
desired_width=320
pd.set_option('display.width', desired_width)
np.set_printoptions(linewidth=desired_width)
pd.set_option('display.max_columns',25)
licenseshortDict = {
'Not Specified': 'Not Specified',
'LaTeX Project Public License v1.3c': 'Latex v1.3c',
'Creative Commons Attribution Share Alike 4.0 International': 'Sharealike 4.0',
'GNU Lesser General Public License v2.1': 'GNU LGPL v2.1',
'ISC License': 'ISC',
'PostgreSQL License': 'PostgreSQL',
'Microsoft Reciprocal License': 'MS Reciprocal',
'Universal Permissive License v1.0': 'UPL v1.0',
'BSD 2-Clause "Simplified" License': 'BSD 2',
'Other': 'Other',
'Apache License 2.0': 'Apache 2.0',
'GNU General Public License v2.0': 'GNU GPL 2.0',
'Microsoft Public License': 'MS Public',
'GNU General Public License v3.0': 'GNU GPL v3.0',
'The Unlicense': 'Unlicense',
'MIT License': 'MIT',
'Eclipse Public License 1.0': 'Eclipse 1.0',
'SIL Open Font License 1.1': 'SIL OF 1.1',
'Do What The F*ck You Want To Public License': 'DWTFYWT',
'BSD 4-Clause "Original" or "Old" License': 'BSD 4',
'Artistic License 2.0': 'Artistic 2.0',
'Educational Community License v2.0': 'ECL v2.0',
'Creative Commons Attribution 4.0 International': 'CCA 4.0',
'Boost Software License 1.0': 'BSL 1.0',
'zlib License': 'zlib',
'European Union Public License 1.2': 'EUPL 1.2',
'European Union Public License 1.1': 'EUPL 1.1',
'GNU Lesser General Public License v3.0': 'GNU LGPL v3.0',
'BSD 3-Clause "New" or "Revised" License': 'BSD 3 - CNRL',
'BSD 3-Clause Clear License': 'BSD 3 - CCL',
'Mozilla Public License 2.0': 'Mozilla 2.0',
'Open Software License 3.0': 'OSL 3.0',
'GNU Affero General Public License v3.0': 'GNU AGPL v3.0',
'University of Illinois/NCSA Open Source License': 'UI/NCSA',
'Academic Free License v3.0': 'AFL v3.0',
'Creative Commons Zero v1.0 Universal': 'CCZ v1.0',
'Eclipse Public License 2.0': 'Eclipse 2.0',
'BSD Zero Clause License': 'BSD Zero'
}
def merge_csv(csvtypename):
all_filenames = [i for i in glob.glob('../dataset_merge/{}_*.csv'.format(csvtypename))]
combined_csv = pd.concat([pd.read_csv(f, low_memory=False) for f in all_filenames])
#combined_csv.to_csv( "combined_csv.csv", index=False, encoding='utf-8-sig')
#combined_csv.drop_duplicates(subset='project_id', keep='last', inplace=True)
return combined_csv
def getdays(days):
return int(str(days).split()[0])
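# Illustrative usage sketch (added for clarity, not part of the original script): 'projectinfo'
# is a placeholder prefix for CSV files located under ../dataset_merge/.
def _example_merge_csv():
    combined = merge_csv('projectinfo')
    print(combined.shape)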
def merge_csv_language(languagelist):
all_filenames = [i for i in glob.glob('../dataset_merge/devlanguages_*.csv')]
dflist = []
languagelist = ['project_id']+languagelist
for f in all_filenames:
df_f = pd.read_csv(f, low_memory=False)
df_f = df_f.loc[:, languagelist]
dflist.append(df_f)
combined_csv = pd.concat(dflist)
|
import pandas as pd
# Script to plot the file from memberstats
tsv_data = pd.read_csv('history.tsv', sep='\t')
|
from typing import ChainMap, Optional
from altair.vegalite.v4.api import value
from altair.vegalite.v4.schema.channels import Longitude, Opacity, YValue
from altair.vegalite.v4.schema.core import Gradient, GradientStop
from ipywidgets.widgets import widget
from pandas.core.tools.datetimes import Scalar
import streamlit as st
import numpy as np
import pandas as pd
from streamlit.proto.Button_pb2 import Button
import datetime
import altair as alt
import matplotlib.pyplot as plt
from PIL import Image
from streamlit.type_util import is_pandas_version_less_than
import plotly.graph_objects as go
import DogService as ss
# D:\temp\betaGo>streamlit run web.py
icon = Image.open("logo.png")
st.set_page_config(layout='wide', page_title='betaGo',page_icon= icon,
# primaryColor="#F63366"
# backgroundColor="#FFFFFF"
# secondaryBackgroundColor="#F0F2F6"
# textColor="#262730"
# font="sans serif"
) # web name
st.sidebar.title("betaGo")
st.sidebar.write('-------')
# define the account
user_name =''
user_pass =''
switch =0 # close
def test():
with st.form("mood"):
st.write("How is your dog today")
mood_score =st.slider("Score for activity")
workout =st.checkbox("Did you work out today")
# Every form must have a submit button.
submitted1 =st.form_submit_button("Submit")
if submitted1:
st.write("mood:",mood_score,", work out:",workout)
# st.write("Outside the form")
# st.write(slider_val)
with st.form("food"):
st.write("How is your dog's diet today")
food_score =st.slider("Score for appetite")
# Every form must have a submit button.
submitted2 =st.form_submit_button("Submit")
if submitted2:
st.write("appetite:",food_score)
with st.form('recording'):
record = st.text_area('what\'s interesting today')
submitted3 =st.form_submit_button("Submit")
if submitted3:
st.write("remembered")
with st.form('picture'):
file =st.file_uploader("today's moment")
if file is not None:
st.image(file)
submitted4 =st.form_submit_button("Submit")
if submitted4:
st.write("submitted")
age =9
now ='2021-7-31'
ini_lat = 22
ini_lon = 114
def user():
if switch !=1:
return st.warning('no account')
st.sidebar.write('refresh')
# if switch ==2:
# pet_name =
resign_flag = st.sidebar.checkbox('Resign a dog!')
commit_flag = False
cancel_flag = False
print('resign_flag is: ',resign_flag)
print('Commit_flag is: ',commit_flag)
if resign_flag and commit_flag==False:
name = st.text_input('Dog\'s name:')
birthday = st.date_input('Birthday:')
gender = 'true' if st.selectbox('Gender:',['Male','Female'])=='Male' else 'false'
species = st.text_input('Dog\'s species:')
picture =st.file_uploader("photo")
if name != '' and species != '':
commit_flag = st.button('Commit!')
if commit_flag:
ss.resign_dogs(name,birthday,gender,species,picture)
resign_flag = False
commit_flag = False
search_dog =st.sidebar
user_info =st.sidebar.checkbox('check my information')
if user_info:
st.title(user_name)
null0_1,row0_2,null0_2,row0_3,null0_3 =st.beta_columns((0.23,5,0.3,5,0.17))
with row0_2:
dog_n =st.selectbox(' ',ss.get_all_dogs()['name'])
st.write(ss.get_dog_id_by_name(dog_n).values)
st.image(ss.get_photo(ss.get_dog_id_by_name(dog_n).values[0])[0], use_column_width =True, caption ='my photo')
st.write('there should be a place to shift the photo that has been uploaded')
with row0_3:
st.write('''
### **Introduction**
''')
with row0_3:
st.write('there should be some thing you want to show about yourself')
with row0_3:
st.dataframe(ss.get_all_dogs())
st.write('you have: ' + str(len(ss.get_all_dogs())) + ' dogs')
def data():
if switch !=1:
return st.warning('no account')
them_color ='green'
@st.cache(allow_output_mutation =True)
def fetch_data(pet_name):
heart_rate =pd.read_csv('heart_daily_'+pet_name+'.csv')
return heart_rate
# @st.cache(ttl=300)
def present(pic,mood,heart,respiration,temp,pose,step):
# ====================row 0====================
row0_2,row0_4,row0_6 =st.beta_columns((0.01,0.02,0.01))
with row0_4:
get_name =st.selectbox('NAME',options=ss.get_all_dogs()['name'])
with row0_2:
if len(get_name) !=0:
st.image(pic,use_column_width =True, caption ='my photo')
## birthday
# with row0_4:
# if len(get_name) ==0:
# st.write('''
# choose your dog please
# ''')
# else:
# # birth_d =datetime.datetime.strptime(ss.get_dogs_by_name(get_name)['birthday'],'%Y-%m-%d')
# # st.write(ss.get_dogs_by_name(get_name)['birthday'])
# # st.write(type(pd.DataFrame(ss.get_dogs_by_name(get_name))['birthday']))
# st.write('''
# your dog is **\'{}** years old
# '''.format((ss.get_dogs_by_name(get_name))['birthday'])
## )
with row0_6:
st.image(mood, use_column_width =True, caption ='mood')
null0,row0,null1 =st.beta_columns((0.5,1,0.5))
# row0.write('====================================')
st.write('-------')
        st.success('health data')
# =====================row 1===================
row1_1,null1_2,row1_3 =st.beta_columns((1,0.5,1))
null_10_0,row11_1,row13_3,null14_4 =st.beta_columns((0.1,2.5,1,0.1))
null_30_0,row31_1,row33_3,null34_4 =st.beta_columns((0.1,2.5,1,0.1))
# row11_1,row13_3 =st.beta_columns((1,0.5))
with row1_1:
heart_check =st.checkbox('heart rate')
st.image('heart.png', caption='heart rate is '+heart+' beat/min',width =100)
if heart_check:
with row11_1:
# df = fetch_data('gaga')
df_sh = ss.get_heart_rate(2)
# st.dataframe(df_s)
st.altair_chart(
alt.Chart(df_sh,
height =200
).mark_area(
line ={'color':'#C84B05'},
color =alt.Gradient(
gradient ='linear',
stops =[alt.GradientStop(color ='white',offset =0.2), # color could be changed with theme
alt.GradientStop(color ='#C84B05', offset =1)],
x1=1,x2=1,y1=1,y2=0
)
).encode(
alt.X('time', type='quantitative',title='hour of day'),
alt.Y('heart_rate', type='quantitative', title='heart beat',scale =alt.Scale(domain=[min(df_sh['heart_rate'])-1,max(df_sh['heart_rate'])+1]))),
use_container_width= True
)
with row13_3:
st.write('''
### illustration:
''')
st.write('''
some description about dog's heart health condition, and a button to illustrate the criterion
''')
st.write(
"""
### [🐕](https://sustech.edu.cn)
""")
with row1_3:
breath_check =st.checkbox('respiration')
st.image('lung.png', caption='respiration rate is '+respiration+' times/min',width =100)
if breath_check:
with row31_1:
df_sb = fetch_data('gaga')
st.altair_chart(
alt.Chart(df_sb,
height =200
# ,width = 100.0
).mark_area(
line ={'color':'#C84B05'},
color =alt.Gradient(
gradient ='linear',
stops =[alt.GradientStop(color ='white',offset =0.2), # color could be changed with theme
alt.GradientStop(color ='#C84B05', offset =1)],
x1=1,x2=1,y1=1,y2=0
)
).encode(
alt.X('time', type='quantitative',title='hour of day'),
                    alt.Y('data', type='quantitative', title='breath rate',scale =alt.Scale(domain=[min(df_sb['data'])-1,max(df_sb['data'])+1]))),
use_container_width= True
)
with row33_3:
st.write('''
### illustration:
''')
st.write('''
some description about dog's breath health condition, and a button to illustrate the criterion
''')
st.write("""
### [🐕](https://sustech.edu.cn) """)
st.write('-------')
st.success('activity')
# ====================row 2====================
row2_2,row2_4,null2_5,row2_6,null2_7 =st.beta_columns((0.1,0.2,0.05,0.1,0.01))
with row2_2:
# fig,ax =plt.subplot()
temp_df =pd.DataFrame([temp],columns=['temp'])
st.bar_chart(temp_df,width= 10, use_container_width= True)
# st.pyplot(plt.bar(x ='temperature', height =temp))
with row2_4:
pose_sr =['down.png','sit.png','sleep.png','stand.png']
pose_intro =['downward','sitting','sleeping','standing']
            st.write('my dog is {} now'.format(pose_intro[pose]))
st.image(pose_sr[pose],use_column_width= True)
st.write('''
###### [learn more about dog's pose](https://www.baidu.com)
''')
with row2_6:
step_df =pd.DataFrame([step],columns=['step'])
st.bar_chart(step_df,width= 10, use_container_width= True)
def long_term(pic,gender,mood_index,heart_rate,breath_rate,pose):
row0_2,row0_4,row0_6 =st.beta_columns((0.01,0.02,0.01))
with row0_4:
get_name2 =st.selectbox('NAME',options=ss.get_all_dogs()['name'])
with row0_2:
if len(get_name2) !=0:
st.image(pic,use_column_width =True, caption ='my photo')
with row0_6:
if gender ==1:
st.image('male.png', use_column_width =True)
if gender ==0:
st.image('female.png',use_column_width =True)
# ================row 1======================
st.write('-------')
st.write('''
* ## **mood index**
''')
fig, ax = plt.subplots()
ax.hist(mood_index, bins=20)
st.pyplot(fig)
# ================row 2======================
st.write('----------')
st.write('''* ## **Health data**''')
select_heart_breath =st.selectbox(' ',options=['heart rate','respiration'])
if select_heart_breath =='heart rate':
df_l =heart_rate
fig1 =go.Figure()
fig1.add_trace(go.Scatter(x =df_l['date'], y =df_l['data'],
name ='Heart rate', opacity =0.7,
line =dict(color =them_color,width =2)))
fig1.add_vrect(x0 =str(df_l['date'][0]),x1 =str(df_l['date'][len(df_l['date'])-1]),
line_width =0, fillcolor =them_color, opacity =0.2, annotation_text =get_name2,
annotation_position ='inside top left',
annotation =dict(font_size =14,font_family ="Comic Sans MS"))
fig1.update_xaxes(
rangeselector_activecolor="#EBD2B9",
rangeslider_visible=True
)
fig1.update_layout(
xaxis_title ='date',
yaxis_title ='heart rate',
height =600
)
st.plotly_chart(fig1)
elif select_heart_breath =='respiration':
df_lb =breath_rate
fig2 =go.Figure()
fig2.add_trace(go.Scatter(x =df_lb['date'], y =df_lb['data'],
name ='respiration rate', opacity =0.7,
line =dict(color =them_color,width =2)))
fig2.add_vrect(x0 =str(df_lb['date'][0]),x1 =str(df_lb['date'][len(df_lb['date'])-1]),
line_width =0, fillcolor =them_color, opacity =0.2, annotation_text =get_name2,
annotation_position ='inside top left',
annotation =dict(font_size =14,font_family ="Comic Sans MS"))
fig2.update_xaxes(
rangeselector_activecolor="#EBD2B9",
rangeslider_visible=True
)
fig2.update_layout(
xaxis_title ='date',
yaxis_title ='breath rate',
height =600
)
st.plotly_chart(fig2)
# ===============row 3===============
st.write('-----------')
st.write('''
* ## **Activity**
''')
df_lb =
|
pd.read_csv('heart_longterm_gaga.csv')
|
pandas.read_csv
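For reference, a minimal self-contained sketch of this call; the 'date' column and the parse_dates option are assumptions based on how the other per-pet CSVs are used above, not something the original code confirms:

import pandas as pd

# read the long-term series; parse_dates turns the assumed 'date' column into datetimes for plotting
df_lb = pd.read_csv('heart_longterm_gaga.csv', parse_dates=['date'])
print(df_lb.head())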
|
#!/usr/bin/python
import time
import argparse
import pickle
import sys
import numpy as np
import pandas as pd
from MinimizePositions import minimize_positions
from GraphVizLib import EdgeFix
from random import random, shuffle
class Term:
"""
This class represents a term.
A list of the attributes and what they are an abbreviation of:
id = GO id
ns = Namespace
df = def
name = name
acc1 = accuracy score model 1
acc2 = accuracy score model 2
imp = importance
p = parents
c = children
x = x coördinate
y = y coördinate
"""
def __init__(self):
self.id = "NA"
self.ns = ["NA"]
self.df = ["NA"]
self.name = ["NA"]
self.acc1 = "NA"
self.acc2 = "NA"
self.imp = "NA"
self.p = set()
self.c = set()
self.x = "NA"
self.y = "NA"
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('obofile', help="A GO obo file")
parser.add_argument('--model1', help="The first model, this file should "
"contain a column of GO terms and a column of accuracies")
parser.add_argument('--model2', help="The second model, this file should "
"contain a column of GO terms and a column of accuracies")
parser.add_argument('--importance', help="A file containing the importance "
"of each node ordered exactly like model1")
parser.add_argument('--storage_location', help="A filename with or without a path")
parser.add_argument('--pickled', help="file is pickled", action="store_true")
parser.add_argument('--cut_first', help="Cut before calculation of node coordinates",
action="store_true")
parser.add_argument('--cut_last', help="Cut after calculation of node coordinates",
action="store_true")
parser.add_argument('--save', help="Save node coordinates in a pickle file",
action="store_true")
args = parser.parse_args()
def get_user_info():
"""
This function imports accuracies from both accuracy files
    and importance out of the importance file, but only if the
appropriate arguments are given when the parser is called.
"""
if args.model1:
with open(args.model1, "r") as model1:
user_info = {}
id_order = []
for line in model1:
goid, accuracy_model1 = line.split("\t")
if goid not in user_info:
user_info[goid] = [float(accuracy_model1.rstrip('\n'))]
id_order.append(goid)
else:
print("Duplicate accuracy found!\t", goid)
if args.model2:
with open(args.model2) as model2:
for line in model2:
goid, accuracy_model2 = line.split("\t")
if goid not in user_info:
print("Term lacks accuracy from first model:\t", goid)
else:
user_info[goid].append(float(accuracy_model2.rstrip('\n')))
if args.importance:
with open(args.importance, "r") as importance:
position = 0
for line in importance:
user_info[id_order[position]].append(float(line.rstrip('\n')))
position += 1
return user_info
else:
return {}
def unpickle():
"""
This function loads Terms from a pickled file and returns them
as a dictionary.
"""
    all_terms = {}
    with open(args.storage_location, "rb") as infile:
        objectlist = pickle.load(infile)
    for stored_term in objectlist:
        new_term = Term()
        for attribute, value in stored_term:
            if attribute == "id":
                new_term.id = value
            elif attribute == "ns":
                new_term.ns = value
            elif attribute == "df":
                new_term.df = value
            elif attribute == "name":
                new_term.name = value
            elif attribute == "acc1":
                new_term.acc1 = value
            elif attribute == "acc2":
                new_term.acc2 = value
            elif attribute == "imp":
                new_term.imp = value
            elif attribute == "p":
                new_term.p = value
            elif attribute == "c":
                new_term.c = value
            elif attribute == "x":
                new_term.x = value
            elif attribute == "y":
                new_term.y = value
        # register the restored term under its GO id; without this the function returned an empty dict
        key = new_term.id[0] if isinstance(new_term.id, list) else new_term.id
        all_terms[key] = new_term
    return all_terms
def getterm(stream):
"""
This function travels through a given datastream and records all lines
until it finds the start of a new term or the end of a useful section
of the obofile.
"""
block = []
for line in stream:
if line.strip() == "[Term]" or line.strip() == "[Typedef]":
break
else:
if line.strip() != "":
block.append(line.strip())
return block
def parsetagvalue(term):
"""
This function obtains all attributes of a term and puts it in
    a dictionary. It returns an empty string if the function
    input is not a GO term.
:param term:
A list containing all information from one term obtained
from the obofile.
:return:
"""
if not term or term[0][0:7] != 'id: GO:':
return ''
data = {}
for line in term:
tag = line.split(': ', 1)[0]
value = line.split(': ', 1)[1]
if tag not in data:
data[tag] = []
data[tag].append(value)
return data
def fill_new_term(term, user_info):
"""
This function fills a new Term object with attributes of a given term.
    The parents are labeled with their relationship type to the child.
User added input like accuracy and importance are only added if
the first model accuracy has been given.
"""
new_term = Term()
new_term.name = term['name']
new_term.id = term['id']
new_term.ns = term['namespace']
new_term.df = term['def']
#add parents
if "is_a" in term:
for parent in term["is_a"]:
new_term.p.add((parent.split(" ! ")[0], "is_a"))
if "relationship" in term:
for parent in term["relationship"]:
relationship, listy = parent.split(' ', 1)
new_term.p.add((listy.split(' ! ')[0], relationship))
#add user info to node
if args.model1:
if new_term.id[0] in user_info:
new_term.acc1 = user_info[new_term.id[0]][0]
if not args.model2 and args.importance:
new_term.imp = user_info[new_term.id[0]][1]
elif args.model2 and not args.importance:
new_term.acc2 = user_info[new_term.id[0]][1]
elif args.model2 and args.importance:
new_term.acc2 = user_info[new_term.id[0]][1]
new_term.imp = user_info[new_term.id[0]][2]
else:
print("Something went wrong with obtaining user attributes!", new_term.id)
return new_term
def precut(layers, links, all_terms, user_info):
"""
This function cuts terms in layers if they do not exist inside
the accuracy file of model 1.
It also cuts all links if one of the terms inside does not exist
inside the accuracy file of model 1.
    Finally it cuts all terms that do not exist inside the accuracy
file of model 1.
:return:
Cut layers, links (edges) and terms are returned.
"""
new_layers = []
for layer in layers:
new_layer = []
for node in layer:
if node in user_info:
new_layer.append(node)
if len(new_layer) != 0:
new_layers.append(new_layer)
else:
new_layer = []
new_links = set()
for link in links:
if link[0] in user_info and link[1] in user_info:
new_links.add(link)
new_all_terms = {}
for term in all_terms:
if term in user_info:
new_all_terms[term] = all_terms[term]
return new_layers, new_links, new_all_terms
def layer_assignment(terms):
"""
This function implements a longest-path algorithm
to assign nodes to layers. this algorithm is based on
Healy, P., & Nikolov, <NAME>. (2013). Hierarchical
Drawing Algorithms. Handbook of Graph Drawing
and Visualization, 409–453
:return:
A list containing layers (lists).
"""
terms_copy = {}
for goid in terms:
terms_copy[goid] = terms[goid].p
final_layers = []
all_layers = {}
last_layer = {}
while 0 != len(terms_copy):
current_layer = []
for term in last_layer:
terms_copy.pop(term)
for term in terms_copy:
add_to_layer = True
for child in terms_copy[term]:
if child[0] not in last_layer and child[0] not in all_layers:
add_to_layer = False
if add_to_layer:
current_layer.append(term)
for term in last_layer:
all_layers[term] = 0
if len(current_layer) != 0:
final_layers.append(current_layer)
last_layer = {}
for item in current_layer:
last_layer[item] = 0
final_layers.reverse()
return final_layers
def edges_and_links(all_terms):
"""
This function assigns children of a node to the parent
and defines links (edges) during this action.
:return:
a list of sets containing (parent, child, relationship type)
"""
links = set()
for term_id in all_terms:
term = all_terms[term_id]
for parent in term.p:
links.add((parent[0], term_id, parent[1]))
all_terms[parent[0]].c.add((term_id, parent[1]))
return links
def store_obofile(all_terms):
"""
This function stores a dictionary of Term objects into a file
through the use of the pickle module.
"""
with open(args.storage_location, "wb") as outfile:
pickableterms = []
for term in all_terms:
term_list = []
for i in all_terms[term].__dict__:
term_list.append([i, all_terms[term].__getattribute__(i)])
pickableterms.append(term_list)
term_list = []
gurkin = pickle.dumps(pickableterms)
outfile.write(gurkin)
def fill_edge_dict(layers, links, all_terms, edge_dict):
"""
This function fills the edge dataframe dictionary
    with the appropriate content for each edge.
:param layers:
A list of layers.
:param links:
A list of link sets.
:param all_terms:
A dictionary with the GO id's as keys and term objects as values.
:param edge_dict:
        A dictionary resembling a dataframe with the keys
as column names and values as rows.
"""
color_dict = {"is_a": "pink", "regulates": "blue", "part_of": "lilac",
"negatively_regulates": "red", "positively_regulates": "green", "NA": "black"}
layer_dict = EdgeFix.make_layer_dict(layers)
for (term1, term2, edgetype) in links:
if "DUMMY" in term2:
if "DUMMY" not in term1:
child, start, cnt, xlist, ylist = term2, layer_dict[term2], 1, [], []
edge_dict["Term1"].append(term1)
edge_dict["Type"].append(edgetype)
edge_dict["Color"].append(color_dict[edgetype])
xlist.append(all_terms[term1].x)
ylist.append(layer_dict[term1])
while "DUMMY" in child:
xlist.append(all_terms[child].x)
ylist.append(layer_dict[child])
assert len(all_terms[child].c) == 1
child = list(all_terms[child].c)[0][0]
xlist.append(all_terms[child].x)
ylist.append(layer_dict[child])
edge_dict["Term2"].append(child)
edge_dict['x'].append(xlist)
edge_dict['y'].append(ylist)
elif "DUMMY" in term1:
pass
else:
edge_dict["Term1"].append(term1)
edge_dict["Term2"].append(term2)
edge_dict["Type"].append(edgetype)
edge_dict["Color"].append(color_dict[edgetype])
edge_dict['x'].append([all_terms[term1].x, all_terms[term2].x])
edge_dict['y'].append([layer_dict[term1], layer_dict[term2]])
def fill_node_dict(layers, links, all_terms, node_dict):
"""
This function fills the node dataframe dictionary
    with the appropriate content for each term.
:param layers:
A list of layers.
:param links:
A list of link sets
:param all_terms:
        A dictionary with the GO ids as keys and term objects as values.
:param node_dict:
        A dictionary resembling a dataframe with the keys
as column names and values as rows.
"""
layer_dict = EdgeFix.make_layer_dict(layers)
layer_len = len(layers)
for term in all_terms:
if "DUMMY" not in term:
term_obj = all_terms[term]
node_dict['Term'].append(str(term))
node_dict['X'].append(term_obj.x)
node_dict['Y'].append(layer_dict[term])
node_dict['Score1'].append(term_obj.acc1)
node_dict['Score2'].append(term_obj.acc2)
node_dict['SearchDescription'].append(" ".join([term_obj.df[0], term_obj.ns[0], term_obj.name[0]]))
node_dict['Description'].append(term_obj.df[0])
node_dict['Namespace'].append(term_obj.ns[0])
node_dict['Importance'].append(term_obj.imp)
node_dict['Difference'].append(term_obj.acc1 - term_obj.acc2 if term_obj.acc1 != "NA" and term_obj.acc2 != "NA" else "NA")
node_dict['Top'].append(layer_dict[term] + 0.3)
node_dict['Bottom'].append(layer_dict[term] - 0.3)
node_dict['Layer'].append(str(layer_len - layer_dict[term] - 1))
index_dict = {}
for i, term in enumerate(node_dict['Term']):
index_dict[term] = i
for term in node_dict['Term']:
if "DUMMY" not in term:
children, parents = [], []
for child in all_terms[term].c:
if "DUMMY" not in child[0] and child[0] in index_dict:
children.append(index_dict[child[0]])
for parent in all_terms[term].p:
if "DUMMY" not in parent[0] and parent[0] in index_dict:
parents.append(index_dict[parent[0]])
node_dict['Children'].append(children)
node_dict['Parents'].append(parents)
def make_tree_dataframe(layers, links, all_terms):
"""
This function constructs two dictionaries which represents
the term dataframe and edge dataframe.
:param layers:
List of layers
:param links:
List of link sets
:param all_terms:
        A dictionary with the GO ids as keys and term objects as values.
:return:
edge dataframe, node dataframe
"""
edge_dict = {"Term1": [], "Term2": [], "x": [], "y": [], "Type": [], "Color": []}
node_dict = {"Term": [], "X": [], "Y": [], "Score1": [], "Score2": [], "SearchDescription": [],
"Description": [], "Namespace": [], "Importance": [], "Difference": [], "Top": [],
"Bottom": [], "Parents": [], "Children": [], "Layer": []}
fill_edge_dict(layers, links, all_terms, edge_dict)
fill_node_dict(layers, links, all_terms, node_dict)
edges = pd.DataFrame.from_dict(edge_dict)
nodes =
|
pd.DataFrame.from_dict(node_dict)
|
pandas.DataFrame.from_dict
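For reference, a minimal sketch of how a column-oriented dict like edge_dict/node_dict above turns into a dataframe; the toy keys and values here are made up:

import pandas as pd

# keys become column names, the lists become the rows
toy_dict = {"Term1": ["GO:0000001", "GO:0000002"], "Term2": ["GO:0000003", "GO:0000003"], "Type": ["is_a", "part_of"]}
print(pd.DataFrame.from_dict(toy_dict))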
|
import os
import pickle
from functools import partial
import custom
import pandas as pd
import torch
import torch.nn as nn
from torch.autograd import Variable
CUSTOM_MODEL_TORCH_EXTENSION = ".pth"
REGRESSION_PRED_COLUMN = "Predictions"
def load_serialized_model():
"""
    Load the serialized model from its artifact.
Returns
-------
Any
The deserialized model
Raises
------
    FileNotFoundError if no serialized model file with the
    `.pth` extension could be found in the working directory
"""
serialized_model_file = None
model = custom.load_model()
if model:
return model
for filename in os.listdir("."):
path = os.path.join(".", filename)
if os.path.isdir(path):
continue
if filename.endswith(CUSTOM_MODEL_TORCH_EXTENSION):
if serialized_model_file:
raise ValueError(
"Multiple serialized model files found. Remove extra artifacts "
"or overwrite custom.load_model"
)
serialized_model_file = path
if not serialized_model_file:
raise FileNotFoundError(
"Could not find serialized model file. Serialized model file name "
"should have an extension {}".format(CUSTOM_MODEL_TORCH_EXTENSION)
)
model = torch.load(serialized_model_file)
model.eval()
return model
def outer_predict(data, model=None, positive_class_label=None, negative_class_label=None, **kwargs):
"""
Makes predictions against the model using the custom predict
method and returns a pandas DataFrame
If the model is a regression model, the DataFrame will have a single column "Predictions"
If the model is a classification model, the DataFrame will have a column for each class label
with their respective probabilities
Parameters
----------
data: pd.DataFrame
Data to make predictions against
model: Any
The model
positive_class_label: str or None
The positive class label if this is a binary classification prediction request
negative_class_label: str or None
The negative class label if this is a binary classification prediction request
kwargs
Returns
-------
pd.DataFrame
"""
model = model or load_serialized_model()
internal_predict = partial(
_model_predict,
model=model,
positive_class_label=positive_class_label,
negative_class_label=negative_class_label,
**kwargs
)
return custom.predict(data, internal_predict)
def _model_predict(data, model, positive_class_label=None, negative_class_label=None, **kwargs):
"""
Internal prediction method for pytorch,
makes predictions against the model, and returns a pandas DataFrame
If the model is a regression model, the DataFrame will have a single column "Predictions"
If the model is a classification model, the DataFrame will have a column for each class label
with their respective probabilities
Parameters
----------
data: pd.DataFrame
Data to make predictions against
model: Any
The model
positive_class_label: str or None
The positive class label if this is a binary classification prediction request
negative_class_label: str or None
The negative class label if this is a binary classification prediction request
kwargs
Returns
-------
pd.DataFrame
"""
data = Variable(torch.from_numpy(data.values).type(torch.FloatTensor))
with torch.no_grad():
predictions = model(data).cpu().data.numpy()
if positive_class_label is not None and negative_class_label is not None:
if predictions.shape[1] == 1:
predictions = pd.DataFrame(predictions, columns=[positive_class_label])
predictions[negative_class_label] = 1 - predictions[positive_class_label]
else:
predictions = pd.DataFrame(
predictions, columns=[negative_class_label, positive_class_label]
)
else:
predictions =
|
pd.DataFrame(predictions, columns=[REGRESSION_PRED_COLUMN])
|
pandas.DataFrame
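For reference, a minimal sketch of wrapping a prediction array in a single-column dataframe, as in the regression branch above; the array values are made up:

import numpy as np
import pandas as pd

preds = np.array([[1.2], [3.4], [0.7]])  # hypothetical model output, one row per sample
print(pd.DataFrame(preds, columns=["Predictions"]))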
|
import pymysql
import pandas as pd
import logging
import traceback
logger = logging.getLogger(__name__)
TABLE_CANDLE_PATTERN = "CREATE TABLE IF NOT EXISTS {table}(" \
" id int(11) UNSIGNED NOT NULL AUTO_INCREMENT PRIMARY KEY, " \
" time DATETIME UNIQUE," \
" open FLOAT NOT NULL," \
" high FLOAT," \
" low FLOAT," \
" close FLOAT NOT NULL," \
" volume_to FLOAT," \
" volume_from FLOAT)"
class OneHourCandleDB(object):
def __init__(self):
pass
@staticmethod
def get_connection():
try:
connection = pymysql.connect(host='127.0.0.1',
user='root',
password='<PASSWORD>',
db='hourly_ohlc',
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor,
autocommit=True)
return connection
except pymysql.Error as e:
logger.exception(str(e))
print(traceback.print_exc())
def create_candle_table(self, asset):
connection = self.get_connection()
try:
with connection.cursor() as cursor:
query = TABLE_CANDLE_PATTERN.replace('{table}', asset)
cursor.execute(query)
if cursor.description:
logger.info(cursor.description)
finally:
connection.close()
def does_table_exist(self, table):
connection = self.get_connection()
try:
with connection.cursor(pymysql.cursors.SSCursor) as cursor:
query = "SHOW TABLES"
cursor.execute(query)
tables_in_db = cursor.fetchall()
for table_in_db in tables_in_db:
if table == table_in_db[0]:
return True
return False
finally:
if connection:
connection.close()
def insert_many_candles(self, table, candles):
self.create_candle_table(table)
connection = self.get_connection()
try:
with connection.cursor() as cursor:
query = "INSERT IGNORE INTO " + table + "(time, open, high, low, close, volume_to, volume_from)" \
" VALUES(FROM_UNIXTIME(%s),%s,%s,%s,%s,%s,%s)"
cursor.execute("SET time_zone='+00:00'")
cursor.executemany(query, candles)
if cursor.description:
logger.info(cursor.description)
return cursor.rowcount
finally:
if connection:
connection.close()
def get_last_candle(self, table):
connection = self.get_connection()
try:
with connection.cursor() as cursor:
query = "SELECT time FROM " + table + " ORDER BY time DESC LIMIT 1"
cursor.execute(query)
fetched_data = cursor.fetchone()
if fetched_data:
return fetched_data['time']
finally:
if connection:
connection.close()
def aggregate_candles(self, candles, aggregate):
idx = 0
res_candles = []
while True:
try:
open_time = candles[idx]['time']
if open_time.hour % aggregate == 0:
open_price = candles[idx]['open']
close_price = candles[idx + aggregate - 1]['close']
highest_price = candles[idx]['high']
lowest_price = candles[idx]['low']
total_volume_to = candles[idx]['volume_to']
total_volume_from = candles[idx]['volume_from'] if candles[idx]['volume_from'] else 0
for i in range(idx + 1, idx + aggregate):
if candles[i]['low'] < lowest_price:
lowest_price = candles[i]['low']
if candles[i]['high'] > highest_price:
highest_price = candles[i]['high']
total_volume_to = total_volume_to + candles[i]['volume_to']
                        total_volume_from = total_volume_from + (candles[i]['volume_from']
                                                                 if candles[i]['volume_from'] else 0)
res_candles.append({
'open': open_price,
'close': close_price,
'high': highest_price,
'low': lowest_price,
'volume_to': total_volume_to,
'volume_from': total_volume_from,
'time': open_time
})
idx = idx + aggregate
else:
idx = idx + 1
except IndexError as e:
break
return res_candles
def get_all_candles_between(self, table, start_dtm, end_dtm, aggregate=1):
connection = self.get_connection()
try:
with connection.cursor() as cursor:
query = "SELECT time, open, high, low, close, volume_from, volume_to " \
"FROM " + table + " WHERE time BETWEEN %s AND %s ORDER BY time ASC"
cursor.execute(query, (start_dtm, end_dtm))
candles = cursor.fetchall()
if candles:
aggregated_candles = self.aggregate_candles(candles, aggregate)
df =
|
pd.DataFrame(aggregated_candles)
|
pandas.DataFrame
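For reference, a minimal sketch of building a dataframe from a list of candle dicts like the ones produced by aggregate_candles; the values below are made up:

import pandas as pd

candles = [
    {'open': 1.0, 'close': 1.2, 'high': 1.3, 'low': 0.9, 'volume_to': 10.0, 'volume_from': 8.0, 'time': '2021-01-01 00:00'},
    {'open': 1.2, 'close': 1.1, 'high': 1.4, 'low': 1.0, 'volume_to': 7.5, 'volume_from': 6.0, 'time': '2021-01-01 04:00'},
]
print(pd.DataFrame(candles))  # dict keys become columns, one row per candle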
|
# Fed Interest Rate Data (Dates are approximate- representing the Sunday of the week when the rate was announced)
import pandas as pd
import numpy as np
from datetime import timedelta
file0319 = pd.read_html('https://www.federalreserve.gov/monetarypolicy/openmarket.htm')
file9002 = pd.read_html('https://www.federalreserve.gov/monetarypolicy/openmarket_archive.htm')
# DF containing all instances where the Fed released a new funds rate
rates_df = pd.DataFrame()
file2_yrs = list(range(1990,2003))[::-1]
file2_yrs.remove(1993)
years = [[2019, 2018, 2017, 2016, 2015, 2008, 2007, 2006, 2005, 2004, 2003], file2_yrs]
counter1 = 0
for file in [file0319, file9002]:
counter2 = 0
file_df = pd.DataFrame()
for table in file:
table['Year'] = list(np.full(len(table),years[counter1][counter2]))
file_df = pd.concat([file_df, table])
counter2 += 1
rates_df = pd.concat([rates_df, file_df])
counter1 += 1
rates_df['Date'] = rates_df['Date'] + ', ' + rates_df['Year'].astype(str)
rates_df['Date'] = pd.to_datetime(rates_df['Date'])
rates_df['Week_Num'] = rates_df['Date'].dt.week
rates_df['Target_Rate'] = rates_df['Level (%)'].astype(str).apply(lambda x:x.split('-')[-1]).astype(float)
rates_df['YrWk'] = rates_df['Year'].astype(str) + rates_df['Week_Num'].astype(str)
rates_df = rates_df.drop(['Increase', 'Decrease', 'Level (%)', 'Year'], axis=1)
rates_df = rates_df[::-1].reset_index(drop=True)
# Filling in weekly dates b/w dates in rates_df.Date to create a smooth x-axis time range for plotting
master_df = rates_df[['Date', 'Target_Rate']].head(1)
dates = rates_df.Date.values
counter = 1
for date in dates[:-1]:
df = rates_df[rates_df.Date == date]
rate = df.Target_Rate.iloc[0]
next_date = pd.to_datetime(dates[counter])
fill_dates = pd.date_range(pd.to_datetime(date), next_date, freq='W')
fill_df = pd.DataFrame({'Date':fill_dates, 'Target_Rate':list(np.full(len(fill_dates),rate))})
master_df = pd.concat([master_df, fill_df])
counter += 1
master_df = pd.concat([master_df, rates_df[['Date', 'Target_Rate']].tail(1)])
master_df['Year'] = master_df.Date.dt.year
master_df['Week_Num'] = master_df.Date.dt.week
master_df['YrWk'] = master_df.Year.astype(str) + master_df.Week_Num.astype(str)
master_df = master_df.drop_duplicates(subset='YrWk', keep='last')
master_df = master_df.reset_index(drop=True)
# Adding last date and filling data up to today
today_rate = master_df.tail(1).Target_Rate.iloc[0]
last_date = master_df.tail(1).Date.iloc[0]
today_date = pd.Timestamp.now().normalize()  # midnight today; pd.datetime was removed in newer pandas
remaining_dates =
|
pd.date_range(last_date, today_date, freq='W')
|
pandas.date_range
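For reference, a minimal sketch of the weekly padding used above; the endpoint dates here are made up:

import pandas as pd

# freq='W' yields the week-ending (Sunday) timestamps that fall between the two endpoints
print(pd.date_range('2020-01-05', '2020-02-16', freq='W'))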
|
#!/usr/bin/python
"""code to analyze the outputs of our 2d tuning model
"""
import matplotlib as mpl
# we do this because sometimes we run this without an X-server, and this backend doesn't need
# one. We set warn=False because the notebook uses a different backend and will spout out a big
# warning to that effect; that's unnecessarily alarming, so we hide it.
mpl.use('svg', warn=False)
import pandas as pd
import numpy as np
import torch
import re
import functools
import os
import argparse
import glob
import itertools
import warnings
from sklearn import metrics
from torch.utils import data as torchdata
from . import model as sfp_model
from tqdm import tqdm
def load_LogGaussianDonut(save_path_stem):
"""this loads and returns the actual model, given the saved parameters, for analysis
"""
# generally want to use cpu
device = torch.device("cpu")
# we try and infer model type from the path name, which we can do assuming we used the
# Snakefile to generate saved model.
vary_amps = save_path_stem.split('_')[-1]
ecc_type = save_path_stem.split('_')[-2]
ori_type = save_path_stem.split('_')[-3]
model = sfp_model.LogGaussianDonut(ori_type, ecc_type, vary_amps)
model.load_state_dict(torch.load(save_path_stem + '_model.pt', map_location=device.type))
model.eval()
model.to(device)
return model
def load_single_model(save_path_stem, load_results_df=True):
"""load in the model, loss df, and model df found at the save_path_stem
we also send the model to the appropriate device
"""
try:
if load_results_df:
results_df = pd.read_csv(save_path_stem + '_results_df.csv')
else:
results_df = pd.read_csv(save_path_stem + '_results_df.csv', nrows=1)
except FileNotFoundError as e:
if load_results_df:
raise e
results_df = None
loss_df = pd.read_csv(save_path_stem + '_loss.csv')
model_history_df = pd.read_csv(save_path_stem + "_model_history.csv")
if 'test_subset' not in loss_df.columns or 'test_subset' not in model_history_df.columns:
# unclear why this happens, it's really strange
test_subset = re.findall('_(c[\d,]+)_', save_path_stem)[0]
if not test_subset.startswith('c'):
raise Exception("Can't grab test_subset from path %s, found %s!" %
(save_path_stem, test_subset))
# this will give it the same spacing as the original version
test_subset = ', '.join(test_subset[1:].split(','))
if "test_subset" not in loss_df.columns:
loss_df['test_subset'] = test_subset
if "test_subset" not in model_history_df.columns:
model_history_df['test_subset'] = test_subset
model = load_LogGaussianDonut(save_path_stem)
return model, loss_df, results_df, model_history_df
def combine_models(base_path_template, load_results_df=True, groupaverage=False):
"""load in many models and combine into dataframes
returns: model_df, loss_df, results_df
base_path_template: path template where we should find the results. should contain no string
formatting symbols (e.g., "{0}" or "%s") but should contain at least one '*' because we will
use glob to find them (and therefore should point to an actual file when passed to glob, one
    of: the loss df, model df, or model parameters).
load_results_df: boolean. Whether to load the results_df or not. Set False if your results_df
are too big and you're worried about having them all in memory. In this case, the returned
results_df will be None.
groupaverage : boolean. Whether to grab the individual subject fits
or the sub-groupaverage subject (which is a bootstrapped average
subject)
"""
models = []
loss_df = []
results_df = []
model_history_df = []
path_stems = []
for p in glob.glob(base_path_template):
if groupaverage and 'sub-groupaverage' not in p:
continue
if not groupaverage and 'sub-groupaverage' in p:
continue
path_stem = (p.replace('_loss.csv', '').replace('_model.pt', '')
.replace('_results_df.csv', '').replace('_model_history.csv', '')
.replace('_preds.pt', ''))
# we do this to make sure we're not loading in the outputs of a model twice (by finding
# both its loss.csv and its results_df.csv, for example)
if path_stem in path_stems:
continue
# based on how these are saved, we can make some assumptions and grab extra info from their
# paths
metadata = {}
if 'tuning_2d_simulated' in path_stem:
metadata['modeling_goal'] = path_stem.split(os.sep)[-2]
elif 'tuning_2d_model' in path_stem:
metadata['subject'] = path_stem.split(os.sep)[-3]
if not groupaverage:
metadata['session'] = path_stem.split(os.sep)[-2]
else:
session_dir = path_stem.split(os.sep)[-2]
metadata['session'] = session_dir.split('_')[0]
metadata['groupaverage_seed'] = session_dir.split('_')[-1]
metadata['modeling_goal'] = path_stem.split(os.sep)[-4]
metadata['mat_type'] = path_stem.split(os.sep)[-5]
metadata['atlas_type'] = path_stem.split(os.sep)[-6]
metadata['task'] = re.search('_(task-[a-z0-9]+)_', path_stem).groups()[0]
metadata['indicator'] = str((metadata['subject'], metadata['session'], metadata['task'])).replace("'", "")
path_stems.append(path_stem)
model, loss, results, model_history = load_single_model(path_stem,
load_results_df=load_results_df)
for k, v in metadata.items():
if results is not None:
results[k] = v
loss[k] = v
model_history[k] = v
results_df.append(results)
loss_df.append(loss)
model_history_df.append(model_history)
tmp = loss.head(1)
tmp = tmp.drop(['epoch_num', 'batch_num', 'loss'], 1)
tmp['model'] = model
for name, val in model.named_parameters():
tmper = tmp.copy()
tmper['model_parameter'] = name
tmper['fit_value'] = val.cpu().detach().numpy()
if results is not None:
if 'true_model_%s' % name in results.columns:
tmper['true_value'] = results['true_model_%s' % name].unique()[0]
models.append(tmper)
loss_df = pd.concat(loss_df).reset_index(drop=True)
model_history_df = pd.concat(model_history_df).reset_index(drop=True)
if load_results_df:
results_df = pd.concat(results_df).reset_index(drop=True).drop('index', 1)
else:
results_df = None
models = pd.concat(models)
return models, loss_df, results_df, model_history_df
def _finish_feature_df(df, reference_frame='absolute'):
"""helper function to clean up the feature dataframes
This helper function cleans up the feature dataframes so that they
can be more easily used for plotting with feature_df_plot and
feature_df_polar_plot functions. It performs the following actions:
1. Adds reference_frame as column.
2. Converts retinotopic angles to human-readable labels (only if
default retinotopic angles used).
3. Adds "Stimulus type" as column, giving human-readable labels
based on "Orientation" columns.
Parameters
----------
df : pd.DataFrame
The feature dataframe to finish up
reference_frame : {'absolute, 'relative'}
The reference frame of df
Returns
-------
df : pd.DataFrame
The cleaned up dataframe
"""
if isinstance(df, list):
df = pd.concat(df).reset_index(drop=True)
df['reference_frame'] = reference_frame
angle_ref = np.linspace(0, np.pi, 4, endpoint=False)
angle_labels = ['0', r'$\frac{\pi}{4}$', r'$\frac{\pi}{2}$', r'$\frac{3\pi}{4}$']
# this may seem backwards (as defined in first_level_analysis.py, forward
# spirals/diagonals are those where w_r/w_x = w_a/w_y, reverse are where
# they're negatives of each other), but this is the correct mapping, which
# you can see by playing around with sfp.figures.input_schematic (and by
# seeing that the predictions for the obliques in one reference frame match
# correctly with the cardinals in the other, e.g., the prediction for
# forward diagonal should be the same as angular at 45 degrees, in the top
# right quadrant)
rel_labels = ['radial', 'reverse spiral', 'angular', 'forward spiral']
abs_labels = ['vertical', 'reverse diagonal', 'horizontal', 'forward diagonal']
if np.array_equiv(angle_ref, df["Retinotopic angle (rad)"].unique()):
df["Retinotopic angle (rad)"] = df["Retinotopic angle (rad)"].map(dict((k, v) for k, v in
zip(angle_ref,
angle_labels)))
if reference_frame == 'relative':
df["Stimulus type"] = df["Orientation (rad)"].map(dict((k, v) for k, v in
zip(angle_ref, rel_labels)))
elif reference_frame == 'absolute':
df["Stimulus type"] = df["Orientation (rad)"].map(dict((k, v) for k, v in
zip(angle_ref, abs_labels)))
return df
def create_preferred_period_df(model, reference_frame='absolute',
retinotopic_angle=np.linspace(0, np.pi, 4, endpoint=False),
orientation=np.linspace(0, np.pi, 4, endpoint=False),
eccentricity=np.linspace(0, 11, 11)):
"""Create dataframe summarizing preferred period as function of eccentricity
Generally, you should not call this function directly, but use
create_feature_df. Differences from that function: this functions
requires the initialized model and only creates the info for a
single model, while create_feature_df uses the models dataframe to
initialize models itself, and combines the outputs across multiple
scanning sessions.
This function creates a dataframe summarizing the specified model's
preferred period as a function of eccentricity, for multiple
stimulus orientations (in either absolute or relative reference
frames) and retinotopic angles. This dataframe is then used for
creating plots to summarize the model.
You can also use this function to create the information necessary
to plot preferred period as a function of retinotopic angle at a
specific eccentricity. You can do that by reducing the number of
eccentricities (e.g., eccentricity=[5]) and increasing the number of
retinotopic angles (e.g., np.linspace(0, 2*np.pi, 49)).
Unless you have something specific in mind, you can trust the
default options for retinotopic_angle, orientation, and
eccentricity.
Parameters
----------
model : sfp.model.LogGaussianDonut
a single, initialized model, which we will summarize.
reference_frame : {"absolute", "relative"}, optional
Whether we use the absolute or relative reference frame in the
feature dataframe; that is whether we consider conventional
gratings (absolute, orientation is relative to
vertical/horizontal), or our log-polar gratings (relative,
orientation is relative to fovea).
retinotopic_angle : np.array, optional
Array specifying which retinotopic angles to find the preferred
period for. If you don't care about retinotopic angle and just
want to summarize the model's overall features, you should use
the default (which includes all angles where the model can have
different preferences, based on its parametrization) and then
average over them.
orientation : np.array, optional
Array specifying which stimulus orientations to find the
preferred period for. Note that the meaning of these
orientations will differ depending on the value of
reference_frame; you should most likely plot and interpret the
output based on the "Stimulus type" column instead (which
include strings like 'vertical'/'horizontal' or
'radial'/'angular'). However, this mapping can only happen if
the values in orientation line up with our stimuli (0, pi/4,
pi/2, 3*pi/2), and thus it's especially recommended that you use
the default value for this argument. If you don't care about
orientation and just want to summarize the model's overall
features, you should use the default (which includes all
orientations where the model can have different preferences,
based on its parametrization) and then average over them.
eccentricity : np.array, optional
Array specifying which eccentricities to find the preferred
period for. The default values span the range of measurements
for our experiment, but you can certainly go higher if you
wish. Note that, for creating the plot of preferred period as a
function of eccentricity, the model's predictions will always be
linear and so you most likely only need 2 points. More are
included because you may want to examine the preferred period at
specific eccentricities
Returns
-------
preferred_period_df : pd.DataFrame
Dataframe containing preferred period of the model, to use with
sfp.plotting.feature_df_plot for plotting preferred period as a
function of eccentricity.
"""
df = []
for o in orientation:
if reference_frame == 'absolute':
tmp = model.preferred_period(eccentricity, retinotopic_angle, o)
elif reference_frame == 'relative':
tmp = model.preferred_period(eccentricity, retinotopic_angle, rel_sf_angle=o)
tmp = pd.DataFrame(tmp.detach().numpy(), index=retinotopic_angle, columns=eccentricity)
tmp = tmp.reset_index().rename(columns={'index': 'Retinotopic angle (rad)'})
tmp['Orientation (rad)'] = o
df.append(pd.melt(tmp, ['Retinotopic angle (rad)', 'Orientation (rad)'],
var_name='Eccentricity (deg)', value_name='Preferred period (deg)'))
return _finish_feature_df(df, reference_frame)
def create_preferred_period_contour_df(model, reference_frame='absolute',
retinotopic_angle=np.linspace(0, 2*np.pi, 49),
orientation=np.linspace(0, np.pi, 4, endpoint=False),
period_target=[.5, 1, 1.5], ):
"""Create dataframe summarizing preferred period as function of retinotopic angle
Generally, you should not call this function directly, but use
create_feature_df. Differences from that function: this functions
requires the initialized model and only creates the info for a
single model, while create_feature_df uses the models dataframe to
initialize models itself, and combines the outputs across multiple
scanning sessions.
This function creates a dataframe summarizing the specified model's
preferred period as a function of retinotopic angle, for multiple
stimulus orientations (in either absolute or relative reference
frames) and target periods. That is, it contains information showing
at what eccentricity the model's preferred period is, e.g., 1 for a
range of retinotopic angles and stimulus orientation. This dataframe
is then used for creating plots to summarize the model.
So this function creates information to plot iso-preferred period
lines. If you want to plot preferred period as a function of
retinotopic angle for a specific eccentricity, you can do that with
create_preferred_period_df, by reducing the number of eccentricities
(e.g., eccentricity=[5]) and increasing the number of retinotopic
angles (e.g., np.linspace(0, 2*np.pi, 49))
Unless you have something specific in mind, you can trust the
default options for retinotopic_angle, orientation, and
period_target
Parameters
----------
model : sfp.model.LogGaussianDonut
a single, initialized model, which we will summarize.
reference_frame : {"absolute", "relative"}, optional
Whether we use the absolute or relative reference frame in the
feature dataframe; that is whether we consider conventional
gratings (absolute, orientation is relative to
vertical/horizontal), or our log-polar gratings (relative,
orientation is relative to fovea).
retinotopic_angle : np.array, optional
Array specifying which retinotopic angles to find the preferred
period for. Note that the sampling of retinotopic angle is much
finer than for create_preferred_period_df (and goes all the way
to 2*pi), because this is what we will use as the dependent
        variable in our plots
orientation : np.array, optional
Array specifying which stimulus orientations to find the
preferred period for. Note that the meaning of these
orientations will differ depending on the value of
reference_frame; you should most likely plot and interpret the
output based on the "Stimulus type" column instead (which
include strings like 'vertical'/'horizontal' or
'radial'/'angular'). However, this mapping can only happen if
the values in orientation line up with our stimuli (0, pi/4,
pi/2, 3*pi/2), and thus it's especially recommended that you use
the default value for this argument. If you don't care about
orientation and just want to summarize the model's overall
features, you should use the default (which includes all
orientations where the model can have different preferences,
based on its parametrization) and then average over them.
period_target : np.array, optional
Array specifying which the target periods for the model. The
intended use of this dataframe is to plot contour plots showing
at what eccentricity the model will have a specified preferred
period (for a range of angles and orientations), and this
argument specifies those periods.
Returns
-------
preferred_period_contour_df : pd.DataFrame
Dataframe containing preferred period of the model, to use with
sfp.plotting.feature_df_polar_plot for plotting preferred period
as a function of retinotopic angle.
"""
df = []
for p in period_target:
if reference_frame == 'absolute':
tmp = model.preferred_period_contour(p, retinotopic_angle, orientation)
elif reference_frame == 'relative':
tmp = model.preferred_period_contour(p, retinotopic_angle, rel_sf_angle=orientation)
tmp = pd.DataFrame(tmp.detach().numpy(), index=retinotopic_angle, columns=orientation)
tmp = tmp.reset_index().rename(columns={'index': 'Retinotopic angle (rad)'})
tmp['Preferred period (deg)'] = p
df.append(pd.melt(tmp, ['Retinotopic angle (rad)', 'Preferred period (deg)'],
var_name='Orientation (rad)', value_name='Eccentricity (deg)'))
return _finish_feature_df(df, reference_frame)
def create_max_amplitude_df(model, reference_frame='absolute',
retinotopic_angle=np.linspace(0, 2*np.pi, 49),
orientation=np.linspace(0, np.pi, 4, endpoint=False)):
"""Create dataframe summarizing max amplitude as function of retinotopic angle
Generally, you should not call this function directly, but use
create_feature_df. Differences from that function: this functions
requires the initialized model and only creates the info for a
single model, while create_feature_df uses the models dataframe to
initialize models itself, and combines the outputs across multiple
scanning sessions.
This function creates a dataframe summarizing the specified model's
maximum amplitude as a function of retinotopic angle, for multiple
stimulus orientations (in either absolute or relative reference
frames). This dataframe is then used for creating plots to summarize
the model.
Unless you have something specific in mind, you can trust the
default options for retinotopic_angle and orientation.
Parameters
----------
model : sfp.model.LogGaussianDonut
a single, initialized model, which we will summarize.
reference_frame : {"absolute", "relative"}, optional
Whether we use the absolute or relative reference frame in the
feature dataframe; that is whether we consider conventional
gratings (absolute, orientation is relative to
vertical/horizontal), or our log-polar gratings (relative,
orientation is relative to fovea).
retinotopic_angle : np.array, optional
Array specifying which retinotopic angles to find the preferred
period for. Note that the sampling of retinotopic angle is much
finer than for create_preferred_period_df (and goes all the way
to 2*pi), because this is what we will use as the dependent
        variable in our plots
orientation : np.array, optional
Array specifying which stimulus orientations to find the
preferred period for. Note that the meaning of these
orientations will differ depending on the value of
reference_frame; you should most likely plot and interpret the
output based on the "Stimulus type" column instead (which
include strings like 'vertical'/'horizontal' or
'radial'/'angular'). However, this mapping can only happen if
the values in orientation line up with our stimuli (0, pi/4,
pi/2, 3*pi/2), and thus it's especially recommended that you use
the default value for this argument. If you don't care about
orientation and just want to summarize the model's overall
features, you should use the default (which includes all
orientations where the model can have different preferences,
based on its parametrization) and then average over them.
Returns
-------
max_amplitude_df : pd.DataFrame
Dataframe containing maximum amplitude of the model, to use with
sfp.plotting.feature_df_polar_plot for plotting max amplitude as
a function of retinotopic angle.
"""
if reference_frame == 'absolute':
tmp = model.max_amplitude(retinotopic_angle, orientation).detach().numpy()
elif reference_frame == 'relative':
tmp = model.max_amplitude(retinotopic_angle, rel_sf_angle=orientation).detach().numpy()
tmp = pd.DataFrame(tmp, index=retinotopic_angle, columns=orientation)
tmp = tmp.reset_index().rename(columns={'index': 'Retinotopic angle (rad)'})
df = pd.melt(tmp, ['Retinotopic angle (rad)'], var_name='Orientation (rad)',
value_name='Max amplitude')
return _finish_feature_df(df, reference_frame)
def create_feature_df(models, feature_type='preferred_period', reference_frame='absolute',
gb_cols=['subject', 'bootstrap_num'], **kwargs):
"""Create dataframe to summarize the predictions made by our models
The point of this dataframe is to generate plots (using
plotting.feature_df_plot and plotting.feature_df_polar_plot) to
easily visualize what the parameters of our model mean, either for
demonstrative purposes or with the parameters fit to actual data.
This is used to create a feature data frame that combines info
across multiple models, using the columns indicated in gb_cols to
separate them, and serves as a wrapper around three other functions:
create_preferred_period_df, create_preferred_period_contour_df, and
create_max_amplitude_df (based on the value of the feature_type
arg). We loop through the unique subsets of the data given by
models.groupby(gb_cols) in the models dataframe and instantiate a
model for each one (thus, each subset must only have one associated
model). We then create dataframes summarizing the relevant features,
add the identifying information, and, concatenate.
The intended use of these dataframes is to create plots showing the
models' predictions for (using bootstraps to get confidence
intervals to show variability across subjects):
1. preferred period as a function of eccentricity:
```
pref_period = create_feature_df(models, feature_type='preferred_period')
sfp.plotting.feature_df_plot(pref_period)
# average over retinotopic angle
sfp.plotting.feature_df_plot(pref_period, col=None,
pre_boot_gb_func=np.mean)
```
    2. preferred period as a function of retinotopic angle and
       stimulus orientation at a given eccentricity:
```
pref_period = create_feature_df(models, feature_type='preferred_period',
eccentricity=[5],
retinotopic_angle=np.linspace(0, 2*np.pi, 49))
sfp.plotting.feature_df_polar_plot(pref_period, col='Eccentricity (deg)',
r='Preferred period (deg)')
```
3. iso-preferred period lines as a function of retinotopic angle and
stimulus orientation (i.e., at what eccentricity do you have a
preferred period of 1 for this angle and orientation):
```
pref_period_contour = create_feature_df(models,
feature_type='preferred_period_contour')
sfp.plotting.feature_df_polar_plot(pref_period_contour)
```
4. max amplitude as a function of retinotopic angle and stimulus
orientation:
```
max_amp = create_feature_df(models, feature_type='max_amplitude')
sfp.plotting.feature_df_polar_plot(max_amp, col=None, r='Max amplitude')
```
Parameters
----------
models : pd.DataFrame
dataframe summarizing model fits across many subjects / sessions
(as created by analyze_model.combine_models function). Must
contain the columns indicated in gb_cols and a row for each of
the model's 11 parameters
feature_type : {"preferred_period", "preferred_period_contour", "max_amplitude"}, optional
Which feature dataframe to create. Determines which function we
call, from create_preferred_period_df,
create_preferred_period_contour_df, and create_max_amplitude_df
reference_frame : {"absolute", "relative"}, optional
Whether we use the absolute or relative reference frame in the
feature dataframe; that is whether we consider conventional
gratings (absolute, orientation is relative to
vertical/horizontal), or our log-polar gratings (relative,
orientation is relative to fovea).
gb_cols : list, optional
list of strs indicating columns in the df to groupby. when we
groupby these columns, each subset should give a single
model. Thus, this should be something like ['subject'],
['subject', 'bootstrap_num'], or ['subject', 'session']
(depending on the contents of your df)
kwargs : {retinotopic_angle, orientation, eccentricity, period_target}
passed to the various create_*_df functions. See their
docstrings for more info. if not set, use the defaults.
Returns
-------
feature_df : pd.DataFrame
Dataframe containing specified feature info
"""
df = []
for n, g in models.groupby(gb_cols):
m = sfp_model.LogGaussianDonut.init_from_df(g)
if feature_type == 'preferred_period':
df.append(create_preferred_period_df(m, reference_frame, **kwargs))
elif feature_type == 'preferred_period_contour':
df.append(create_preferred_period_contour_df(m, reference_frame, **kwargs))
elif feature_type == 'max_amplitude':
df.append(create_max_amplitude_df(m, reference_frame, **kwargs))
# in this case, gb_cols is a list with one element, so n will
# just be a single element (probably a str). In order for the
# following dict(zip(gb_cols, n)) call to work correctly, both
# have to be lists with the same length, so this ensures that.
if len(gb_cols) == 1:
n = [n]
df[-1] = df[-1].assign(**dict(zip(gb_cols, n)))
return pd.concat(df).reset_index(drop=True)
def collect_final_loss(paths):
"""collect up the loss files, add some metadata, and concat
We loop through the paths, loading each in, grab the last epoch, add
some metadata by parsing the path, and then concatenate and return
the resulting df
Note that the assumed use here is collecting the loss.csv files
created by the different folds of cross-validation, but we don't
explicitly check for that and so this maybe useful in other contexts
Parameters
----------
paths : list
list of strs giving the paths to the loss files. we attempt to
parse these strings to find the subject, session, and task, and
will raise an Exception if we can't do so
Returns
-------
df : pd.DataFrame
the collected loss
"""
df = []
print(f"Loading in {len(paths)} total paths")
pbar = tqdm(range(len(paths)))
for i in pbar:
p = paths[i]
regexes = [r'(sub-[a-z0-9]+)', r'(ses-[a-z0-9]+)', r'(task-[a-z0-9]+)']
regex_names = ['subject', 'session', 'task']
if 'sub-groupaverage' in p:
regex_names.append('groupaverage_seed')
regexes.append(r'(_s[0-9]+)')
pbar.set_postfix(path=os.path.split(p)[-1])
tmp = pd.read_csv(p)
last_epoch = tmp.epoch_num.unique().max()
tmp = tmp.query("epoch_num == @last_epoch")
for n, regex in zip(regex_names, regexes):
res = re.findall(regex, p)
if len(set(res)) != 1:
raise Exception(f"Unable to infer {n} from path {p}!")
tmp[n] = res[0]
df.append(tmp)
return
|
pd.concat(df)
|
pandas.concat
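For reference, a minimal sketch of stacking the per-path loss frames row-wise as done above; the column names and values here are made up:

import pandas as pd

parts = [pd.DataFrame({'loss': [0.51], 'subject': ['sub-01']}),
         pd.DataFrame({'loss': [0.43], 'subject': ['sub-02']})]
print(pd.concat(parts).reset_index(drop=True))  # reset_index gives a clean 0..n-1 index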
|
from data_handler.graph_class import Graph,wl_labeling
import networkx as nx
#from utils import per_section,indices_to_one_hot
from collections import defaultdict
import numpy as np
import math
import os
from tqdm import tqdm
import pickle
import pandas as pd
#%%
def indices_to_one_hot(number, nb_classes,label_dummy=-1):
"""Convert an iterable of indices to one-hot encoded labels."""
if number==label_dummy:
return np.zeros(nb_classes)
else:
return np.eye(nb_classes)[number]
def per_section(it, is_delimiter=lambda x: x.isspace()):
ret = []
for line in it:
if is_delimiter(line):
if ret:
yield ret # OR ''.join(ret)
ret = []
else:
ret.append(line.rstrip()) # OR ret.append(line)
if ret:
yield ret
def data_streamer(data_path,batchsize_bylabel, selected_labels,balanced_shapes=False,sampling_seed=None,return_idx = False):
batch_graphs, batch_labels = [],[]
if not (sampling_seed is None):
np.random.seed(sampling_seed)
if return_idx:
batch_idx=[]
if not balanced_shapes:
for label in selected_labels:
files = os.listdir(data_path+'/label%s/'%label)
file_idx = np.random.choice(range(len(files)), size=batchsize_bylabel,replace=False)
for idx in file_idx:
batch_graphs.append(np.load(data_path+'/label%s/'%label+files[idx]))
batch_labels.append(label)
if return_idx:
                    ls = files[idx].split('.')
                    batch_idx.append(int(ls[0][5:]))  # digits after the 'graph' prefix in 'graphXXXXX.npy'
if return_idx:
return batch_graphs,batch_labels,batch_idx
else:
return batch_graphs,batch_labels
else:
shapes={}
graphidx_shapes={}
for label in selected_labels:
files = os.listdir(data_path+'/label%s/'%label)
shapes[label]=[]
graphidx_shapes[label]=[]
print('label = ', label)
for filename in tqdm(files):
local_idx = int(filename.split('.')[0][5:])
graphidx_shapes[label].append(local_idx)
shapes[label].append(np.load(data_path+'/label%s/'%label+filename).shape[0])
unique_shapes= np.unique(shapes[label])
sizebylabel = batchsize_bylabel//len(unique_shapes)
for local_shape in unique_shapes:
local_idx_list = np.argwhere(shapes[label]==local_shape)[:,0]
sampled_idx = np.random.choice(local_idx_list, size=sizebylabel, replace=False)
for idx in sampled_idx:
graphidx = graphidx_shapes[label][idx]
batch_graphs.append(np.load(data_path+'/label%s/graph%s.npy'%(label,graphidx)))
batch_labels.append(label)
return batch_graphs,batch_labels
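# Usage sketch (assumption): data_path is expected to contain label<k>/ folders
# of graph<idx>.npy arrays, as read above. The path, labels, and batch size here
# are illustrative only.
# graphs, labels = data_streamer('./toy_graphs', batchsize_bylabel=10,
#                                selected_labels=[0, 1], sampling_seed=0)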
def load_local_data(data_path,name,one_hot=False,attributes=True,use_node_deg=False):
""" Load local datasets - modified version
Parameters
----------
data_path : string
Path to the data. Must link to a folder where all datasets are saved in separate folders
name : string
Name of the dataset to load.
Choices=['mutag','ptc','triangles','enzymes','protein','protein_notfull','bzr','cox2','imdb-b','imdb-m','reddit','collab']
one_hot : integer
If discrete attributes must be one hotted it must be the number of unique values.
attributes : bool, optional
For dataset with both continuous and discrete attributes.
If True it uses the continuous attributes (corresponding to "Node Attr." in [5])
use_node_deg : bool, optional
Whether to use the node degree instead of the original labels.
Returns
-------
X : array
array of Graph objects created from the dataset
y : array
classes of each graph
References
----------
[5] <NAME> and <NAME> and <NAME> and <NAME> and <NAME>
"Benchmark Data Sets for Graph Kernels"
"""
name_to_path_discretefeatures={'mutag':data_path+'/MUTAG_2/',
'ptc':data_path+'/PTC_MR/',
'triangles':data_path+'/TRIANGLES/'}
name_to_path_realfeatures={'enzymes':data_path+'/ENZYMES_2/',
'protein':data_path+'/PROTEINS_full/',
'protein_notfull':data_path+'/PROTEINS/',
'bzr':data_path+'/BZR/',
'cox2':data_path+'/COX2/'}
name_to_rawnames={'mutag':'MUTAG', 'ptc':'PTC_MR','triangles':'TRIANGLES',
'enzymes':'ENZYMES','protein':'PROTEINS_full','protein_notfull':'PROTEINS',
'bzr':'BZR','cox2':'COX2',
'imdb-b':'IMDB-BINARY', 'imdb-m':'IMDB-MULTI','reddit':'REDDIT-BINARY','collab':'COLLAB'}
if name in ['mutag','ptc','triangles']:
dataset = build_dataset_discretefeatures(name_to_rawnames[name],
name_to_path_discretefeatures[name],
one_hot=one_hot)
elif name in ['enzymes','protein', 'protein_notfull','bzr','cox2']:
dataset = build_dataset_realfeatures(name_to_rawnames[name], name_to_path_realfeatures[name],
type_attr='real',use_node_deg=use_node_deg)
elif name in ['imdb-b','imdb-m','reddit', 'collab']:
rawname = name_to_rawnames[name]
dataset = build_dataset_withoutfeatures(rawname, data_path+'/%s/'%rawname,use_node_deg= use_node_deg)
else:
raise ValueError('unknown dataset')
X,y=zip(*dataset)
return np.array(X),np.array(y)
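# Usage sketch (assumption): data_path must contain the per-dataset folders
# listed in name_to_path_* above (e.g. MUTAG_2/ for 'mutag'). The path below is
# illustrative.
# X, y = load_local_data('./data', 'mutag', one_hot=True)
# print('%s graphs, %s classes' % (len(X), len(np.unique(y))))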
def build_noisy_circular_graph(N=20,mu=0,sigma=0.3,with_noise=False,structure_noise=False,p=None):
g=Graph()
g.add_nodes(list(range(N)))
for i in range(N):
noise=float(np.random.normal(mu,sigma,1))
if with_noise:
g.add_one_attribute(i,math.sin((2*i*math.pi/N))+noise)
else:
g.add_one_attribute(i,math.sin(2*i*math.pi/N))
g.add_edge((i,i+1))
if structure_noise:
randomint=np.random.randint(0,p)
if randomint==0:
if i<=N-3:
g.add_edge((i,i+2))
if i==N-2:
g.add_edge((i,0))
if i==N-1:
g.add_edge((i,1))
g.add_edge((N,0))
noise=float(np.random.normal(mu,sigma,1))
if with_noise:
g.add_one_attribute(N,math.sin((2*N*math.pi/N))+noise)
else:
g.add_one_attribute(N,math.sin(2*N*math.pi/N))
return g
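# Usage sketch: a noisy circle with attribute noise and occasional extra edges
# (each node gains a shortcut with probability 1/p, since np.random.randint(0, p)
# equals 0 roughly once every p draws). Values are illustrative.
# g = build_noisy_circular_graph(N=25, with_noise=True, structure_noise=True, p=3)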
def load_largegraphs(data_path, dataset_name,undirected=True):
abspath = os.path.abspath('./')
name_to_file = {'EU':'eu-email.p',
'village':'India_database.p',
'amazon':'amazon.p',
'wikicats':'wikicats.p'}
database = pickle.load(open(abspath+data_path+name_to_file[dataset_name],'rb'))
if not undirected:  # directed graphs; we could experimentally switch them to undirected graphs
assert dataset_name in ['EU','wikicats']
if dataset_name in ['EU','wikicats']:
G = nx.to_numpy_array(database['G'])
node_labels = database['labels']
else:
raise ValueError('unknown dataset name')
else: #undirected graphs
assert dataset_name in ['EU','amazon','wikicats', 'village']
if dataset_name in ['EU','amazon', 'wikicats']:
G = nx.to_numpy_array(database['G'].to_undirected())
node_labels = database['labels']
elif dataset_name in ['village']:
node_labels = database['label']
num_nodes = len(node_labels)
G_ = nx.Graph()
for i in range(num_nodes):
G_.add_node(i)
for edge in database['edges']:
G_.add_edge(edge[0], edge[1])
G= nx.adjacency_matrix(G_).toarray()
else:
raise ValueError('unknown dataset name')
return G, node_labels
#%%
def histog(X,bins=10):
node_length=[]
for graph in X:
node_length.append(len(graph.nodes()))
node_lengths = np.array(node_length)
return node_lengths, {'histo': np.histogram(node_lengths, bins=bins),
'med': np.median(node_lengths),
'max': np.max(node_lengths),
'min': np.min(node_lengths)}
def node_labels_dic(path,name):
node_dic=dict()
with open(path+name) as f:
sections = list(per_section(f))
k=1
for elt in sections[0]:
node_dic[k]=int(elt)
k=k+1
return node_dic
def node_attr_dic(path,name):
node_dic=dict()
with open(path+name) as f:
sections = list(per_section(f))
k=1
for elt in sections[0]:
node_dic[k]=[float(x) for x in elt.split(',')]
k=k+1
return node_dic
def graph_label_list(path,name,real=False):
graphs=[]
with open(path+name) as f:
sections = list(per_section(f))
k=1
for elt in sections[0]:
if real:
graphs.append((k,float(elt)))
else:
graphs.append((k,int(elt)))
k=k+1
return graphs
def graph_indicator(path,name):
data_dict = defaultdict(list)
with open(path+name) as f:
sections = list(per_section(f))
k=1
for elt in sections[0]:
data_dict[int(elt)].append(k)
k=k+1
return data_dict
def compute_adjency(path,name):
adjency= defaultdict(list)
with open(path+name) as f:
sections = list(per_section(f))
for elt in sections[0]:
adjency[int(elt.split(',')[0])].append(int(elt.split(',')[1]))
return adjency
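# The three readers above assume the usual TU-benchmark text layout:
#   <DS>_graph_labels.txt     one label per line; line k is the label of graph k
#   <DS>_graph_indicator.txt  one graph id per line; line k gives the graph of node k
#   <DS>_A.txt                one directed edge per line, written as 'src, dst'
# (node and graph ids are 1-based, matching the k counters above).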
def all_connected(X):
a=[]
for graph in X:
a.append(nx.is_connected(graph.nx_graph))
return np.all(a)
#%% TO FACTORIZE !!!!!!!!!!!
def build_dataset_discretefeatures(dataset_name,path,one_hot=False):
assert dataset_name in ['MUTAG','PTC_MR','TRIANGLES']
name_to_ncategories={'MUTAG':7, 'PTC_MR':18}
n_categories = name_to_ncategories[dataset_name]
graphs=graph_label_list(path,'%s_graph_labels.txt'%dataset_name)
adjency=compute_adjency(path,'%s_A.txt'%dataset_name)
data_dict=graph_indicator(path,'%s_graph_indicator.txt'%dataset_name)
node_dic=node_labels_dic(path,'%s_node_labels.txt'%dataset_name)
data=[]
for i in graphs:
g=Graph()
for node in data_dict[i[0]]:
g.name=i[0]
g.add_vertex(node)
if one_hot:
attr=indices_to_one_hot(node_dic[node],n_categories)
g.add_one_attribute(node,attr)
else:
g.add_one_attribute(node,node_dic[node])
for node2 in adjency[node]:
g.add_edge((node,node2))
data.append((g,i[1]))
return data
def build_dataset_realfeatures(dataset_name,path,type_attr='label',use_node_deg=False):
assert dataset_name in ['PROTEINS_full','PROTEINS','ENZYMES','BZR','COX2']
if type_attr=='label':
node_dic=node_labels_dic(path,'%s_node_labels.txt'%dataset_name)
if type_attr=='real':
node_dic=node_attr_dic(path,'%s_node_attributes.txt'%dataset_name)
graphs=graph_label_list(path,'%s_graph_labels.txt'%dataset_name)
adjency=compute_adjency(path,'%s_A.txt'%dataset_name)
data_dict=graph_indicator(path,'%s_graph_indicator.txt'%dataset_name)
data=[]
for i in graphs:
g=Graph()
for node in data_dict[i[0]]:
g.name=i[0]
g.add_vertex(node)
if not use_node_deg:
g.add_one_attribute(node,node_dic[node])
for node2 in adjency[node]:
g.add_edge((node,node2))
if use_node_deg:
node_degree_dict=dict(g.nx_graph.degree())
normalized_node_degree_dict={k:v/len(g.nx_graph.nodes()) for k,v in node_degree_dict.items() }
nx.set_node_attributes(g.nx_graph,normalized_node_degree_dict,'attr_name')
data.append((g,i[1]))
return data
def build_dataset_withoutfeatures(dataset_name, path, use_node_deg=False):
assert dataset_name in ['IMDB-MULTI','IMDB-BINARY','REDDIT-BINARY','COLLAB']
graphs=graph_label_list(path,'%s_graph_labels.txt'%dataset_name)
adjency=compute_adjency(path,'%s_A.txt'%dataset_name)
data_dict=graph_indicator(path,'%s_graph_indicator.txt'%dataset_name)
data=[]
for i in tqdm(graphs,desc='loading graphs'):
g=Graph()
for node in data_dict[i[0]]:
g.name=i[0]
g.add_vertex(node)
#g.add_one_attribute(node,node_dic[node])
for node2 in adjency[node]:
g.add_edge((node,node2))
if use_node_deg:
node_degree_dict=dict(g.nx_graph.degree())
normalized_node_degree_dict={k:v/len(g.nx_graph.nodes()) for k,v in node_degree_dict.items() }
nx.set_node_attributes(g.nx_graph,normalized_node_degree_dict,'attr_name')
data.append((g,i[1]))
return data
#%% READ EXPERIMENT RESULTS
def reader_results_FGWdictionary(dataset_name:str,
str_selection:list,
excluded_str_selection:list,
unmixing_validation:bool=False,
parameters:list =['Ntarget','lrC','lrF','init_mode_graph','batch_size','algo_seed','l2_reg'],
aggreg_params='algo_seed',
compute_statistics=True, target_resfile= 'res_clustering.csv', target_unmixingsfile= 'unmixings.npy'):
abs_path = os.path.abspath('../results/')
full_path ='%s/%s/'%(abs_path,dataset_name)
list_experiments = []
res = {}
for p in parameters:
res[p]=[]
res['loss'], res['RI'], res['best_RI']= [],[],[]
res['init_features']=[]
if compute_statistics:
res['involved_components']=[]
res['mean_components']=[]
res['min_components']=[]
res['max_components']=[]
for subrepo in os.listdir(full_path):
if np.all([str_ in subrepo for str_ in str_selection]) and (not np.any([str_ in subrepo for str_ in excluded_str_selection])):
local_path='%s/%s/'%(full_path,subrepo)
try:
# load necessary files
settings = pd.read_csv(local_path+'/settings')
if not ('_seed' in subrepo):
if not unmixing_validation:
local_res = pd.read_csv(local_path+'/res_clustering.csv')
if compute_statistics:
local_OT = pickle.load(open(local_path+'/OT_unmixings.pkl','rb'))
else:
local_res=pd.read_csv(local_path +'/res_clustering_100seeds.csv')
if compute_statistics:
local_OT = pickle.load(open(local_path+'/OT_unmixings_100seeds.pkl','rb'))
#print('local_res:', local_res)
#complete the summary dictionary
for p in parameters:
if p =='alpha':
for x in subrepo.split('_'):
if 'alpha' in x:
res['alpha'].append(float(x[5:]))
elif p in ['gamma_entropy','lambda_reg']:
if p in settings.keys():
res[p].append(settings[p].iloc[0])
else:
res[p].append(0)
else:
res[p].append(settings[p].iloc[0])
best_idx_dist = np.argmin(local_res['loss_mean'].values)
res['loss'].append(local_res['loss_mean'].values[best_idx_dist])
res['RI'].append(local_res['RI'].values[best_idx_dist])
res['best_RI'].append(np.max(local_res['RI'].values))
if compute_statistics:
unmixings = np.array([np.sum(T,axis=0) for T in local_OT[best_idx_dist]])
sums=np.sum(unmixings,axis=0)
res['involved_components'].append(np.sum(sums>10**(-15)))
count_components = [np.sum(x>10**(-15)) for x in unmixings]
res['mean_components'].append(np.mean(count_components))
res['max_components'].append(np.max(count_components))
res['min_components'].append(np.min(count_components))
if 'Finitkmeans' in subrepo:
res['init_features'].append('kmeans')
elif 'Finitrange' in subrepo:
res['init_features'].append('range')
else:
res['init_features'].append('random')
else:# we changed the storage method because it was too memory intensive
if not unmixing_validation:
local_res = pd.read_csv(local_path+target_resfile)
if compute_statistics:
unmixings = np.load(local_path+target_unmixingsfile)
else:
local_res=pd.read_csv(local_path +'/res_clustering_100seeds.csv')
if compute_statistics:
unmixings = np.load(local_path+'/unmixings_100seeds.npy')
#print('local_res:', local_res)
#complete the summary dictionary
for p in parameters:
if p =='alpha':
for x in subrepo.split('_'):
if 'alpha' in x:
res['alpha'].append(float(x[5:]))
elif p=='use_warmstart':
if not p in settings.keys():
res[p].append(False)
else:
res[p].append(settings[p].iloc[0])
elif p in ['gamma_entropy','lambda_reg']:
if p in settings.keys():
res[p].append(settings[p].iloc[0])
else:
res[p].append(0)
else:
res[p].append(settings[p].iloc[0])
best_idx_dist = np.argmin(local_res['loss_mean'].values)
res['loss'].append(local_res['loss_mean'].values[best_idx_dist])
res['RI'].append(local_res['RI'].values[best_idx_dist])
res['best_RI'].append(np.max(local_res['RI'].values))
if compute_statistics:
sums=np.sum(unmixings[best_idx_dist],axis=0)
res['involved_components'].append(np.sum(sums>10**(-15)))
count_components = [np.sum(x>10**(-15)) for x in unmixings[best_idx_dist]]
res['mean_components'].append(np.mean(count_components))
res['max_components'].append(np.max(count_components))
res['min_components'].append(np.min(count_components))
if 'Finitkmeans' in subrepo:
res['init_features'].append('kmeans')
elif 'Finitrange' in subrepo:
res['init_features'].append('range')
else:
res['init_features'].append('random')
list_experiments.append(subrepo)
except:
continue
for key in res.keys():
print('key: %s / len res: %s'%(key,len(res[key])))
stacked_df = pd.DataFrame(res)
print('stacked_df built ! shape: ', stacked_df.shape)
aggreg_dict = {}
fixed_params= []
exception_keys = ['RI','best_RI','loss']
if compute_statistics:
exception_keys+=['max_components', 'mean_components', 'min_components','involved_components']
for key in res.keys():
if not key in exception_keys+[aggreg_params]:
aggreg_dict[key]=list(np.unique(res[key]))
fixed_params.append(key)
print('fixed params:', fixed_params)
aggreg_df_instantiated = False
idx_to_explore = list(range(stacked_df.shape[0]))
first_key = fixed_params[0]
count =0
nan_count=0
mean_mapper = {}
std_mapper= {}
for key in exception_keys:
mean_mapper[key]= 'mean_%s'%key
std_mapper[key]= 'std_%s'%key
while idx_to_explore !=[]:
if count ==0:
print('len idx_to_explore:', len(idx_to_explore))
print('selected_idx:', idx_to_explore[0])
selected_exp = stacked_df.iloc[idx_to_explore[0]]
sub_df = stacked_df[stacked_df[first_key]==selected_exp[first_key]]
for param in fixed_params[1:]:
sub_df= sub_df[sub_df[param]==selected_exp[param]]
if count ==0:
print('param: %s / sub_df shape: %s'%(param,sub_df.shape))
if not aggreg_df_instantiated:
mean_aggreg_df = sub_df[exception_keys].mean(axis=0).to_frame().T
std_aggreg_df = sub_df[exception_keys].std(axis=0).to_frame().T
mean_aggreg_df.rename(mean_mapper,axis=1,inplace=True)
std_aggreg_df.rename(std_mapper,axis=1,inplace=True)
for key in fixed_params:
mean_aggreg_df[key] = sub_df[key].iloc[0]
std_aggreg_df[key] = sub_df[key].iloc[0]
#print('aggreg_df(n_exp=%s) - shape :'%n_exp,aggreg_df.shape)
aggreg_df_instantiated = True
else:
mean_local_df = sub_df[exception_keys].mean(axis=0).to_frame().T
std_local_df = sub_df[exception_keys].std(axis=0).to_frame().T
mean_local_df.rename(mean_mapper,axis=1,inplace=True)
std_local_df.rename(std_mapper,axis=1,inplace=True)
for key in fixed_params:
try:
mean_local_df[key] = sub_df[key].iloc[0]
std_local_df[key] = sub_df[key].iloc[0]
except:
nan_count+=1
mean_local_df[key] = np.nan
std_local_df[key] = np.nan
#raise 'empty df error'
continue
mean_aggreg_df = pd.concat([mean_aggreg_df.copy(),mean_local_df.copy()])
std_aggreg_df = pd.concat([std_aggreg_df.copy(),std_local_df.copy()])
if count ==0:
print('sub_df.index:', sub_df.index)
for idx in sub_df.index.to_list():
if count ==0:
print('removed_idx:', idx)
idx_to_explore.remove(idx)
count+=1
print('mean_aggreg_df: %s / std_aggreg_df: %s'%(mean_aggreg_df.shape,std_aggreg_df.shape))
aggreg_df = pd.merge(mean_aggreg_df,std_aggreg_df)
return stacked_df,aggreg_df, list_experiments
def reader_results_GWdictionary(dataset_name:str,
str_selection:list,
excluded_str_selection:list,
unmixing_validation:bool=False,
parameters:list =['Ntarget','lr','init_mode_graph','batch_size','algo_seed','l2_reg'],
aggreg_params:str='algo_seed',
compute_statistics:bool=True,
target_resfile:str ='res_clustering.csv',
target_unmixingsfile:str= 'unmixings.npy' ,
verbose:bool=False):
abs_path = os.path.abspath('../results/')
full_path ='%s/%s/'%(abs_path,dataset_name)
list_experiments = []
res = {}
for p in parameters:
res[p]=[]
res['loss'], res['RI'], res['best_RI']= [],[],[]
if compute_statistics:
res['involved_components']=[]
res['mean_components']=[]
res['min_components']=[]
res['max_components']=[]
for subrepo in os.listdir(full_path):
if np.all([str_ in subrepo for str_ in str_selection]) and (not np.any([str_ in subrepo for str_ in excluded_str_selection])):
local_path='%s/%s/'%(full_path,subrepo)
try:
# load necessary files
settings = pd.read_csv(local_path+'/settings')
if not unmixing_validation:
local_res = pd.read_csv(local_path+target_resfile)
if compute_statistics:
unmixings = np.load(local_path+target_unmixingsfile)
else:
local_res=pd.read_csv(local_path +'/res_clustering_100seeds.csv')
from ...utils import constants
import pandas as pd
import geopandas as gpd
import numpy as np
import shapely
import pytest
from contextlib import ExitStack
from sklearn.metrics import mean_absolute_error
from ...preprocessing import detection, clustering
from ...models.sts_epr import STS_epr
from ...core.trajectorydataframe import TrajDataFrame
from ...models.markov_diary_generator import MarkovDiaryGenerator
def global_variables():
# tessellation
tess_polygons = [[[7.481, 45.184],
[7.481, 45.216],
[7.526, 45.216],
[7.526, 45.184],
[7.481, 45.184]],
[[7.481, 45.216],
[7.481, 45.247],
[7.526, 45.247],
[7.526, 45.216],
[7.481, 45.216]],
[[7.526, 45.184],
[7.526, 45.216],
[7.571, 45.216],
[7.571, 45.184],
[7.526, 45.184]],
[[7.526, 45.216],
[7.526, 45.247],
[7.571, 45.247],
[7.571, 45.216],
[7.526, 45.216]]]
geom = [shapely.geometry.Polygon(p) for p in tess_polygons]
tessellation = gpd.GeoDataFrame(geometry=geom, crs="EPSG:4326")
tessellation = tessellation.reset_index().rename(columns={"index": constants.TILE_ID})
relevance = np.random.randint(5, 10, size=len(tessellation))
tessellation[constants.RELEVANCE] = relevance
social_graph = [[0,1],[0,2],[0,3],[1,3],[2,4]]
# mobility diary generator
lats_lngs = np.array([[39.978253, 116.3272755],
[40.013819, 116.306532],
[39.878987, 116.1266865],
[40.013819, 116.306532],
[39.97958, 116.313649],
[39.978696, 116.3262205],
[39.98153775, 116.31079],
[39.978161, 116.3272425],
[38.978161, 115.3272425]])
traj = pd.DataFrame(lats_lngs, columns=[constants.LATITUDE, constants.LONGITUDE])
traj[constants.DATETIME] = pd.to_datetime([
'20130101 8:34:04', '20130101 10:34:08', '20130105 10:34:08',
'20130110 12:34:15', '20130101 1:34:28', '20130101 3:34:54',
'20130101 4:34:55', '20130105 5:29:12', '20130115 00:29:12'])
traj[constants.UID] = [1 for _ in range(5)] + [2 for _ in range(3)] + [3]
tdf = TrajDataFrame(traj)
ctdf = clustering.cluster(tdf)
mdg = MarkovDiaryGenerator()
mdg.fit(ctdf, 3, lid='cluster')
return tessellation, social_graph, mdg
tessellation, social_graph, mdg = global_variables()
sts_epr = STS_epr()
@pytest.mark.parametrize('start_date', [pd.to_datetime('2020/01/01 08:00:00')])
@pytest.mark.parametrize('end_date', [pd.to_datetime('2020/01/10 08:00:00')])
@pytest.mark.parametrize('spatial_tessellation', [tessellation])
@pytest.mark.parametrize('diary_generator', [mdg])
@pytest.mark.parametrize('social_graph', [social_graph, 'random'])
@pytest.mark.parametrize('n_agents', [1,5])
@pytest.mark.parametrize('rsl', [True, False])
@pytest.mark.parametrize('relevance_column',['relevance'])
@pytest.mark.parametrize('random_state', [2])
@pytest.mark.parametrize('show_progress', [True])
# First test set: CORRECT arguments, no ERRORS expected (#test: 8)
def test_sts_generate_success(start_date, end_date, spatial_tessellation, diary_generator,
social_graph, n_agents, rsl, relevance_column, random_state, show_progress):
sts_epr = STS_epr()
tdf = sts_epr.generate(start_date=start_date, end_date=end_date, spatial_tessellation=spatial_tessellation,
social_graph=social_graph, diary_generator=diary_generator, n_agents= n_agents, rsl=rsl,
relevance_column=relevance_column, random_state=random_state, show_progress=show_progress)
assert isinstance(tdf, TrajDataFrame)
# Second test set: WRONG arguments, expected to FAIL
# test 2.1: wrong n_agents (#test: 3)
@pytest.mark.parametrize('start_date', [pd.to_datetime('2020/01/01 08:00:00')])
@pytest.mark.parametrize('end_date', [pd.to_datetime('2020/01/10 08:00:00')])
@pytest.mark.parametrize('spatial_tessellation', [tessellation])
@pytest.mark.parametrize('diary_generator', [mdg])
@pytest.mark.parametrize('social_graph', [social_graph])
@pytest.mark.parametrize('n_agents', [-2,-1,0])
@pytest.mark.parametrize('rsl', [True])
@pytest.mark.parametrize('relevance_column',['relevance'])
@pytest.mark.parametrize('random_state', [2])
@pytest.mark.parametrize('show_progress', [True])
@pytest.mark.xfail(raises=ValueError)
def test_sts_wrong_n_agents(start_date, end_date, spatial_tessellation, diary_generator,
social_graph, n_agents, rsl, relevance_column, random_state, show_progress):
sts_epr = STS_epr()
tdf = sts_epr.generate(start_date=start_date, end_date=end_date, spatial_tessellation=spatial_tessellation,
social_graph=social_graph, diary_generator=diary_generator, n_agents= n_agents, rsl=rsl,
relevance_column=relevance_column, random_state=random_state, show_progress=show_progress)
assert isinstance(tdf, TrajDataFrame)
# test 2.2: end_date prior to start_date (#test: 1)
@pytest.mark.parametrize('start_date', [pd.to_datetime('2020/01/10 08:00:00')])
@pytest.mark.parametrize('end_date', [pd.to_datetime('2020/01/01 08:00:00')])
@pytest.mark.parametrize('spatial_tessellation', [tessellation])
@pytest.mark.parametrize('diary_generator', [mdg])
@pytest.mark.parametrize('social_graph', [social_graph])
@pytest.mark.parametrize('n_agents', [5])
@pytest.mark.parametrize('rsl', [True])
@pytest.mark.parametrize('relevance_column',['relevance'])
@pytest.mark.parametrize('random_state', [2])
@pytest.mark.parametrize('show_progress', [True])
@pytest.mark.xfail(raises=ValueError)
def test_sts_wrong_dates(start_date, end_date, spatial_tessellation, diary_generator,
social_graph, n_agents, rsl, relevance_column, random_state, show_progress):
sts_epr = STS_epr()
tdf = sts_epr.generate(start_date=start_date, end_date=end_date, spatial_tessellation=spatial_tessellation,
social_graph=social_graph, diary_generator=diary_generator, n_agents= n_agents, rsl=rsl,
relevance_column=relevance_column, random_state=random_state, show_progress=show_progress)
# test 2.3: wrong rsl type (#test: 3)
@pytest.mark.parametrize('start_date', [pd.to_datetime('2020/01/01 08:00:00')])
@pytest.mark.parametrize('end_date', [pd.to_datetime('2020/01/10 08:00:00')])
@pytest.mark.parametrize('spatial_tessellation', [tessellation])
@pytest.mark.parametrize('diary_generator', [mdg])
@pytest.mark.parametrize('social_graph', [social_graph])
@pytest.mark.parametrize('n_agents', [5])
@pytest.mark.parametrize('rsl', [1, None, 'True'])
@pytest.mark.parametrize('relevance_column',['relevance'])
@pytest.mark.parametrize('random_state', [2])
@pytest.mark.parametrize('show_progress', [True])
@pytest.mark.xfail(raises=TypeError)
def test_sts_wrong_rsl_type(start_date, end_date, spatial_tessellation, diary_generator,
social_graph, n_agents, rsl, relevance_column, random_state, show_progress):
sts_epr = STS_epr()
tdf = sts_epr.generate(start_date=start_date, end_date=end_date, spatial_tessellation=spatial_tessellation,
social_graph=social_graph, diary_generator=diary_generator, n_agents= n_agents, rsl=rsl,
relevance_column=relevance_column, random_state=random_state, show_progress=show_progress)
# test 2.4: wrong type for the spatial_tessellation (#test: 5)
@pytest.mark.parametrize('start_date', [pd.to_datetime('2020/01/01 08:00:00')])
@pytest.mark.parametrize('end_date', [pd.to_datetime('2020/01/10 08:00:00')])
@pytest.mark.parametrize('spatial_tessellation', ["", None, [], "tessellation", [1,2,3]])
@pytest.mark.parametrize('diary_generator', [mdg])
@pytest.mark.parametrize('social_graph', [social_graph])
@pytest.mark.parametrize('n_agents', [5])
@pytest.mark.parametrize('rsl', [True])
@pytest.mark.parametrize('relevance_column',['relevance'])
@pytest.mark.parametrize('random_state', [2])
@pytest.mark.parametrize('show_progress', [True])
@pytest.mark.xfail(raises=TypeError)
def test_sts_wrong_tex_type(start_date, end_date, spatial_tessellation, diary_generator,
social_graph, n_agents, rsl, relevance_column, random_state, show_progress):
sts_epr = STS_epr()
tdf = sts_epr.generate(start_date=start_date, end_date=end_date, spatial_tessellation=spatial_tessellation,
social_graph=social_graph, diary_generator=diary_generator, n_agents= n_agents, rsl=rsl,
relevance_column=relevance_column, random_state=random_state, show_progress=show_progress)
# test 2.5: # of tiles in spatial_tessellation < 3 (#test: 3)
@pytest.mark.parametrize('start_date', [pd.to_datetime('2020/01/01 08:00:00')])
from pathlib import Path
import re
import numpy as np
import pytest
from pandas._libs.tslibs import Timestamp
from pandas.compat import is_platform_windows
import pandas as pd
from pandas import (
DataFrame,
HDFStore,
Index,
Series,
_testing as tm,
read_hdf,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
ensure_clean_path,
ensure_clean_store,
)
from pandas.util import _test_decorators as td
from pandas.io.pytables import TableIterator
pytestmark = pytest.mark.single
def test_read_missing_key_close_store(setup_path):
# GH 25766
with ensure_clean_path(setup_path) as path:
df = DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
read_hdf(path, "k2")
# smoke test to test that file is properly closed after
# read with KeyError before another write
df.to_hdf(path, "k2")
def test_read_missing_key_opened_store(setup_path):
# GH 28699
with ensure_clean_path(setup_path) as path:
df = DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
with HDFStore(path, "r") as store:
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
read_hdf(store, "k2")
# Test that the file is still open after a KeyError and that we can
# still read from it.
read_hdf(store, "k1")
def test_read_column(setup_path):
df = tm.makeTimeDataFrame()
# -*- coding: utf-8 -*-
###########################################################
# <NAME>'s EQ event plotter #
###########################################################
# NOTE: This script is extremely resource-heavy.       #
# It's a good idea not to run it on RaspberryPi or #
# other machines with limited available system resources. #
###########################################################
# Copyleft <NAME>, 2018 #
###########################################################
###########################################################
# Begin easily editable variables #
###########################################################
# YOUR STATION(S) GO HERE: #
###########################################################
# This variable below tells obspy which Raspberry Shake station(s) to look for.
# Your station code should be a 5-character alphanumeric sequence available on your raspberryshake's local web page.
# If you do not know your station code, please contact the forums, or the Raspberry Shake team for help.
# NOTE: all station names should be strings separated by commas (i.e. enclosed in quotation marks)
# a single station will look like: YOUR_STATIONS = ('RCB43',)  <- note the trailing comma, which keeps it a tuple
# multiple stations will look like: YOUR_STATIONS = ('RCB43', 'R06AC', 'R35E7')
YOUR_STATIONS = ('RCB43', 'R4989', 'R6A3B')#, 'R35E7')
###########################################################
# YOUR LAT AND LONG GO HERE: #
###########################################################
# you don't need to be exact, just general lat/lon to center your maps and gather local earthquake data
# if you don't know them, you can go to google maps and click on an uninhabited area near your town, away from roads
# negative latitude indicates southern hemisphere
YOUR_LATITUDE = 44.0
# negative longitude indicates western hemisphere
YOUR_LONGITUDE = -70.0
###########################################################
# OTHER USEFUL KNOBS: #
###########################################################
# Things to adjust and play with
DURATION = 35 # earthquake search in days before now
LOCAL_RADIUS = 10 # IRIS search radius for local eqs, in degrees
LOCAL_MAG = -1 # minimum local eq magnitude
REGIONAL_RADIUS = 30 # search radius for regional eqs, in degrees
REGIONAL_MAG = 4.0 # minimum regional eq magnitude
NRCAN_RADIUS = 1300 # EarthquakesCanada search radius kilometers
NRCAN_DEG = NRCAN_RADIUS / 110
GLOBAL_MIN_MAG = 5.59 # minimum global eq magnitude
###########################################################
# End of easily editable variables #
###########################################################
# progress definitions
p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, p10 = 0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100
print('#########################')
print('# PLOTEVENTS #')
print('# (ɔ) 2018 <NAME> #')
print('#########################')
print('Using lat %s and lon %s as home.' % (YOUR_LATITUDE, YOUR_LONGITUDE))
print('Parameters:')
print('Local earthquakes larger than M%s within %s degrees' % (LOCAL_MAG, LOCAL_RADIUS))
print('Regional earthquakes larger than M%s within %s and %s degrees' % (REGIONAL_MAG, LOCAL_RADIUS, REGIONAL_RADIUS))
print('Canadian earthquakes within %s km' % (NRCAN_RADIUS))
print('Global earthquakes larger than M%s' % (GLOBAL_MIN_MAG))
print('Stations:')
print(YOUR_STATIONS)
print('')
print('%s%% - Importing resources...' % (p0))
from obspy import read_inventory, read_events
from requests.exceptions import ConnectionError
from obspy.clients.fdsn.client import Client
from obspy.clients.fdsn.header import FDSNNoDataException
from obspy.core.event.catalog import Catalog
from obspy.core.event import Event
from obspy.core.event.event import EventDescription
from obspy.core.event.origin import Origin
from obspy.core.event.magnitude import Magnitude
from obspy.core.event.base import QuantityError, CreationInfo
from obspy.core.inventory.network import Network
from obspy.core.inventory.inventory import Inventory
from obspy.core import UTCDateTime
from datetime import datetime, timedelta
from mpl_toolkits.basemap import Basemap
import pandas as pd
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import csv
import urllib2
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import pickle
import re # Import the regular expression library
Exclusions = [
'a', 'an', 'the', # Articles
'and', 'but', 'or', 'by', 'nor', 'yet', 'so', # Conjunctions
'from', 'near', 'off', 'upon', 'of', # Prepositions
'region', 'reg',
'km',
]
AllCaps = [
'SW', 'SE', 'NW', 'NE', # Sub-cardinal directions
'QC', 'NB', 'NS', 'ON', 'AB', 'BC', 'MB', 'NL', 'NT', 'NU', 'PE', 'SK', 'YT', # Canadian territories
'US', 'U.', 'S.', # United States
]
def TitleCase(curText=str):
""" Take a string and return it in a fashion that follows proper title case guidelines """
outString = ""
fragments = re.split( r'(\".*?\")|(\'.*?\')|(“.*?”)|(‘.*?’)', curText ) # Extract titles in quotation marks from string
for fragment in fragments: # Treat and re-assemble all fragments
if fragment: # skip empty matches generated by the OR in regex
fragString = ""
tokens = fragment.split(); # Break string into individual words
if tokens:
for word in tokens: # Check each word
punct = word[-1] # Check for trailing punctuation mark
if punct.isalpha():
punct = ""
else:
word = word[:-1]
if word.lower() in Exclusions: # if it is excluded,
fragString += word.lower() + punct + " " # make it lowercase
elif word.upper() in AllCaps:
fragString += word.upper() + punct + " "
else: # otherwise,
fragString += word.capitalize() + punct + " " # capitalize it
cap = 1
if not fragString[0].isalpha():
cap = 2
outString += ( fragString[:cap].upper() + fragString[cap:]).strip() + " "
return (outString[:1].upper() + outString[1:]).strip() # Capitalize first letter and strip trailing space
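# Worked examples for TitleCase (input strings are illustrative, not real catalog output):
#   TitleCase('10 km SW of anchorage, alaska')   -> '10 km SW of Anchorage, Alaska'
#   TitleCase('near the coast of northern CHILE') -> 'Near the Coast of Northern Chile'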
print('%s%% - Importing IRIS FDSN client...' % (p1))
iris = Client("IRIS")
t2 = UTCDateTime.now()
t2str = t2.strftime('%Y-%m-%d %H:%M UTC')
t1 = t2 - timedelta(days=DURATION)
cat = Catalog()
nrcat = Catalog()
cat2 = Catalog()
####### LOCAL ########
try:
print('%s%% - Getting local earthquakes within %s degrees from IRIS...' % (p2, LOCAL_RADIUS))
cat += iris.get_events(starttime=t1, endtime=t2, latitude=YOUR_LATITUDE,
longitude=YOUR_LONGITUDE, minmagnitude=LOCAL_MAG, maxradius=LOCAL_RADIUS)
for evt in cat:
evt['event_descriptions'][0]['text'] = TitleCase(evt['event_descriptions'][0]['text'].split('/')[0])
try:
evt['magnitudes'][0]['creation_info']['author'] = evt['origins'][0]['creation_info']['author'].split(',')[0]
except TypeError as e:
au = evt['origins'][0]['creation_info']['author'].split(',')[0]
tm = evt['origins'][0]['time']
evt['magnitudes'][0]['creation_info'] = CreationInfo(author=au, time=tm)
print("%s%% - Warning: manually assigned a quake CreationInfo (author: %s, time: %s)" % (p5, au, tm))
evt['origins'][0]['depth'] = evt['origins'][0]['depth'] / 1000
except FDSNNoDataException:
print('%s%% - No local earthquakes found in IRIS database within %s degree radius.' % (p2, LOCAL_RADIUS))
except Exception as e:
print('%s%% - Error: %s' % (p2, e))
try:
print('%s%% - Connecting to NRCan...' % (p5))
url = 'http://www.earthquakescanada.nrcan.gc.ca/api//fdsnws/event/1/query?earthquaketypes=L&starttime=%s&endtime=%s&latitude=%s&longitude=%s&minradius=0&maxradius=%s&onlyfelt=0' % (t1.strftime('%Y%%2F%m%%2F%d+%H%%3A%M%%3A%S'), t2.strftime('%Y%%2F%m%%2F%d+%H%%3A%M%%3A%S'), YOUR_LATITUDE, YOUR_LONGITUDE, NRCAN_DEG)
nrcat += read_events(pathname_or_url=url, format='QUAKEML')
print('%s%% - Query url: %s' % (p5, url))
for evt in nrcat:
evt['event_descriptions'][0]['text'] = TitleCase(evt['event_descriptions'][0]['text'].split('/')[0])
evt['magnitudes'][0]['creation_info'] = CreationInfo(author='cn', time=evt['origins'][0]['time'])
#evt['origins'][0]['depth'] = evt['origins'][0]['depth'] * 1000
cat += nrcat
print('%s%% - Found %s NRCan events. Catalog now contains %s events.' % (p5, nrcat.count(), cat.count()))
# except ConnectionError:
# print('%s%% - Could not connect to NRCan.' % (p5))
except Exception as e:
print('%s%% - An error occurred parsing data from NRCan. Error: %s' % (p5, e))
print('%s%% - Local IRIS/NRCAN catalog:' % (p4))
print(cat.__str__(print_all=True))
####### REGIONAL ########
try:
print('%s%% - Getting regional earthquakes above M%s between %s and %s degrees...' % (p3, REGIONAL_MAG, LOCAL_RADIUS, REGIONAL_RADIUS))
cat2 += iris.get_events(starttime=t1, endtime=t2, latitude=YOUR_LATITUDE,
longitude=YOUR_LONGITUDE, minmagnitude=REGIONAL_MAG, minradius=LOCAL_RADIUS, maxradius=REGIONAL_RADIUS)
except FDSNNoDataException:
print('%s%% - No %s+ earthquakes found between %s and %s degree radii.' % (p3, REGIONAL_MAG, LOCAL_RADIUS, REGIONAL_RADIUS))
except Exception as e:
print('%s%% - Error: %s' % (p3, e))
try:
for evt in cat2:
evt['event_descriptions'][0]['text'] = TitleCase(evt['event_descriptions'][0]['text'])
evt['origins'][0]['depth'] = evt['origins'][0]['depth']
try:
evt['magnitudes'][0]['creation_info']['author'] = evt['origins'][0]['creation_info']['author'].split(',')[0]
except TypeError as e:
au = evt['origins'][0]['creation_info']['author'].split(',')[0]
tm = evt['origins'][0]['time']
evt['magnitudes'][0]['creation_info'] = CreationInfo(author=au, time=tm)
print("%s%% - Warning: manually assigned a quake CreationInfo (author: %s, time: %s)" % (p5, au, tm))
except Exception as e:
print('%s%% - Error parsing IRIS local/regional catalog: %s' % (p5, e))
####### GLOBAL ########
try:
# cat2 += iris.get_events(starttime=t1, endtime=t2, latitude=YOUR_LATITUDE,
# longitude=YOUR_LONGITUDE, minmagnitude=5, maxmagnitude=5.59, minradius=12, maxradius=50)
print('%s%% - Getting global earthquakes larger than M%s...' % (p5, GLOBAL_MIN_MAG))
cat2 += iris.get_events(starttime=t1, endtime=t2, minmagnitude=GLOBAL_MIN_MAG)
except FDSNNoDataException:
print('%s%% - No %s+ earthquakes in global database.' % (p5, GLOBAL_MIN_MAG))
except Exception as e:
print('%s%% - Error querying IRIS database: %s' % (p5, e))
try:
for evt in cat2:
evt['event_descriptions'][0]['text'] = TitleCase(evt['event_descriptions'][0]['text'])
evt['origins'][0]['depth'] = evt['origins'][0]['depth'] / 1000.
try:
evt['magnitudes'][0]['creation_info']['author'] = evt['origins'][0]['creation_info']['author'].split(',')[0]
except TypeError as e:
au = evt['origins'][0]['creation_info']['author'].split(',')[0]
tm = evt['origins'][0]['time']
evt['magnitudes'][0]['creation_info'] = CreationInfo(author=au, time=tm)
print("%s%% - Warning: manually assigned a quake CreationInfo (author: %s, time: %s)" % (p5, au, tm))
except Exception as e:
print('%s%% - Error parsing IRIS global catalog: %s' % (p5, e))
print('%s%% - Regional/global IRIS catalog:' % (p6))
print(cat2.__str__(print_all=True))
####### CATALOG WRITING ########
print('%s%% - Writing catalogs. Local count: %s; global count: %s' % (p6, cat.count(), cat2.count()))
cat.write('evtlocal30days.xml', format='QUAKEML')
cat2.write('evtmajor30days.xml', format='QUAKEML')
# put into pandas dataframe and plot seismicity rate
print('%s%% - Creating DataFrame...' % (p6))
times = []
lats = []
lons = []
note = []
deps = []
magnitudes = []
magnitudestype = []
for event in cat:
if len(event.origins) != 0 and len(event.magnitudes) != 0:
times.append(event.origins[0].time.datetime)
lats.append(event.origins[0].latitude)
lons.append(event.origins[0].longitude)
note.append(event.event_descriptions[0].text)
deps.append(event.origins[0].depth)
magnitudes.append(event.magnitudes[0].mag)
magnitudestype.append(event.magnitudes[0].magnitude_type)
df = pd.DataFrame({'lat':lats, 'lon':lons, 'depth':deps, 'desc':note, 'mag':magnitudes, 'type':magnitudestype,}, index = times)
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import os
import xml.etree.ElementTree as ET
# In[2]:
#Global
studyNames = ['Framingham', 'JHS', 'COPDGene', 'MESA', 'Amish', 'CARDIA', 'HCHS', 'WHI', 'ARIC', 'CFS', 'CHS',
'Mayo_VTE', 'GOLDN', 'GenSALT', 'CCAF', 'SAS', 'SAGE', 'CRA', 'VAFAR', 'MGH_AF', 'HVH', 'Partners',
'VU_AF', 'WGHS', 'GALAII', 'GeneSTAR', 'BAGS', 'Sarcoidosis', 'SAFS', 'GENOA', 'HyperGEN', 'THRV',
'DHS']
dataPath = '/data/'
dictPath = '/dict/'
# In[7]:
'''A helper function that finds all the positions of a given character in a string'''
def getAllPositionsOfCharacterInString(string, char):
return [i for i, letter in enumerate(string) if letter == char]
'''It uses a simple pattern search to find the corresponding data files (Multiple consents) for each dict file'''
def getCorrespondingDataFilesForDictFile(dictFile, dataFilePath):
cutOffPosition = getAllPositionsOfCharacterInString(dictFile, '.')[3]
dataFileNamePrefix = dictFile[0:cutOffPosition]
dataFiles = [fn for fn in os.listdir(dataFilePath) if fn.startswith(dataFileNamePrefix)]
return dataFiles
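# Illustrative example (the file name is a hypothetical dbGaP-style name, not a
# real file from this project):
#   getAllPositionsOfCharacterInString('a.b.c', '.') -> [1, 3]
#   For a dict file 'phs000007.v30.pht000009.v2.DICT.xml', the prefix kept is
#   'phs000007.v30.pht000009.v2', and every data file starting with that prefix
#   (typically one per consent group) is returned.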
def getPatientCountForVariable(data_df, colIndex):
return (data_df.iloc[:, colIndex]).count()
'''The function that does it all.
STEP 1: Opens dict files in turn.
STEP 2: Extracts the variables from each dict file.
STEP 3: For each variable, it looks for patients entering values for that variable in the corresponding data file
STEP 4: Also does a check if at least one patient entered a value for the variable or not
RETURNS two different dataframes'''
def getPatientCountsForEveryVariable(studyName):
dictFilePath = studyName + dictPath
dataFilePath = studyName + dataPath
dictFiles = [f for f in os.listdir(dictFilePath) if os.path.isfile(os.path.join(dictFilePath, f))]
columnList = ['STUDY', 'PHV', 'LABEL', 'PATIENT_COUNT', 'DESCRIPTION']
variables_df = pd.DataFrame(columns=columnList)
import datetime as dtm
from functools import lru_cache
from typing import Optional, Callable
import pandas as pd
from .base_classes import SingleFinancialSymbolSource, FinancialSymbolsSource
from .._settings import data_url, change_column_name
from ..common.enums import Currency, SecurityType, Period
from ..common.financial_symbol import FinancialSymbol
from ..common.financial_symbol_id import FinancialSymbolId
from ..common.financial_symbol_info import FinancialSymbolInfo
class CbrTopRatesSource(SingleFinancialSymbolSource):
def _load_rates(self):
df = pd.read_csv('{}cbr_deposit_rate/data.csv'.format(data_url), sep='\t')
df.sort_values(by='decade', inplace=True)
df.rename(columns={'close': change_column_name, 'decade': 'date'},
inplace=True)
return df
def _load_dates(self, kind):
index = pd.read_csv('{}cbr_deposit_rate/__index.csv'.format(data_url), sep='\t')
period_str = index[kind][0]
return pd.Period(period_str, freq='M')
def __init__(self):
super().__init__(
namespace='cbr',
name='TOP_rates',
values_fetcher=lambda: self._load_rates(),
start_period=self._load_dates(kind='date_start'),
end_period=self._load_dates(kind='date_end'),
long_name='Динамика максимальной процентной ставки (по вкладам в российских рублях)',
currency=Currency.RUB,
security_type=SecurityType.RATES,
period=Period.DECADE,
adjusted_close=False,
)
class CbrCurrenciesSource(FinancialSymbolsSource):
def __init__(self):
super().__init__(namespace='cbr')
self.url_base = data_url + 'currency/'
self.index = pd.read_csv(self.url_base + '__index.csv', sep='\t', index_col='name')
self.__short_names = {
Currency.RUB: 'Рубль РФ',
Currency.USD: 'Доллар США',
Currency.EUR: 'Евро',
}
self._currency_min_date = {
Currency.RUB.name: pd.Period('1990', freq='D'),
Currency.USD.name: pd.Period('1913', freq='D'),
Currency.EUR.name: pd.Period('1996', freq='D'),
}
import json
import numpy as np
import pandas as pd
from string import punctuation
from collections import Counter
from bs4 import BeautifulSoup
import os
import requests
import re
def process_politician_db():
member_info = pd.read_csv('tweet_data/parliamentarians/full_member_info.csv', encoding='utf-16',
engine='python')
tweets_data = pd.DataFrame()
with open('tweet_data/parliamentarians/all_tweet_ids.jsonl', encoding='utf-8') as f:
i = 0
for line in f:
data = json.loads(line)
if data['lang'] == 'en':
print(i)
reduced = {'full_text':data['full_text'], 'id':data['id']}
tweets_data = tweets_data.append(reduced, ignore_index=True)
i += 1
tweets_data.rename(columns={'id':'uid'}, inplace=True)
member_info_filtered = member_info[['country', 'name', 'party', 'left_right', 'uid']].copy()
df = pd.merge(member_info_filtered, tweets_data, how='left', on='uid')
return df
def process_tweets(fpath):
""" Processes tweets into a Pandas DataFrame
Args:
fpath (string): Relative filepath to tweet JSON
Returns:
df_tweet (DataFrame) DataFrame with tweet text and user_id information
"""
path = os.path.relpath(fpath)
tweets_file = open(path)
line = tweets_file.readline()
tweet_dict_list = []
while line:
tweet_dict = json.loads(line)
clean_tweets_dict = {
'text' : tweet_dict['text'],
'user_id' : tweet_dict['user_id']
}
tweet_dict_list.append(clean_tweets_dict)
line = tweets_file.readline()
return pd.DataFrame(tweet_dict_list)
def process_users(fpath):
""" Processes twitter users into a DataFrame
Args:
fpath (string): Relative filepath to users JSON
Returns:
df_user (DataFrame): DataFrame with user id and name information
"""
path = os.path.relpath(fpath)
users_file = open(path)
line = users_file.readline()
user_dict_list = []
while line:
user_dict = json.loads(line)
clean_user_dict = {
'user_id' : user_dict['id'],
'name' : user_dict['name']
}
user_dict_list.append(clean_user_dict)
line = users_file.readline()
return pd.DataFrame(user_dict_list)
def join_tweets_users(df_tweet, df_user):
""" Joins a dataframe of tweets with a dataframe of users by matching user ids to tweets
Args:
df_tweet (DataFrame): A dataframe containing tweet text with user ids
df_user (DataFrame): A dataframe containing user name and id
Returns:
df_tweet_user (DataFrame): A dataframe containing tweet text, user id, and user name
"""
df_tweet_user = df_tweet.merge(df_user, on='user_id', how='left')
return df_tweet_user
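# Usage sketch (hypothetical paths; the files are assumed to be newline-delimited
# JSON with the fields accessed above):
# df_tweet = process_tweets('data/tweets.jsonl')
# df_user = process_users('data/users.jsonl')
# df_tweet_user = join_tweets_users(df_tweet, df_user)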
def json_to_df(fpath):
""" Parses a JSON to a dataframe
Args:
fpath (string): Relative filepath to a JSON representation of a dataframe
Returns:
df (DataFrame): DataFrame representation of JSON data found at filepath
"""
path = os.path.relpath(fpath)
return pd.read_json(path)
def csv_to_df(fpath):
""" Parses a JSON to a dataframe
Args:
fpath (string): Relative filepath to a CSV representation of a dataframe
Returns:
df (DataFrame): DataFrame representation of CSV data found at filepath
"""
path = os.path.relpath(fpath)
return pd.read_csv(path)
from pandas.compat.numpy.function import validate_argsort_with_ascending
import pandas as pd
from PythonClass.common_func import CommonFunc
# tuple
# - 리스트 변수와는 다르게 변경이 불가능하다.
a = (1, 2, 3)
b = 1, 2, 3
print(type(a), type(b))
print('')
print('====================================================================================================')
print('== Problem 80. In the situation below, what is the most advantageous move for player 1 to play?')
print('====================================================================================================')
# select c5,
# max(val_new)
# from ttt_data
# where c1 = 1 and c2 = 2 and c3 = 1 and c4 = 2 and c7 = 1 and c9 = 2
#
# c5;
#
# select c6,
# max(val_new)
# from ttt_data
# where c1 = 1 and c2 = 2 and c3 = 1 and c4 = 2 and c7 = 1 and c9 = 2
# group by c6;
#
# select c8,
# max(val_new)
# from ttt_data a
# where c1 = 1 and c2 = 2 and c3 = 1 and c4 = 2 and c7 = 1 and c9 = 2
# group by c8;
print('')
print('====================================================================================================')
print('== Problem 81. Rename the mit_ttt training data file to something simpler and print the entire data set in Python')
print('====================================================================================================')
for mitData in CommonFunc.returnCsvData('mit_ttt.csv'):
print(mitData)
print('')
print('====================================================================================================')
print('== Problem 82. Using the pandas module, print the maximum salary from the employee table.')
print('====================================================================================================')
emp = pd.DataFrame.from_csv('D:\\KYH\\02.PYTHON\\data\\emp.csv')
print(emp['sal'].max())
print('')
print('====================================================================================================')
print('== Problem 83. Print the name, salary, and job of employees whose job is SALESMAN.')
print('====================================================================================================')
print(emp[['ename', 'sal']][emp['job'] == 'SALESMAN'])
# columns  # rows
print('')
print('====================================================================================================')
print('== Problem 84. Print the name and salary of employees whose salary is 3000 or more!')
print('====================================================================================================')
result = emp[['ename', 'sal']][emp['sal'] >= 3000]
print(result)
print('')
print('====================================================================================================')
print('== Problem 85. Print the name and salary of employees whose salary is 3000 or more, starting from the lowest-paid employee!')
print('====================================================================================================')
result = emp[['ename', 'sal']][emp['sal'] >= 3000].sort_values('sal', ascending=True)
print(result)
print('')
print('====================================================================================================')
print('== Problem 86. Print the maximum salary of employees in department number 20!')
print('====================================================================================================')
result = emp[['sal']][emp['deptno'] == 20].max()
print(result)
print('')
print('====================================================================================================')
print('== Problem 87. Print each job and the total salary per job!')
print('====================================================================================================')
result = emp.groupby('job')['sal'].sum()
print(result)
print('')
print('====================================================================================================')
print('== Problem 88. Print each department number and the average salary per department number!')
print('====================================================================================================')
result = emp.groupby('deptno')['sal'].mean()
print(result)
print('')
print('====================================================================================================')
print('== Problem 89. Implement the result below using pandas!')
print('====================================================================================================')
mit = pd.DataFrame.from_csv('D:\\KYH\\02.PYTHON\\data\\mit_ttt2.csv')
"""
Naives and Others Requiring No Additional Packages Beyond Numpy and Pandas
"""
from math import ceil
import warnings
import random
import datetime
import numpy as np
import pandas as pd
from autots.models.base import ModelObject, PredictionObject
from autots.tools import seasonal_int
from autots.tools.probabilistic import Point_to_Probability, historic_quantile
# optional requirement
try:
from scipy.spatial.distance import cdist
except Exception:
pass
class ZeroesNaive(ModelObject):
"""Naive forecasting predicting a dataframe of zeroes (0's)
Args:
name (str): String to identify class
frequency (str): String alias of datetime index frequency or else 'infer'
prediction_interval (float): Confidence interval for probabilistic forecast
"""
def __init__(
self,
name: str = "ZeroesNaive",
frequency: str = 'infer',
prediction_interval: float = 0.9,
holiday_country: str = 'US',
random_seed: int = 2020,
verbose: int = 0,
**kwargs
):
ModelObject.__init__(
self,
name,
frequency,
prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
)
def fit(self, df, future_regressor=None):
"""Train algorithm given data supplied
Args:
df (pandas.DataFrame): Datetime Indexed
"""
df = self.basic_profile(df)
self.fit_runtime = datetime.datetime.now() - self.startTime
return self
def predict(
self, forecast_length: int, future_regressor=None, just_point_forecast=False
):
"""Generates forecast data immediately following dates of index supplied to .fit()
Args:
forecast_length (int): Number of periods of data to forecast ahead
regressor (numpy.Array): additional regressor, not used
just_point_forecast (bool): If True, return a pandas.DataFrame of just point forecasts
Returns:
Either a PredictionObject of forecasts and metadata, or
if just_point_forecast == True, a dataframe of point forecasts
"""
predictStartTime = datetime.datetime.now()
df = pd.DataFrame(
np.zeros((forecast_length, (self.train_shape[1]))),
columns=self.column_names,
index=self.create_forecast_index(forecast_length=forecast_length),
)
if just_point_forecast:
return df
else:
predict_runtime = datetime.datetime.now() - predictStartTime
prediction = PredictionObject(
model_name=self.name,
forecast_length=forecast_length,
forecast_index=df.index,
forecast_columns=df.columns,
lower_forecast=df,
forecast=df,
upper_forecast=df,
prediction_interval=self.prediction_interval,
predict_runtime=predict_runtime,
fit_runtime=self.fit_runtime,
model_parameters=self.get_params(),
)
return prediction
def get_new_params(self, method: str = 'random'):
"""Returns dict of new parameters for parameter tuning"""
return {}
def get_params(self):
"""Return dict of current parameters"""
return {}
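# Hedged usage sketch (not from this file): the toy dataframe and frequency are
# illustrative; basic_profile()/create_forecast_index() come from ModelObject in
# autots.models.base, as imported above.
# toy = pd.DataFrame({'series_a': range(30)},
#                    index=pd.date_range('2021-01-01', periods=30, freq='D'))
# prediction = ZeroesNaive(frequency='D').fit(toy).predict(forecast_length=7)
# prediction.forecast is then a 7 x 1 dataframe of zeroes indexed by the next 7 days.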
class LastValueNaive(ModelObject):
"""Naive forecasting predicting a dataframe of the last series value
Args:
name (str): String to identify class
frequency (str): String alias of datetime index frequency or else 'infer'
prediction_interval (float): Confidence interval for probabilistic forecast
"""
def __init__(
self,
name: str = "LastValueNaive",
frequency: str = 'infer',
prediction_interval: float = 0.9,
holiday_country: str = 'US',
random_seed: int = 2020,
**kwargs
):
ModelObject.__init__(
self,
name,
frequency,
prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
)
def fit(self, df, future_regressor=None):
"""Train algorithm given data supplied
Args:
df (pandas.DataFrame): Datetime Indexed
"""
df = self.basic_profile(df)
self.last_values = df.tail(1).to_numpy()
# self.df_train = df
self.lower, self.upper = historic_quantile(
df, prediction_interval=self.prediction_interval
)
self.fit_runtime = datetime.datetime.now() - self.startTime
return self
def predict(
self, forecast_length: int, future_regressor=None, just_point_forecast=False
):
"""Generates forecast data immediately following dates of index supplied to .fit()
Args:
forecast_length (int): Number of periods of data to forecast ahead
regressor (numpy.Array): additional regressor, not used
just_point_forecast (bool): If True, return a pandas.DataFrame of just point forecasts
Returns:
Either a PredictionObject of forecasts and metadata, or
if just_point_forecast == True, a dataframe of point forecasts
"""
predictStartTime = datetime.datetime.now()
df = pd.DataFrame(
np.tile(self.last_values, (forecast_length, 1)),
columns=self.column_names,
index=self.create_forecast_index(forecast_length=forecast_length),
)
if just_point_forecast:
return df
else:
# upper_forecast, lower_forecast = Point_to_Probability(self.df_train, df, prediction_interval = self.prediction_interval, method = 'historic_quantile')
upper_forecast = df.astype(float) + (self.upper * 0.8)
lower_forecast = df.astype(float) - (self.lower * 0.8)
predict_runtime = datetime.datetime.now() - predictStartTime
prediction = PredictionObject(
model_name=self.name,
forecast_length=forecast_length,
forecast_index=df.index,
forecast_columns=df.columns,
lower_forecast=lower_forecast,
forecast=df,
upper_forecast=upper_forecast,
prediction_interval=self.prediction_interval,
predict_runtime=predict_runtime,
fit_runtime=self.fit_runtime,
model_parameters=self.get_params(),
)
return prediction
def get_new_params(self, method: str = 'random'):
"""Returns dict of new parameters for parameter tuning"""
return {}
def get_params(self):
"""Return dict of current parameters"""
return {}
class AverageValueNaive(ModelObject):
"""Naive forecasting predicting a dataframe of the series' median values
Args:
name (str): String to identify class
frequency (str): String alias of datetime index frequency or else 'infer'
prediction_interval (float): Confidence interval for probabilistic forecast
"""
def __init__(
self,
name: str = "AverageValueNaive",
frequency: str = 'infer',
prediction_interval: float = 0.9,
holiday_country: str = 'US',
random_seed: int = 2020,
verbose: int = 0,
method: str = 'Median',
**kwargs
):
ModelObject.__init__(
self,
name,
frequency,
prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
)
self.method = method
def fit(self, df, future_regressor=None):
"""Train algorithm given data supplied.
Args:
df (pandas.DataFrame): Datetime Indexed
"""
df = self.basic_profile(df)
method = str(self.method).lower()
if method == 'median':
self.average_values = df.median(axis=0).to_numpy()
elif method == 'mean':
self.average_values = df.mean(axis=0).to_numpy()
elif method == 'mode':
self.average_values = (
df.mode(axis=0).iloc[0].fillna(df.median(axis=0)).to_numpy()
)
elif method == "midhinge":
results = df.to_numpy()
q1 = np.nanquantile(results, q=0.25, axis=0)
q2 = np.nanquantile(results, q=0.75, axis=0)
self.average_values = (q1 + q2) / 2
elif method in ["weighted_mean", "exp_weighted_mean"]:
weights = pd.to_numeric(df.index)
weights = weights - weights.min()
if method == "exp_weighted_mean":
weights = (weights / weights[weights != 0].min()) ** 2
self.average_values = np.average(df.to_numpy(), axis=0, weights=weights)
self.fit_runtime = datetime.datetime.now() - self.startTime
self.lower, self.upper = historic_quantile(
df, prediction_interval=self.prediction_interval
)
return self
def predict(
self, forecast_length: int, future_regressor=None, just_point_forecast=False
):
"""Generates forecast data immediately following dates of index supplied to .fit()
Args:
forecast_length (int): Number of periods of data to forecast ahead
regressor (numpy.Array): additional regressor, not used
just_point_forecast (bool): If True, return a pandas.DataFrame of just point forecasts
Returns:
Either a PredictionObject of forecasts and metadata, or
if just_point_forecast == True, a dataframe of point forecasts
"""
predictStartTime = datetime.datetime.now()
df = pd.DataFrame(
np.tile(self.average_values, (forecast_length, 1)),
columns=self.column_names,
index=self.create_forecast_index(forecast_length=forecast_length),
)
if just_point_forecast:
return df
else:
upper_forecast = df.astype(float) + self.upper
lower_forecast = df.astype(float) - self.lower
predict_runtime = datetime.datetime.now() - predictStartTime
prediction = PredictionObject(
model_name=self.name,
forecast_length=forecast_length,
forecast_index=df.index,
forecast_columns=df.columns,
lower_forecast=lower_forecast,
forecast=df,
upper_forecast=upper_forecast,
prediction_interval=self.prediction_interval,
predict_runtime=predict_runtime,
fit_runtime=self.fit_runtime,
model_parameters=self.get_params(),
)
return prediction
def get_new_params(self, method: str = 'random'):
"""Returns dict of new parameters for parameter tuning"""
method_choice = random.choices(
[
"Mean",
"Median",
"Mode",
"Midhinge",
"Weighted_Mean",
"Exp_Weighted_Mean",
],
[0.3, 0.3, 0.01, 0.1, 0.4, 0.1],
)[0]
return {'method': method_choice}
def get_params(self):
"""Return dict of current parameters."""
return {'method': self.method}
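# Editor's note: the following usage sketch is illustrative only and not part of the
# original module. It assumes a daily DatetimeIndex and made-up column names, and simply
# round-trips AverageValueNaive on synthetic data to show the flat median forecast.
def _example_average_value_naive():
    """Illustrative sketch: flat forecasts equal to each column's median."""
    import numpy as np
    import pandas as pd
    idx = pd.date_range("2021-01-01", periods=60, freq="D")
    toy = pd.DataFrame(
        {"series_a": np.random.rand(60), "series_b": np.random.rand(60) * 10},
        index=idx,
    )
    model = AverageValueNaive(frequency="D", method="Median").fit(toy)
    prediction = model.predict(forecast_length=7)
    # the point forecast is a (7 x 2) frame of repeated per-column medians
    return prediction.forecast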
class SeasonalNaive(ModelObject):
"""Naive forecasting predicting a dataframe with seasonal (lag) forecasts.
Concerto No. 2 in G minor, Op. 8, RV 315
Args:
name (str): String to identify class
frequency (str): String alias of datetime index frequency or else 'infer'
prediction_interval (float): Confidence interval for probabilistic forecast
method (str): Either 'LastValue' (use last value of lag n) or 'Mean' (avg of all lag n)
        lag_1 (int): The lag of the seasonality; should be an int > 1.
lag_2 (int): Optional second lag of seasonality which is averaged with first lag to produce forecast.
"""
def __init__(
self,
name: str = "SeasonalNaive",
frequency: str = 'infer',
prediction_interval: float = 0.9,
holiday_country: str = 'US',
random_seed: int = 2020,
verbose: int = 0,
lag_1: int = 7,
lag_2: int = None,
method: str = 'LastValue',
**kwargs
):
ModelObject.__init__(
self,
name,
frequency,
prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
)
self.lag_1 = abs(int(lag_1))
self.lag_2 = lag_2
if str(self.lag_2).isdigit():
self.lag_2 = abs(int(self.lag_2))
if str(self.lag_2) == str(self.lag_1):
self.lag_2 = 1
self.method = method
def fit(self, df, future_regressor=None):
"""Train algorithm given data supplied.
Args:
df (pandas.DataFrame): Datetime Indexed
"""
df = self.basic_profile(df)
self.df_train = df
df_length = self.train_shape[0]
self.tile_values_lag_2 = None
if self.method in ['Mean', 'Median']:
tile_index = np.tile(
np.arange(self.lag_1), int(np.ceil(df_length / self.lag_1))
)
tile_index = tile_index[len(tile_index) - (df_length) :]
df.index = tile_index
if self.method == "Median":
self.tile_values_lag_1 = df.groupby(level=0, axis=0).median()
else:
self.tile_values_lag_1 = df.groupby(level=0, axis=0).mean()
if str(self.lag_2).isdigit():
if self.lag_2 == 1:
self.tile_values_lag_2 = df.tail(self.lag_2)
else:
tile_index = np.tile(
np.arange(self.lag_2), int(np.ceil(df_length / self.lag_2))
)
tile_index = tile_index[len(tile_index) - (df_length) :]
df.index = tile_index
if self.method == "Median":
self.tile_values_lag_2 = df.groupby(level=0, axis=0).median()
else:
self.tile_values_lag_2 = df.groupby(level=0, axis=0).mean()
else:
            self.method = 'LastValue'
self.tile_values_lag_1 = df.tail(self.lag_1)
if str(self.lag_2).isdigit():
self.tile_values_lag_2 = df.tail(self.lag_2)
self.fit_runtime = datetime.datetime.now() - self.startTime
return self
def predict(
self,
forecast_length: int,
future_regressor=None,
just_point_forecast: bool = False,
):
"""Generate forecast data immediately following dates of .fit().
Args:
forecast_length (int): Number of periods of data to forecast ahead
regressor (numpy.Array): additional regressor, not used
just_point_forecast (bool): If True, return a pandas.DataFrame of just point forecasts
Returns:
Either a PredictionObject of forecasts and metadata, or
if just_point_forecast == True, a dataframe of point forecasts
"""
predictStartTime = datetime.datetime.now()
tile_len = len(self.tile_values_lag_1.index)
df = pd.DataFrame(
np.tile(
self.tile_values_lag_1, (int(np.ceil(forecast_length / tile_len)), 1)
)[0:forecast_length],
columns=self.column_names,
index=self.create_forecast_index(forecast_length=forecast_length),
)
if str(self.lag_2).isdigit():
y = pd.DataFrame(
np.tile(
self.tile_values_lag_2,
(
int(
np.ceil(forecast_length / len(self.tile_values_lag_2.index))
),
1,
),
)[0:forecast_length],
columns=self.column_names,
index=self.create_forecast_index(forecast_length=forecast_length),
)
df = (df + y) / 2
# df = df.apply(pd.to_numeric, errors='coerce')
df = df.astype(float)
if just_point_forecast:
return df
else:
upper_forecast, lower_forecast = Point_to_Probability(
self.df_train,
df,
method='inferred_normal',
prediction_interval=self.prediction_interval,
)
predict_runtime = datetime.datetime.now() - predictStartTime
prediction = PredictionObject(
model_name=self.name,
forecast_length=forecast_length,
forecast_index=df.index,
forecast_columns=df.columns,
lower_forecast=lower_forecast,
forecast=df,
upper_forecast=upper_forecast,
prediction_interval=self.prediction_interval,
predict_runtime=predict_runtime,
fit_runtime=self.fit_runtime,
model_parameters=self.get_params(),
)
return prediction
def get_new_params(self, method: str = 'random'):
"""Return dict of new parameters for parameter tuning."""
lag_1_choice = seasonal_int()
lag_2_choice = random.choices(
[None, seasonal_int(include_one=True)], [0.3, 0.7]
)[0]
if str(lag_2_choice) == str(lag_1_choice):
lag_2_choice = 1
method_choice = random.choices(
['Mean', 'Median', 'LastValue'], [0.4, 0.2, 0.4]
)[0]
return {'method': method_choice, 'lag_1': lag_1_choice, 'lag_2': lag_2_choice}
def get_params(self):
"""Return dict of current parameters."""
return {'method': self.method, 'lag_1': self.lag_1, 'lag_2': self.lag_2}
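# Editor's note: illustrative usage sketch, not part of the original module. It assumes
# daily data with a weekly (lag_1=7) pattern; with method='LastValue' the forecast simply
# repeats the last observed week forward.
def _example_seasonal_naive():
    """Illustrative sketch: repeat the last seasonal cycle as the forecast."""
    import numpy as np
    import pandas as pd
    idx = pd.date_range("2021-01-01", periods=8 * 7, freq="D")
    toy = pd.DataFrame({"sales": np.tile(np.arange(7, dtype=float), 8)}, index=idx)
    model = SeasonalNaive(frequency="D", lag_1=7, method="LastValue").fit(toy)
    prediction = model.predict(forecast_length=14)
    # the 14-step forecast is the last observed week tiled twice
    return prediction.forecast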
class MotifSimulation(ModelObject):
"""More dark magic created by the evil mastermind of this project.
Basically a highly-customized KNN
Warning: if you are forecasting many steps (large forecast_length), and interested in probabilistic upper/lower forecasts, then set recency_weighting <= 0, and have a larger cutoff_minimum
Args:
name (str): String to identify class
frequency (str): String alias of datetime index frequency or else 'infer'
prediction_interval (float): Confidence interval for probabilistic forecast
phrase_len (int): length of motif vectors to compare as samples
comparison (str): method to process data before comparison, 'magnitude' is original data
shared (bool): whether to compare motifs across all series together, or separately
distance_metric (str): passed through to sklearn pairwise_distances
        max_motifs (float): number of motifs to compare per series. If less than 1, used as a fraction of the training data length
        recency_weighting (float): amount to upweight the value of more recent data.
cutoff_threshold (float): lowest value of distance metric to allow into forecast
cutoff_minimum (int): minimum number of motif vectors to include in forecast.
point_method (str): summarization method to choose forecast on, 'sample', 'mean', 'sign_biased_mean', 'median'
"""
def __init__(
self,
name: str = "MotifSimulation",
frequency: str = 'infer',
prediction_interval: float = 0.9,
holiday_country: str = 'US',
random_seed: int = 2020,
phrase_len: str = '5',
comparison: str = 'magnitude_pct_change_sign',
shared: bool = False,
distance_metric: str = 'l2',
max_motifs: float = 50,
recency_weighting: float = 0.1,
cutoff_threshold: float = 0.9,
cutoff_minimum: int = 20,
point_method: str = 'median',
n_jobs: int = -1,
verbose: int = 1,
**kwargs
):
ModelObject.__init__(
self,
name,
frequency,
prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
n_jobs=n_jobs,
)
self.phrase_len = phrase_len
self.comparison = comparison
self.shared = shared
self.distance_metric = distance_metric
self.max_motifs = max_motifs
self.recency_weighting = recency_weighting
self.cutoff_threshold = cutoff_threshold
self.cutoff_minimum = cutoff_minimum
self.point_method = point_method
def fit(self, df, future_regressor=None):
"""Train algorithm given data supplied.
Args:
df (pandas.DataFrame): Datetime Indexed
"""
if abs(float(self.max_motifs)) > 1:
self.max_motifs_n = abs(int(self.max_motifs))
elif float(self.max_motifs) == 1:
self.max_motifs_n = 2
elif float(self.max_motifs) < 1:
self.max_motifs_n = int(abs(np.floor(self.max_motifs * df.shape[0])))
else:
self.max_motifs = 10
self.max_motifs_n = 10
self.phrase_n = int(self.phrase_len)
# df = df_wide[df_wide.columns[0:3]].fillna(0).astype(float)
df = self.basic_profile(df)
"""
comparison = 'magnitude' # pct_change, pct_change_sign, magnitude_pct_change_sign, magnitude, magnitude_pct_change
distance_metric = 'cityblock'
max_motifs_n = 100
phrase_n = 5
shared = False
recency_weighting = 0.1
# cutoff_threshold = 0.8
cutoff_minimum = 20
prediction_interval = 0.9
na_threshold = 0.1
point_method = 'mean'
"""
phrase_n = abs(int(self.phrase_n))
max_motifs_n = abs(int(self.max_motifs_n))
comparison = self.comparison
distance_metric = self.distance_metric
shared = self.shared
recency_weighting = float(self.recency_weighting)
# cutoff_threshold = float(self.cutoff_threshold)
cutoff_minimum = abs(int(self.cutoff_minimum))
prediction_interval = float(self.prediction_interval)
na_threshold = 0.1
point_method = self.point_method
parallel = True
if self.n_jobs in [0, 1] or df.shape[1] < 3:
parallel = False
else:
try:
from joblib import Parallel, delayed
except Exception:
parallel = False
# import timeit
# start_time_1st = timeit.default_timer()
# transform the data into different views (contour = percent_change)
original_df = None
if 'pct_change' in comparison:
if comparison in ['magnitude_pct_change', 'magnitude_pct_change_sign']:
original_df = df.copy()
df = df.replace([0], np.nan)
df = df.fillna(abs(df[df != 0]).min()).fillna(0.1)
last_row = df.tail(1)
df = (
df.pct_change(periods=1, fill_method='ffill')
.tail(df.shape[0] - 1)
.fillna(0)
)
df = df.replace([np.inf, -np.inf], 0)
# else:
# self.comparison = 'magnitude'
if 'pct_change_sign' in comparison:
last_motif = df.where(df >= 0, -1).where(df <= 0, 1).tail(phrase_n)
else:
last_motif = df.tail(phrase_n)
max_samps = df.shape[0] - phrase_n
numbers = np.random.choice(
max_samps,
size=max_motifs_n if max_motifs_n < max_samps else max_samps,
replace=False,
)
# make this faster
motif_vecs_list = []
# takes random slices of the time series and rearranges as phrase_n length vectors
for z in numbers:
rand_slice = df.iloc[
z : (z + phrase_n),
]
rand_slice = (
rand_slice.reset_index(drop=True)
.transpose()
.set_index(np.repeat(z, (df.shape[1],)), append=True)
)
# motif_vecs = pd.concat([motif_vecs, rand_slice], axis=0)
motif_vecs_list.append(rand_slice)
motif_vecs = pd.concat(motif_vecs_list, axis=0)
if 'pct_change_sign' in comparison:
motif_vecs = motif_vecs.where(motif_vecs >= 0, -1).where(motif_vecs <= 0, 1)
# elapsed_1st = timeit.default_timer() - start_time_1st
# start_time_2nd = timeit.default_timer()
# compare the motif vectors to the most recent vector of the series
from sklearn.metrics.pairwise import pairwise_distances
args = {
"cutoff_minimum": cutoff_minimum,
"comparison": comparison,
"point_method": point_method,
"prediction_interval": prediction_interval,
"phrase_n": phrase_n,
"distance_metric": distance_metric,
"shared": shared,
"na_threshold": na_threshold,
"original_df": original_df,
"df": df,
}
if shared:
comparative = pd.DataFrame(
pairwise_distances(
motif_vecs.to_numpy(),
last_motif.transpose().to_numpy(),
metric=distance_metric,
)
)
comparative.index = motif_vecs.index
comparative.columns = last_motif.columns
if not shared:
def create_comparative(motif_vecs, last_motif, args, col):
distance_metric = args["distance_metric"]
x = motif_vecs[motif_vecs.index.get_level_values(0) == col]
y = last_motif[col].to_numpy().reshape(1, -1)
current_comparative = pd.DataFrame(
pairwise_distances(x.to_numpy(), y, metric=distance_metric)
)
current_comparative.index = x.index
current_comparative.columns = [col]
return current_comparative
if parallel:
verbs = 0 if self.verbose < 1 else self.verbose - 1
df_list = Parallel(n_jobs=self.n_jobs, verbose=(verbs))(
delayed(create_comparative)(
motif_vecs=motif_vecs, last_motif=last_motif, args=args, col=col
)
for col in last_motif.columns
)
else:
df_list = []
for col in last_motif.columns:
df_list.append(
create_comparative(motif_vecs, last_motif, args, col)
)
comparative = pd.concat(df_list, axis=0)
comparative = comparative.groupby(level=[0, 1]).sum(min_count=0)
# comparative comes out of this looking kinda funny, but get_level_values works with that later
# it might be possible to reshape it to a more memory efficient design
# comparative is a df of motifs (in index) with their value to each series (per column)
if recency_weighting != 0:
rec_weights = np.repeat(
((comparative.index.get_level_values(1)) / df.shape[0])
.to_numpy()
.reshape(-1, 1)
* recency_weighting,
len(comparative.columns),
axis=1,
)
comparative = comparative.add(rec_weights, fill_value=0)
def seek_the_oracle(comparative, args, col):
# comparative.idxmax()
cutoff_minimum = args["cutoff_minimum"]
comparison = args["comparison"]
point_method = args["point_method"]
prediction_interval = args["prediction_interval"]
phrase_n = args["phrase_n"]
shared = args["shared"]
na_threshold = args["na_threshold"]
original_df = args["original_df"]
df = args["df"]
vals = comparative[col].sort_values(ascending=False)
if not shared:
vals = vals[vals.index.get_level_values(0) == col]
# vals = vals[vals > cutoff_threshold]
# if vals.shape[0] < cutoff_minimum:
vals = comparative[col].sort_values(ascending=False)
if not shared:
vals = vals[vals.index.get_level_values(0) == col]
vals = vals.head(cutoff_minimum)
pos_forecasts = pd.DataFrame()
for val_index, val_value in vals.items():
sec_start = val_index[1] + phrase_n
if comparison in ['magnitude_pct_change', 'magnitude_pct_change_sign']:
current_pos = original_df[val_index[0]].iloc[sec_start + 1 :]
else:
current_pos = df[val_index[0]].iloc[sec_start:]
pos_forecasts = pd.concat(
[pos_forecasts, current_pos.reset_index(drop=True)],
axis=1,
sort=False,
)
thresh = int(np.ceil(pos_forecasts.shape[1] * na_threshold))
if point_method == 'mean':
current_forecast = pos_forecasts.mean(axis=1)
elif point_method == 'sign_biased_mean':
axis_means = pos_forecasts.mean(axis=0)
if axis_means.mean() > 0:
pos_forecasts = pos_forecasts[
pos_forecasts.columns[~(axis_means < 0)]
]
else:
pos_forecasts = pos_forecasts[
pos_forecasts.columns[~(axis_means > 0)]
]
current_forecast = pos_forecasts.mean(axis=1)
else:
point_method = 'median'
current_forecast = pos_forecasts.median(axis=1)
# current_forecast.columns = [col]
forecast = current_forecast.copy()
forecast.name = col
current_forecast = (
pos_forecasts.dropna(thresh=thresh, axis=0)
.quantile(q=[(1 - prediction_interval), prediction_interval], axis=1)
.transpose()
)
lower_forecast = pd.Series(current_forecast.iloc[:, 0], name=col)
upper_forecast = pd.Series(current_forecast.iloc[:, 1], name=col)
return (forecast, lower_forecast, upper_forecast)
# seek_the_oracle(comparative, args, comparative.columns[0])
if parallel:
verbs = 0 if self.verbose < 1 else self.verbose - 1
df_list = Parallel(n_jobs=self.n_jobs, verbose=(verbs))(
delayed(seek_the_oracle)(comparative=comparative, args=args, col=col)
for col in comparative.columns
)
complete = list(map(list, zip(*df_list)))
else:
df_list = []
for col in comparative.columns:
df_list.append(seek_the_oracle(comparative, args, col))
complete = list(map(list, zip(*df_list)))
forecasts = pd.concat(
complete[0], axis=1
) # .reindex(self.column_names, axis=1)
lower_forecasts = pd.concat(complete[1], axis=1)
upper_forecasts = pd.concat(complete[2], axis=1)
if comparison in ['pct_change', 'pct_change_sign']:
forecasts = (forecasts + 1).replace([0], np.nan)
forecasts = forecasts.fillna(abs(df[df != 0]).min()).fillna(0.1)
forecasts = pd.concat(
[last_row.reset_index(drop=True), (forecasts)], axis=0, sort=False
).cumprod()
upper_forecasts = (upper_forecasts + 1).replace([0], np.nan)
upper_forecasts = upper_forecasts.fillna(abs(df[df != 0]).min()).fillna(0.1)
upper_forecasts = pd.concat(
[last_row.reset_index(drop=True), (upper_forecasts)], axis=0, sort=False
).cumprod()
lower_forecasts = (lower_forecasts + 1).replace([0], np.nan)
lower_forecasts = lower_forecasts.fillna(abs(df[df != 0]).min()).fillna(0.1)
lower_forecasts = pd.concat(
[last_row.reset_index(drop=True), (lower_forecasts)], axis=0, sort=False
).cumprod()
# reindex might be unnecessary but I assume the cost worth the safety
self.forecasts = forecasts
self.lower_forecasts = lower_forecasts
self.upper_forecasts = upper_forecasts
# elapsed_3rd = timeit.default_timer() - start_time_3rd
# print(f"1st {elapsed_1st}\n2nd {elapsed_2nd}\n3rd {elapsed_3rd}")
"""
In fit phase, only select motifs.
table: start index, weight, column it applies to, and count of rows that follow motif
slice into possible motifs
compare motifs (efficiently)
choose the motifs to use for each series
if not shared, can drop column part of index ref
combine the following values into forecasts
consider the weights
magnitude and percentage change
account for forecasts not running the full length of forecast_length
if longer than comparative, append na df then ffill
Profile speed and which code to improve first
Remove for loops
        Quantile should not be calculated until after pos_forecasts is narrowed down to only the forecast length
https://krstn.eu/np.nanpercentile()-there-has-to-be-a-faster-way/
"""
self.fit_runtime = datetime.datetime.now() - self.startTime
return self
def predict(
self, forecast_length: int, future_regressor=None, just_point_forecast=False
):
"""Generates forecast data immediately following dates of index supplied to .fit()
Args:
forecast_length (int): Number of periods of data to forecast ahead
regressor (numpy.Array): additional regressor, not used
just_point_forecast (bool): If True, return a pandas.DataFrame of just point forecasts
Returns:
Either a PredictionObject of forecasts and metadata, or
if just_point_forecast == True, a dataframe of point forecasts
"""
predictStartTime = datetime.datetime.now()
forecasts = self.forecasts.head(forecast_length)
if forecasts.shape[0] < forecast_length:
extra_len = forecast_length - forecasts.shape[0]
empty_frame = pd.DataFrame(
index=np.arange(extra_len), columns=forecasts.columns
)
forecasts = pd.concat([forecasts, empty_frame], axis=0, sort=False).fillna(
method='ffill'
)
forecasts.columns = self.column_names
forecasts.index = self.create_forecast_index(forecast_length=forecast_length)
if just_point_forecast:
return forecasts
else:
lower_forecasts = self.lower_forecasts.head(forecast_length)
upper_forecasts = self.upper_forecasts.head(forecast_length)
if lower_forecasts.shape[0] < forecast_length:
extra_len = forecast_length - lower_forecasts.shape[0]
empty_frame = pd.DataFrame(
index=np.arange(extra_len), columns=lower_forecasts.columns
)
lower_forecasts = pd.concat(
[lower_forecasts, empty_frame], axis=0, sort=False
).fillna(method='ffill')
lower_forecasts.columns = self.column_names
lower_forecasts.index = self.create_forecast_index(
forecast_length=forecast_length
)
if upper_forecasts.shape[0] < forecast_length:
extra_len = forecast_length - upper_forecasts.shape[0]
empty_frame = pd.DataFrame(
index=np.arange(extra_len), columns=upper_forecasts.columns
)
upper_forecasts = pd.concat(
[upper_forecasts, empty_frame], axis=0, sort=False
).fillna(method='ffill')
upper_forecasts.columns = self.column_names
upper_forecasts.index = self.create_forecast_index(
forecast_length=forecast_length
)
predict_runtime = datetime.datetime.now() - predictStartTime
prediction = PredictionObject(
model_name=self.name,
forecast_length=forecast_length,
forecast_index=forecasts.index,
forecast_columns=forecasts.columns,
lower_forecast=lower_forecasts,
forecast=forecasts,
upper_forecast=upper_forecasts,
prediction_interval=self.prediction_interval,
predict_runtime=predict_runtime,
fit_runtime=self.fit_runtime,
model_parameters=self.get_params(),
)
return prediction
def get_new_params(self, method: str = 'random'):
"""Return dict of new parameters for parameter tuning."""
comparison_choice = np.random.choice(
a=[
'pct_change',
'pct_change_sign',
'magnitude_pct_change_sign',
'magnitude',
'magnitude_pct_change',
],
size=1,
p=[0.2, 0.1, 0.4, 0.2, 0.1],
).item()
phrase_len_choice = np.random.choice(
a=[5, 10, 15, 20, 30, 90, 360],
p=[0.2, 0.2, 0.1, 0.25, 0.1, 0.1, 0.05],
size=1,
).item()
shared_choice = np.random.choice(a=[True, False], size=1, p=[0.05, 0.95]).item()
distance_metric_choice = np.random.choice(
a=[
'other',
'hamming',
'cityblock',
'cosine',
'euclidean',
'l1',
'l2',
'manhattan',
],
size=1,
p=[0.44, 0.05, 0.1, 0.1, 0.1, 0.2, 0.0, 0.01],
).item()
if distance_metric_choice == 'other':
distance_metric_choice = np.random.choice(
a=[
'braycurtis',
'canberra',
'chebyshev',
'correlation',
'dice',
'hamming',
'jaccard',
'kulsinski',
'mahalanobis',
'minkowski',
'rogerstanimoto',
'russellrao',
# 'seuclidean',
'sokalmichener',
'sokalsneath',
'sqeuclidean',
'yule',
],
size=1,
).item()
max_motifs_choice = float(
np.random.choice(
a=[20, 50, 100, 200, 0.05, 0.2, 0.5],
size=1,
p=[0.4, 0.1, 0.2, 0.09, 0.1, 0.1, 0.01],
).item()
)
recency_weighting_choice = np.random.choice(
a=[0, 0.5, 0.1, 0.01, -0.01, 0.001],
size=1,
p=[0.5, 0.02, 0.05, 0.35, 0.05, 0.03],
).item()
# cutoff_threshold_choice = np.random.choice(
# a=[0.7, 0.9, 0.99, 1.5], size=1, p=[0.1, 0.1, 0.4, 0.4]
# ).item()
cutoff_minimum_choice = np.random.choice(
a=[5, 10, 20, 50, 100, 200, 500], size=1, p=[0, 0, 0.2, 0.2, 0.4, 0.1, 0.1]
).item()
point_method_choice = np.random.choice(
a=['median', 'mean', 'sign_biased_mean'],
size=1,
p=[0.59, 0.3, 0.1],
).item()
return {
'phrase_len': phrase_len_choice,
'comparison': comparison_choice,
'shared': shared_choice,
'distance_metric': distance_metric_choice,
'max_motifs': max_motifs_choice,
'recency_weighting': recency_weighting_choice,
'cutoff_minimum': cutoff_minimum_choice,
'point_method': point_method_choice,
}
def get_params(self):
"""Return dict of current parameters."""
return {
'phrase_len': self.phrase_len,
'comparison': self.comparison,
'shared': self.shared,
'distance_metric': self.distance_metric,
'max_motifs': self.max_motifs,
'recency_weighting': self.recency_weighting,
# 'cutoff_threshold': self.cutoff_threshold,
'cutoff_minimum': self.cutoff_minimum,
'point_method': self.point_method,
}
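# Editor's note: illustrative sketch, not part of the original module. It shows the core
# MotifSimulation idea in miniature for a single series: compare the most recent
# phrase_n-length window against earlier windows and keep the closest matches. Names and
# parameter values are invented for the example.
def _example_motif_ranking():
    """Illustrative sketch: rank historical windows by distance to the latest window."""
    import numpy as np
    from sklearn.metrics.pairwise import pairwise_distances
    phrase_n = 5
    series = np.sin(np.linspace(0, 20, 200))
    last_window = series[-phrase_n:].reshape(1, -1)
    # every earlier window of the same length, excluding the final one being matched
    starts = np.arange(series.shape[0] - 2 * phrase_n)
    windows = np.stack([series[s : s + phrase_n] for s in starts])
    distances = pairwise_distances(windows, last_window, metric="cityblock").ravel()
    # start indices of the 20 most similar historical motifs
    return starts[np.argsort(distances)[:20]]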
def looped_motif(
Xa,
Xb,
name,
r_arr=None,
window=10,
distance_metric="minkowski",
k=10,
point_method="mean",
prediction_interval=0.9,
):
"""inner function for Motif model."""
if r_arr is None:
y = Xa[:, window:]
Xa = Xa[:, :window]
else:
y = Xa[r_arr, window:]
Xa = Xa[r_arr, :window]
# model = NearestNeighbors(n_neighbors=10, algorithm='auto', metric='minkowski', n_jobs=1)
# model.fit(Xa)
# model.kneighbors(Xb)
A = cdist(Xa, Xb, metric=distance_metric)
# lowest values
idx = np.argpartition(A, k, axis=0)[:k].flatten()
# distances for weighted mean
results = y[idx]
if point_method == "weighted_mean":
weights = A[idx].flatten()
if weights.sum() == 0:
weights = None
forecast = np.average(results, axis=0, weights=weights)
elif point_method == "mean":
forecast = np.nanmean(results, axis=0)
elif point_method == "median":
forecast = np.nanmedian(results, axis=0)
elif point_method == "midhinge":
q1 = np.nanquantile(results, q=0.25, axis=0)
q2 = np.nanquantile(results, q=0.75, axis=0)
forecast = (q1 + q2) / 2
pred_int = (1 - prediction_interval) / 2
upper_forecast = np.nanquantile(results, q=(1 - pred_int), axis=0)
lower_forecast = np.nanquantile(results, q=pred_int, axis=0)
forecast = pd.Series(forecast)
forecast.name = name
upper_forecast = pd.Series(upper_forecast)
upper_forecast.name = name
lower_forecast = pd.Series(lower_forecast)
lower_forecast.name = name
return (forecast, upper_forecast, lower_forecast)
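# Editor's note: illustrative sketch, not part of the original module. A tiny call to
# looped_motif on synthetic windows, assuming each Xa row is laid out as
# [window history | future values], which is how Motif.predict below builds its windows.
def _example_looped_motif():
    """Illustrative sketch: k nearest windows vote on the continuation."""
    import numpy as np
    window, horizon, k = 10, 3, 5
    rng = np.random.default_rng(0)
    Xa = rng.normal(size=(100, window + horizon))  # candidate windows plus continuations
    Xb = rng.normal(size=(1, window))  # most recent window of the target series
    forecast, upper, lower = looped_motif(
        Xa, Xb, name="toy", window=window, distance_metric="euclidean",
        k=k, point_method="median", prediction_interval=0.9,
    )
    return forecast, upper, lower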
class Motif(ModelObject):
"""Forecasts using a nearest neighbors type model adapted for probabilistic time series.
Args:
name (str): String to identify class
frequency (str): String alias of datetime index frequency or else 'infer'
        prediction_interval (float): Confidence interval for probabilistic forecast
        window (int): length of motif windows to compare
        point_method (str): how to summarize the matched continuations ('weighted_mean', 'mean', 'median', 'midhinge')
        distance_metric (str): metric name passed to scipy.spatial.distance.cdist
        k (int): number of nearest window matches to use
        max_windows (int): maximum number of windows to randomly sample for comparison
        multivariate (bool): if True, compare windows pooled across all series
    """
def __init__(
self,
name: str = "Motif",
frequency: str = 'infer',
prediction_interval: float = 0.9,
holiday_country: str = 'US',
random_seed: int = 2020,
verbose: int = 0,
n_jobs: int = 1,
window: int = 5,
point_method: str = "weighted_mean",
distance_metric: str = "minkowski",
k: int = 10,
max_windows: int = 5000,
multivariate: bool = False,
**kwargs
):
ModelObject.__init__(
self,
"MultivariateMotif" if multivariate else "UnivariateMotif",
frequency,
prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
n_jobs=n_jobs,
)
self.window = window
self.point_method = point_method
self.distance_metric = distance_metric
self.k = k
self.max_windows = max_windows
self.multivariate = multivariate
def fit(self, df, future_regressor=None):
"""Train algorithm given data supplied.
Args:
df (pandas.DataFrame): Datetime Indexed
"""
df = self.basic_profile(df)
self.df = df
self.fit_runtime = datetime.datetime.now() - self.startTime
return self
def predict(
self, forecast_length: int, future_regressor=None, just_point_forecast=False
):
"""Generates forecast data immediately following dates of index supplied to .fit()
Args:
forecast_length (int): Number of periods of data to forecast ahead
regressor (numpy.Array): additional regressor, not used
just_point_forecast (bool): If True, return a pandas.DataFrame of just point forecasts
Returns:
Either a PredictionObject of forecasts and metadata, or
if just_point_forecast == True, a dataframe of point forecasts
"""
predictStartTime = datetime.datetime.now()
        # keep this at top so it fails quickly if the numpy version lacks sliding_window_view
x = np.lib.stride_tricks.sliding_window_view(
self.df.to_numpy(), self.window + forecast_length, axis=0
)
test_index = self.create_forecast_index(forecast_length=forecast_length)
# subsample windows if needed
r_arr = None
if self.max_windows is not None:
if self.multivariate:
X_size = x.shape[0] * x.shape[1]
else:
X_size = x.shape[0]
if self.max_windows < X_size:
r_arr = np.random.default_rng(self.random_seed).integers(
0, X_size, size=self.max_windows
)
self.parallel = True
if self.n_jobs in [0, 1] or self.df.shape[1] < 5:
self.parallel = False
else:
try:
from joblib import Parallel, delayed
except Exception:
self.parallel = False
# joblib multiprocessing to loop through series
if self.parallel:
df_list = Parallel(n_jobs=(self.n_jobs - 1))(
delayed(looped_motif)(
Xa=x.reshape(-1, x.shape[-1]) if self.multivariate else x[:, i],
Xb=self.df.iloc[-self.window :, i].to_numpy().reshape(1, -1),
name=self.df.columns[i],
r_arr=r_arr,
window=self.window,
distance_metric=self.distance_metric,
k=self.k,
point_method=self.point_method,
prediction_interval=self.prediction_interval,
)
for i in range(self.df.shape[1])
)
else:
df_list = []
for i in range(self.df.shape[1]):
df_list.append(
looped_motif(
Xa=x.reshape(-1, x.shape[-1]) if self.multivariate else x[:, i],
Xb=self.df.iloc[-self.window :, i].to_numpy().reshape(1, -1),
name=self.df.columns[i],
r_arr=r_arr,
window=self.window,
distance_metric=self.distance_metric,
k=self.k,
point_method=self.point_method,
prediction_interval=self.prediction_interval,
)
)
complete = list(map(list, zip(*df_list)))
forecast = pd.concat(complete[0], axis=1)
forecast.index = test_index
lower_forecast = pd.concat(complete[1], axis=1)
lower_forecast.index = test_index
upper_forecast = pd.concat(complete[2], axis=1)
upper_forecast.index = test_index
if just_point_forecast:
return forecast
else:
predict_runtime = datetime.datetime.now() - predictStartTime
prediction = PredictionObject(
model_name=self.name,
forecast_length=forecast_length,
forecast_index=forecast.index,
forecast_columns=forecast.columns,
lower_forecast=lower_forecast,
forecast=forecast,
upper_forecast=upper_forecast,
prediction_interval=self.prediction_interval,
predict_runtime=predict_runtime,
fit_runtime=self.fit_runtime,
model_parameters=self.get_params(),
)
return prediction
def get_new_params(self, method: str = 'random'):
"""Returns dict of new parameters for parameter tuning"""
metric_list = [
'braycurtis',
'canberra',
'chebyshev',
'cityblock',
'correlation',
'cosine',
'dice',
'euclidean',
'hamming',
'jaccard',
'jensenshannon',
'kulsinski',
'mahalanobis',
'matching',
'minkowski',
'rogerstanimoto',
'russellrao',
# 'seuclidean',
'sokalmichener',
'sokalsneath',
'sqeuclidean',
'yule',
]
return {
"window": random.choices([5, 7, 10, 15, 30], [0.2, 0.1, 0.5, 0.1, 0.1])[0],
"point_method": random.choices(
["weighted_mean", "mean", "median", "midhinge"], [0.4, 0.2, 0.2, 0.2]
)[0],
"distance_metric": random.choice(metric_list),
"k": random.choices([5, 10, 15, 20, 100], [0.2, 0.5, 0.1, 0.1, 0.1])[0],
"max_windows": random.choices([None, 1000, 10000], [0.01, 0.1, 0.8])[0],
}
def get_params(self):
"""Return dict of current parameters"""
return {
"window": self.window,
"point_method": self.point_method,
"distance_metric": self.distance_metric,
"k": self.k,
"max_windows": self.max_windows,
}
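# Editor's note: illustrative usage sketch, not part of the original module. It assumes
# numpy >= 1.20 for sliding_window_view and uses invented column names; it runs the
# univariate Motif model end to end on random-walk data.
def _example_motif_model():
    """Illustrative sketch: nearest-neighbor window forecasts per series."""
    import numpy as np
    import pandas as pd
    idx = pd.date_range("2021-01-01", periods=300, freq="D")
    toy = pd.DataFrame(
        np.cumsum(np.random.normal(size=(300, 6)), axis=0),
        index=idx,
        columns=[f"series_{i}" for i in range(6)],
    )
    model = Motif(frequency="D", window=10, k=10, point_method="median").fit(toy)
    prediction = model.predict(forecast_length=14)
    return prediction.forecast, prediction.upper_forecast, prediction.lower_forecast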
def predict_reservoir(
df,
forecast_length,
prediction_interval=None,
warmup_pts=1,
k=2,
ridge_param=2.5e-6,
seed_pts: int = 1,
seed_weighted: str = None,
):
"""Nonlinear Variable Autoregression or 'Next-Generation Reservoir Computing'
based on https://github.com/quantinfo/ng-rc-paper-code/
<NAME>., <NAME>., <NAME>. et al. Next generation reservoir computing. Nat Commun 12, 5564 (2021).
https://doi.org/10.1038/s41467-021-25801-2
with adjustments to make it probabilistic
    This is very slow and memory hungry when the number of series/dimensions gets big (i.e. > 50).
    Already effectively parallelized by the underlying linear algebra (linpack), it seems.
    It's very sensitive to error in the most recent data point!
    The seed_pts and seed_weighted args can help address that.
Args:
        df (numpy.ndarray): wide array with series as rows, shape (n_series, n_timesteps)
        forecast_length (int): number of periods of data to forecast ahead
prediction_interval (float): Confidence interval for probabilistic forecast
k (int): the AR order (keep this small, larger is slow and usually pointless)
ridge_param (float): standard lambda for ridge regression
warmup_pts (int): in reality, passing 1 here (no warmup) is fine
seed_pts (int): number of back steps to use to simulate future
if > 10, also increases search space of probabilistic forecast
seed_weighted (str): how to summarize most recent points if seed_pts > 1
"""
assert k > 0, "nvar `k` must be > 0"
assert warmup_pts > 0, "nvar `warmup_pts` must be > 0"
assert df.shape[1] > k, "nvar input data must contain at least k+1 records"
n_pts = df.shape[1]
# handle short data edge case
min_train_pts = 12
max_warmup_pts = n_pts - min_train_pts
if warmup_pts >= max_warmup_pts:
warmup_pts = max_warmup_pts if max_warmup_pts > 0 else 1
traintime_pts = n_pts - warmup_pts # round(traintime / dt)
warmtrain_pts = warmup_pts + traintime_pts
testtime_pts = forecast_length + 1 # round(testtime / dt)
maxtime_pts = n_pts # round(maxtime / dt)
# input dimension
d = df.shape[0]
# size of the linear part of the feature vector
dlin = k * d
# size of nonlinear part of feature vector
dnonlin = int(dlin * (dlin + 1) / 2)
# total size of feature vector: constant + linear + nonlinear
dtot = 1 + dlin + dnonlin
# create an array to hold the linear part of the feature vector
x = np.zeros((dlin, maxtime_pts))
# fill in the linear part of the feature vector for all times
for delay in range(k):
for j in range(delay, maxtime_pts):
x[d * delay : d * (delay + 1), j] = df[:, j - delay]
# create an array to hold the full feature vector for training time
# (use ones so the constant term is already 1)
out_train = np.ones((dtot, traintime_pts))
# copy over the linear part (shift over by one to account for constant)
out_train[1 : dlin + 1, :] = x[:, warmup_pts - 1 : warmtrain_pts - 1]
# fill in the non-linear part
cnt = 0
for row in range(dlin):
for column in range(row, dlin):
# shift by one for constant
out_train[dlin + 1 + cnt] = (
x[row, warmup_pts - 1 : warmtrain_pts - 1]
* x[column, warmup_pts - 1 : warmtrain_pts - 1]
)
cnt += 1
# ridge regression: train W_out to map out_train to Lorenz[t] - Lorenz[t - 1]
W_out = (
(x[0:d, warmup_pts:warmtrain_pts] - x[0:d, warmup_pts - 1 : warmtrain_pts - 1])
@ out_train[:, :].T
@ np.linalg.pinv(
out_train[:, :] @ out_train[:, :].T + ridge_param * np.identity(dtot)
)
)
# create a place to store feature vectors for prediction
out_test = np.ones(dtot) # full feature vector
x_test = np.zeros((dlin, testtime_pts)) # linear part
# copy over initial linear feature vector
x_test[:, 0] = x[:, warmtrain_pts - 1]
# do prediction
for j in range(testtime_pts - 1):
# copy linear part into whole feature vector
out_test[1 : dlin + 1] = x_test[:, j] # shift by one for constant
# fill in the non-linear part
cnt = 0
for row in range(dlin):
for column in range(row, dlin):
# shift by one for constant
out_test[dlin + 1 + cnt] = x_test[row, j] * x_test[column, j]
cnt += 1
# fill in the delay taps of the next state
x_test[d:dlin, j + 1] = x_test[0 : (dlin - d), j]
# do a prediction
x_test[0:d, j + 1] = x_test[0:d, j] + W_out @ out_test[:]
pred = x_test[0:d, 1:]
if prediction_interval is not None or seed_pts > 1:
# this works by using n most recent points as different starting "seeds"
# this has the advantage of generating perfect bounds on perfect functions
# on real world data, well, things will be less perfect...
n_samples = 10 if seed_pts <= 10 else seed_pts
interval_list = []
for ns in range(n_samples):
out_test = np.ones(dtot) # full feature vector
x_int = np.zeros((dlin, testtime_pts + n_samples)) # linear part
# copy over initial linear feature vector
x_int[:, 0] = x[:, warmtrain_pts - 2 - ns]
# do prediction
for j in range(testtime_pts - 1 + n_samples):
# copy linear part into whole feature vector
out_test[1 : dlin + 1] = x_int[:, j] # shift by one for constant
# fill in the non-linear part
cnt = 0
for row in range(dlin):
for column in range(row, dlin):
# shift by one for constant
out_test[dlin + 1 + cnt] = x_int[row, j] * x_int[column, j]
cnt += 1
# fill in the delay taps of the next state
x_int[d:dlin, j + 1] = x_int[0 : (dlin - d), j]
# do a prediction
x_int[0:d, j + 1] = x_int[0:d, j] + W_out @ out_test[:]
start_slice = ns + 2
end_slice = start_slice + testtime_pts - 1
interval_list.append(x_int[:, start_slice:end_slice])
interval_list = np.array(interval_list)
if seed_pts > 1:
pred_int = np.concatenate(
[np.expand_dims(x_test[:, 1:], axis=0), interval_list]
)
# assuming interval_list has more recent first
if seed_weighted == "linear":
pred = np.average(
pred_int, axis=0, weights=range(pred_int.shape[0], 0, -1)
)[0:d]
elif seed_weighted == "exponential":
pred = np.average(
pred_int, axis=0, weights=np.geomspace(100, 1, pred_int.shape[0])
)[0:d]
else:
pred = np.quantile(pred_int, q=0.5, axis=0)[0:d]
pred_upper = np.nanquantile(interval_list, q=prediction_interval, axis=0)[0:d]
pred_upper = np.where(pred_upper < pred, pred, pred_upper)
pred_lower = np.nanquantile(interval_list, q=(1 - prediction_interval), axis=0)[
0:d
]
pred_lower = np.where(pred_lower > pred, pred, pred_lower)
return pred, pred_upper, pred_lower
else:
return pred
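# Editor's note: illustrative sketch, not part of the original module. It shows the
# feature-vector bookkeeping that makes NVAR expensive for many series (and why the NVAR
# class below batches them), plus a small call to predict_reservoir on a toy array.
def _example_predict_reservoir(d=3, k=2):
    """Illustrative sketch: NVAR feature sizes and a toy reservoir forecast."""
    import numpy as np
    dlin = k * d  # linear (lagged) terms
    dnonlin = int(dlin * (dlin + 1) / 2)  # quadratic cross terms
    dtot = 1 + dlin + dnonlin  # constant + linear + nonlinear, e.g. 28 for d=3, k=2
    toy = np.cumsum(np.random.default_rng(0).normal(size=(d, 120)), axis=1)
    pred, upper, lower = predict_reservoir(
        toy, forecast_length=10, prediction_interval=0.9, k=k, warmup_pts=1
    )
    return dtot, pred.shape  # pred has shape (d, forecast_length)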
class NVAR(ModelObject):
"""Nonlinear Variable Autoregression or 'Next-Generation Reservoir Computing'
based on https://github.com/quantinfo/ng-rc-paper-code/
    <NAME>., <NAME>., <NAME>. et al. Next generation reservoir computing. Nat Commun 12, 5564 (2021).
https://doi.org/10.1038/s41467-021-25801-2
with adjustments to make it probabilistic and to scale better
Args:
name (str): String to identify class
frequency (str): String alias of datetime index frequency or else 'infer'
prediction_interval (float): Confidence interval for probabilistic forecast
k (int): the AR order (keep this small, larger is slow and usually pointless)
ridge_param (float): standard lambda for ridge regression
warmup_pts (int): in reality, passing 1 here (no warmup) is fine
        batch_size (int): NVAR's feature count grows quadratically with the number of series, so to scale roughly linearly, series are split into batches of size n
batch_method (str): method for collecting series to make batches
"""
def __init__(
self,
name: str = "NVAR",
frequency: str = 'infer',
prediction_interval: float = 0.9,
holiday_country: str = 'US',
random_seed: int = 2020,
verbose: int = 0,
k: int = 2,
ridge_param: float = 2.5e-6,
warmup_pts: int = 1,
seed_pts: int = 1,
seed_weighted: str = None,
batch_size: int = 5,
batch_method: str = "input_order",
**kwargs
):
ModelObject.__init__(
self,
name,
frequency,
prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
)
self.k = k
self.ridge_param = ridge_param
self.warmup_pts = warmup_pts
self.seed_pts = seed_pts
self.seed_weighted = seed_weighted
self.batch_size = batch_size
self.batch_method = batch_method
def fit(self, df, future_regressor=None):
"""Train algorithm given data supplied.
Args:
df (pandas.DataFrame): Datetime Indexed
"""
self.basic_profile(df)
if self.batch_method == "med_sorted":
df = df.loc[:, df.median().sort_values(ascending=False).index]
elif self.batch_method == "std_sorted":
df = df.loc[:, df.std().sort_values(ascending=False).index]
self.new_col_names = df.columns
self.batch_steps = ceil(df.shape[1] / self.batch_size)
self.df_train = df.to_numpy().T
self.fit_runtime = datetime.datetime.now() - self.startTime
return self
def predict(
self, forecast_length: int, future_regressor=None, just_point_forecast=False
):
"""Generates forecast data immediately following dates of index supplied to .fit()
Args:
forecast_length (int): Number of periods of data to forecast ahead
regressor (numpy.Array): additional regressor, not used
just_point_forecast (bool): If True, return a pandas.DataFrame of just point forecasts
Returns:
Either a PredictionObject of forecasts and metadata, or
if just_point_forecast == True, a dataframe of point forecasts
"""
predictStartTime = datetime.datetime.now()
the_index = self.create_forecast_index(forecast_length=forecast_length)
df_list, df_list_up, df_list_low = [], [], []
# since already uses 100% cpu, no point parallelizing
for stp in range(self.batch_steps):
srt = stp * self.batch_size
stop = srt + self.batch_size
with warnings.catch_warnings():
warnings.simplefilter("ignore")
fore, up, low = predict_reservoir(
self.df_train[srt:stop],
forecast_length=forecast_length,
warmup_pts=self.warmup_pts,
seed_pts=self.seed_pts,
seed_weighted=self.seed_weighted,
k=self.k,
ridge_param=self.ridge_param,
prediction_interval=self.prediction_interval,
)
df_list_up.append(
pd.DataFrame(
up.T,
columns=self.new_col_names[srt:stop],
index=the_index,
)
)
df_list.append(
pd.DataFrame(
fore.T,
columns=self.new_col_names[srt:stop],
index=the_index,
)
)
df_list_low.append(
pd.DataFrame(
low.T,
columns=self.new_col_names[srt:stop],
index=the_index,
)
)
forecast = pd.concat(df_list, axis=1)[self.column_names]
if just_point_forecast:
return forecast
else:
upper_forecast = pd.concat(df_list_up, axis=1)[self.column_names]
lower_forecast =
|
pd.concat(df_list_low, axis=1)
|
pandas.concat
|
"""Some utils functions for date handling and time traveling"""
import time
import numpy as np
import pandas as pd
# TODO: no hardcode: min(inputs.first_row)?
EPOCH = pd.Timestamp("2017-06-27").value
class TimeTraveler():
"""Class handling time travel tricks"""
def __init__(self):
self.now = None
def setTime(self, now):
"""
        Set time to the given `now` nanoseconds.
        Used for time traveling purposes.
"""
if now is None:
self.now = None
else:
self.now = int(now)
def getTime(self, force=False):
"""
Return the current time in nanoseconds
        Used for time traveling purposes, so this might be a date in the past
        matching the current simulation state, unless `force` is set to True.
"""
if not force and self.now is not None:
return self.now
return secToNano(time.time())
def nowMinus(
self, years=0, weeks=0, days=0, hours=0, minutes=0
): # pylint: disable=R0913
"""
Return the current timestamp (nanoseconds) minus the given parameters
This will take into account time traveling tricks.
"""
seconds = (
minutes * 60
+ hours * 60 * 60
+ days * 60 * 60 * 24
+ weeks * 60 * 60 * 24 * 7
+ years * 60 * 60 * 24 * 365.25
)
return self.getTime() - secToNano(seconds)
TIME_TRAVELER = TimeTraveler()
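# Editor's note: illustrative usage sketch, not part of the original module; it assumes
# secToNano (referenced above) is defined elsewhere in this module. setTime() pins "now"
# to a past nanosecond timestamp so nowMinus() offsets are computed against the simulated
# clock rather than the wall clock.
def _example_time_travel():
    """Illustrative sketch: pin the clock, then query an offset from it."""
    pinned = pd.Timestamp("2017-07-01").value  # nanoseconds since the Unix epoch
    TIME_TRAVELER.setTime(pinned)
    one_day_before = TIME_TRAVELER.nowMinus(days=1)
    TIME_TRAVELER.setTime(None)  # back to real time
    return one_day_before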
def toDatetime(df):
"""
Convert the index of the given dataframe to datetime
Also works directly on a dataframe index.
"""
if isinstance(df, pd.DataFrame):
df.index =
|
pd.to_datetime(df.index, unit="ns")
|
pandas.to_datetime
|
import pandas as pd
from datetime import timedelta
import seaborn as sns
from matplotlib import pyplot as plt
import matplotlib as mpl
from matplotlib.ticker import FormatStrFormatter
from matplotlib.dates import DateFormatter
import numpy as np
from os.path import join as join_path
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import f_regression
from sklearn.ensemble import ExtraTreesRegressor
# START_DATE = '2014-09-29 00:00:00'
START_DATE = '2017-03-09 00:00:00'
END_DATE = '2017-05-29 23:00:00'
PATH = 'dataset'
NUMERAL = join_path(PATH, 'final_numeral.csv')
COUNT = join_path(PATH, 'count.csv')
sns.set()
# sns.set_palette('coolwarm')
# reading_count = pd.read_csv(COUNT, index_col=0, parse_dates=[0])
# # reading_count = reading_count.resample('AS').sum()
# mask = (reading_count.index >= START_DATE) & \
# (reading_count.index <= END_DATE)
# reading_count = reading_count.loc[mask]
# # print(reading_count)
# fig, ax = plt.subplots(figsize=(9, 3))
# fig.subplots_adjust(left=.07, bottom=.38, right=1, top=1)
# plt.xlim(reading_count.first_valid_index(), reading_count.last_valid_index())
# ax.xaxis.set_major_formatter(DateFormatter('%d/%m/%Y'))
# # ax.yaxis.set_major_formatter(FormatStrFormatter('%d'))
# HOUSES = (
# 3, # 20451
# 13, # 20468
# 20, # 20451
# )
# print(reading_count)
# print(reading_count.sum(axis=0))
# for house in HOUSES:
# sns.lineplot(x=reading_count.index, y=f'kWh_{house}', data=reading_count,
# legend='brief', label=f'Casa {house}')
# ax.legend(loc=4)
# ax.tick_params(axis='x', rotation=45)
# ax.set_ylabel('# de Registros')
# ax.set_xlabel('Data (dia)')
# fig.savefig('registro_dia.pdf', dpi=600, format='pdf')
# # plt.show()
# exit()
# exit()
# fig, ax = plt.subplots(figsize=(9, 3))
# fig.subplots_adjust(left=.07, bottom=.28, right=.96, top=1)
# plt.xlim(reading_count.first_valid_index(), reading_count.last_valid_index())
# ax.xaxis.set_major_formatter(DateFormatter('%m/%Y'))
# HOUSES = (
# # 3, 4, 5, 6, 7,
# 8, # 20179
# # 9, 10, 11, 12,
# 13, # 20468
# # 14,
# 18, # 20232
# # 19, 20,
# )
# print(reading_count.sum(axis=0))
# for house in HOUSES:
# sns.lineplot(x=reading_count.index, y=f'kWh_{house}', data=reading_count,
# legend='brief', label=f'Casa {house}')
# ax.legend(loc=4)
# ax.tick_params(axis='x', rotation=45)
# ax.set_ylabel('# de Registros')
# ax.set_xlabel('Data (mês)')
# fig.savefig('registro_mes.pdf', dpi=600, format='pdf')
# # plt.show()
# exit()
fig, ax = plt.subplots(figsize=(9, 5))
fig.subplots_adjust(left=.12, bottom=.18, right=1.05, top=1)
dataset = pd.read_csv(NUMERAL, index_col=0, parse_dates=[0])
# temperature, weather, weekend, year, day_of_week
dataset.drop(columns=['dst'], inplace=True)
X = dataset.drop(columns=['kWh'])
y = dataset.kWh
# === data correlation ===
corr = dataset.corr('kendall') * 100
mask = np.triu(np.ones_like(corr, dtype=bool))
# f, ax = plt.subplots()
sns.heatmap(corr, cmap='coolwarm', center=0, mask=mask, annot=True,
square=False, linewidths=1, vmax=100, vmin=-100, fmt='.2f',
cbar_kws={"shrink": .75, 'format': '%.0f%%'})
ax.tick_params(axis='x', rotation=45)
# plt.show()
fig.savefig('correlacao.pdf', dpi=600, format='pdf')
exit()
# === SelectKBest ===
best_features = SelectKBest(score_func=f_regression, k='all')
fitted = best_features.fit(X, y)
scores_ = pd.DataFrame(fitted.scores_)
pvalues_ = pd.DataFrame(fitted.pvalues_)
columns_ = pd.DataFrame(X.columns)
final = pd.concat([columns_, scores_, pvalues_], axis=1)
final.columns = ['Feature', 'Score', 'p-Value']
final['p-Value'] *= 100
print(final.nlargest(13, 'Score'), '\n')
# print(final)
exit()
# # === model importance ===
# model = ExtraTreesRegressor()
# fitted_model = model.fit(X, y)
# print(fitted_model.feature_importances_)
# feat_importances = pd.Series(
# fitted_model.feature_importances_, index=X.columns)
# feat_importances.nsmallest(12).plot(kind='barh')
# plt.show()
# exit()
data = pd.read_csv('hue/Residential_1.csv')
data.date =
|
pd.to_datetime(data.date)
|
pandas.to_datetime
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# QTPyLib: Quantitative Trading Python Library
# https://github.com/ranaroussi/qtpylib
#
# Copyright 2016-2018 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import atexit
import json
import logging
import os
import pickle
import sys
import tempfile
import time
import glob
import subprocess
from datetime import datetime
from abc import ABCMeta
import zmq
import pandas as pd
from dateutil.parser import parse as parse_date
import pymysql
from pymysql.constants.CLIENT import MULTI_STATEMENTS
from numpy import (
isnan as np_isnan,
nan as np_nan,
int64 as np_int64
)
from ezibpy import (
ezIBpy, dataTypes as ibDataTypes
)
from qtpylib import (
tools, asynctools, path, futures, __version__
)
# =============================================
# check min, python version
if sys.version_info < (3, 4):
raise SystemError("QTPyLib requires Python version >= 3.4")
# =============================================
# Configure logging
tools.createLogger(__name__, logging.INFO)
# Disable ezIBpy logging (Blotter handles errors itself)
logging.getLogger('ezibpy').setLevel(logging.CRITICAL)
# =============================================
# set up threading pool
__threads__ = tools.read_single_argv("--threads")
__threads__ = __threads__ if tools.is_number(__threads__) else None
asynctools.multitasking.createPool(__name__, __threads__)
# =============================================
cash_ticks = {}
class Blotter():
"""Broker class initilizer
:Optional:
name : string
name of the blotter (used by other modules)
symbols : str
IB contracts CSV database (default: ./symbols.csv)
ibport : int
TWS/GW Port to use (default: 4001)
ibclient : int
TWS/GW Client ID (default: 999)
ibserver : str
IB TWS/GW Server hostname (default: localhost)
zmqport : str
ZeroMQ Port to use (default: 12345)
zmqtopic : str
ZeroMQ string to use (default: _qtpylib_BLOTTERNAME_)
orderbook : str
Get Order Book (Market Depth) data (default: False)
dbhost : str
MySQL server hostname (default: localhost)
dbport : str
MySQL server port (default: 3306)
dbname : str
MySQL server database (default: qpy)
dbuser : str
MySQL server username (default: root)
dbpass : str
MySQL server password (default: none)
dbskip : str
Skip MySQL logging (default: False)
"""
__metaclass__ = ABCMeta
def __init__(self, name=None, symbols="symbols.csv",
ibport=4001, ibclient=999, ibserver="localhost",
dbhost="localhost", dbport="3306", dbname="qtpy",
dbuser="root", dbpass="", dbskip=False, orderbook=False,
zmqport="12345", zmqtopic=None, **kwargs):
# whats my name?
self.name = str(self.__class__).split('.')[-1].split("'")[0].lower()
if name is not None:
self.name = name
        # initialize class logger
self.log_blotter = logging.getLogger(__name__)
# do not act on first tick (timezone is incorrect)
self.first_tick = True
self._bars = pd.DataFrame(
columns=['open', 'high', 'low', 'close', 'volume'])
self._bars.index.names = ['datetime']
self._bars.index = pd.to_datetime(self._bars.index, utc=True)
# self._bars.index = self._bars.index.tz_convert(settings['timezone'])
self._bars = {"~": self._bars}
self._raw_bars =
|
pd.DataFrame(columns=['last', 'volume'])
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
from datetime import datetime, timedelta
import itertools
from numpy import nan
import numpy as np
from pandas import (DataFrame, Series, Timestamp, date_range, compat,
option_context, Categorical)
from pandas.core.arrays import IntervalArray, integer_array
from pandas.compat import StringIO
import pandas as pd
from pandas.util.testing import (assert_almost_equal,
assert_series_equal,
assert_frame_equal)
import pandas.util.testing as tm
# Segregated collection of methods that require the BlockManager internal data
# structure
class TestDataFrameBlockInternals():
def test_cast_internals(self, float_frame):
casted = DataFrame(float_frame._data, dtype=int)
expected = DataFrame(float_frame._series, dtype=int)
assert_frame_equal(casted, expected)
casted = DataFrame(float_frame._data, dtype=np.int32)
expected = DataFrame(float_frame._series, dtype=np.int32)
assert_frame_equal(casted, expected)
def test_consolidate(self, float_frame):
float_frame['E'] = 7.
consolidated = float_frame._consolidate()
assert len(consolidated._data.blocks) == 1
# Ensure copy, do I want this?
recons = consolidated._consolidate()
assert recons is not consolidated
tm.assert_frame_equal(recons, consolidated)
float_frame['F'] = 8.
assert len(float_frame._data.blocks) == 3
float_frame._consolidate(inplace=True)
assert len(float_frame._data.blocks) == 1
def test_consolidate_inplace(self, float_frame):
frame = float_frame.copy() # noqa
# triggers in-place consolidation
for letter in range(ord('A'), ord('Z')):
float_frame[chr(letter)] = chr(letter)
def test_values_consolidate(self, float_frame):
float_frame['E'] = 7.
assert not float_frame._data.is_consolidated()
_ = float_frame.values # noqa
assert float_frame._data.is_consolidated()
def test_modify_values(self, float_frame):
float_frame.values[5] = 5
assert (float_frame.values[5] == 5).all()
# unconsolidated
float_frame['E'] = 7.
float_frame.values[6] = 6
assert (float_frame.values[6] == 6).all()
def test_boolean_set_uncons(self, float_frame):
float_frame['E'] = 7.
expected = float_frame.values.copy()
expected[expected > 1] = 2
float_frame[float_frame > 1] = 2
assert_almost_equal(expected, float_frame.values)
def test_values_numeric_cols(self, float_frame):
float_frame['foo'] = 'bar'
values = float_frame[['A', 'B', 'C', 'D']].values
assert values.dtype == np.float64
def test_values_lcd(self, mixed_float_frame, mixed_int_frame):
# mixed lcd
values = mixed_float_frame[['A', 'B', 'C', 'D']].values
assert values.dtype == np.float64
values = mixed_float_frame[['A', 'B', 'C']].values
assert values.dtype == np.float32
values = mixed_float_frame[['C']].values
assert values.dtype == np.float16
# GH 10364
# B uint64 forces float because there are other signed int types
values = mixed_int_frame[['A', 'B', 'C', 'D']].values
assert values.dtype == np.float64
values = mixed_int_frame[['A', 'D']].values
assert values.dtype == np.int64
# B uint64 forces float because there are other signed int types
values = mixed_int_frame[['A', 'B', 'C']].values
assert values.dtype == np.float64
# as B and C are both unsigned, no forcing to float is needed
values = mixed_int_frame[['B', 'C']].values
assert values.dtype == np.uint64
values = mixed_int_frame[['A', 'C']].values
assert values.dtype == np.int32
values = mixed_int_frame[['C', 'D']].values
assert values.dtype == np.int64
values = mixed_int_frame[['A']].values
assert values.dtype == np.int32
values = mixed_int_frame[['C']].values
assert values.dtype == np.uint8
def test_constructor_with_convert(self):
# this is actually mostly a test of lib.maybe_convert_objects
# #2845
df = DataFrame({'A': [2 ** 63 - 1]})
result = df['A']
expected = Series(np.asarray([2 ** 63 - 1], np.int64), name='A')
assert_series_equal(result, expected)
df = DataFrame({'A': [2 ** 63]})
result = df['A']
expected = Series(np.asarray([2 ** 63], np.uint64), name='A')
assert_series_equal(result, expected)
df = DataFrame({'A': [datetime(2005, 1, 1), True]})
result = df['A']
expected = Series(np.asarray([datetime(2005, 1, 1), True], np.object_),
name='A')
assert_series_equal(result, expected)
df = DataFrame({'A': [None, 1]})
result = df['A']
expected = Series(np.asarray([np.nan, 1], np.float_), name='A')
assert_series_equal(result, expected)
df = DataFrame({'A': [1.0, 2]})
result = df['A']
expected = Series(np.asarray([1.0, 2], np.float_), name='A')
assert_series_equal(result, expected)
df = DataFrame({'A': [1.0 + 2.0j, 3]})
result = df['A']
expected = Series(np.asarray([1.0 + 2.0j, 3], np.complex_), name='A')
assert_series_equal(result, expected)
df = DataFrame({'A': [1.0 + 2.0j, 3.0]})
result = df['A']
expected = Series(np.asarray([1.0 + 2.0j, 3.0], np.complex_), name='A')
assert_series_equal(result, expected)
df = DataFrame({'A': [1.0 + 2.0j, True]})
result = df['A']
expected = Series(np.asarray([1.0 + 2.0j, True], np.object_), name='A')
assert_series_equal(result, expected)
df = DataFrame({'A': [1.0, None]})
result = df['A']
expected = Series(np.asarray([1.0, np.nan], np.float_), name='A')
assert_series_equal(result, expected)
df =
|
DataFrame({'A': [1.0 + 2.0j, None]})
|
pandas.DataFrame
|
import json
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_leaflet as dl
from dash.dependencies import Output, Input
from scipy.io import arff
import pandas as pd
data = arff.loadarff('ProData_1999_2019_EM2_Class-Final.arff')
dt =
|
pd.DataFrame(data[0])
|
pandas.DataFrame
|
import itertools
import operator
from os.path import dirname, join
import numpy as np
import pandas as pd
import pytest
from pandas.core import ops
from pandas.tests.extension import base
from pandas.tests.extension.conftest import ( # noqa: F401
as_array,
as_frame,
as_series,
fillna_method,
groupby_apply_op,
use_numpy,
)
from pint.errors import DimensionalityError
from pint.testsuite import QuantityTestCase, helpers
import pint_pandas as ppi
from pint_pandas import PintArray
ureg = ppi.PintType.ureg
@pytest.fixture(params=[True, False])
def box_in_series(request):
"""Whether to box the data in a Series"""
return request.param
@pytest.fixture
def dtype():
return ppi.PintType("pint[meter]")
@pytest.fixture
def data():
return ppi.PintArray.from_1darray_quantity(
np.arange(start=1.0, stop=101.0) * ureg.nm
)
@pytest.fixture
def data_missing():
return ppi.PintArray.from_1darray_quantity([np.nan, 1] * ureg.meter)
@pytest.fixture
def data_for_twos():
x = [
2.0,
] * 100
return ppi.PintArray.from_1darray_quantity(x * ureg.meter)
@pytest.fixture(params=["data", "data_missing"])
def all_data(request, data, data_missing):
if request.param == "data":
return data
elif request.param == "data_missing":
return data_missing
@pytest.fixture
def data_repeated(data):
"""Return different versions of data for count times"""
# no idea what I'm meant to put here, try just copying from https://github.com/pandas-dev/pandas/blob/master/pandas/tests/extension/integer/test_integer.py
def gen(count):
for _ in range(count):
yield data
yield gen
@pytest.fixture(params=[None, lambda x: x])
def sort_by_key(request):
"""
Simple fixture for testing keys in sorting methods.
Tests None (no key) and the identity key.
"""
return request.param
@pytest.fixture
def data_for_sorting():
return ppi.PintArray.from_1darray_quantity([0.3, 10, -50] * ureg.centimeter)
# should probably get more sophisticated and do something like
# [1 * ureg.meter, 3 * ureg.meter, 10 * ureg.centimeter]
@pytest.fixture
def data_missing_for_sorting():
return ppi.PintArray.from_1darray_quantity([4, np.nan, -5] * ureg.centimeter)
# should probably get more sophisticated and do something like
# [4 * ureg.meter, np.nan, 10 * ureg.centimeter]
@pytest.fixture
def na_cmp():
"""Binary operator for comparing NA values."""
return lambda x, y: bool(np.isnan(x.magnitude)) & bool(np.isnan(y.magnitude))
@pytest.fixture
def na_value():
return ppi.PintType("meter").na_value
@pytest.fixture
def data_for_grouping():
# should probably get more sophisticated here and use units on all these
# quantities
a = 1.0
b = 2.0 ** 32 + 1
c = 2.0 ** 32 + 10
return ppi.PintArray.from_1darray_quantity(
[b, b, np.nan, np.nan, a, a, b, c] * ureg.m
)
# === missing from pandas extension docs about what has to be included in tests ===
# copied from pandas/pandas/conftest.py
_all_arithmetic_operators = [
"__add__",
"__radd__",
"__sub__",
"__rsub__",
"__mul__",
"__rmul__",
"__floordiv__",
"__rfloordiv__",
"__truediv__",
"__rtruediv__",
"__pow__",
"__rpow__",
"__mod__",
"__rmod__",
]
@pytest.fixture(params=_all_arithmetic_operators)
def all_arithmetic_operators(request):
"""
Fixture for dunder names for common arithmetic operations
"""
return request.param
@pytest.fixture(params=["__eq__", "__ne__", "__le__", "__lt__", "__ge__", "__gt__"])
def all_compare_operators(request):
"""
Fixture for dunder names for common compare operations
* >=
* >
* ==
* !=
* <
* <=
"""
return request.param
# commented functions aren't implemented
_all_numeric_reductions = [
"sum",
"max",
"min",
"mean",
# "prod",
# "std",
# "var",
"median",
# "kurt",
# "skew",
]
@pytest.fixture(params=_all_numeric_reductions)
def all_numeric_reductions(request):
"""
Fixture for numeric reduction names.
"""
return request.param
_all_boolean_reductions = ["all", "any"]
@pytest.fixture(params=_all_boolean_reductions)
def all_boolean_reductions(request):
"""
Fixture for boolean reduction names.
"""
return request.param
# =================================================================
class TestCasting(base.BaseCastingTests):
pass
class TestConstructors(base.BaseConstructorsTests):
@pytest.mark.xfail(run=True, reason="__iter__ / __len__ issue")
def test_series_constructor_no_data_with_index(self, dtype, na_value):
result = pd.Series(index=[1, 2, 3], dtype=dtype)
expected = pd.Series([na_value] * 3, index=[1, 2, 3], dtype=dtype)
self.assert_series_equal(result, expected)
# GH 33559 - empty index
result = pd.Series(index=[], dtype=dtype)
expected = pd.Series([], index=pd.Index([], dtype="object"), dtype=dtype)
self.assert_series_equal(result, expected)
@pytest.mark.xfail(run=True, reason="__iter__ / __len__ issue")
def test_series_constructor_scalar_na_with_index(self, dtype, na_value):
result = pd.Series(na_value, index=[1, 2, 3], dtype=dtype)
expected = pd.Series([na_value] * 3, index=[1, 2, 3], dtype=dtype)
self.assert_series_equal(result, expected)
@pytest.mark.xfail(run=True, reason="__iter__ / __len__ issue")
def test_series_constructor_scalar_with_index(self, data, dtype):
scalar = data[0]
result = pd.Series(scalar, index=[1, 2, 3], dtype=dtype)
expected = pd.Series([scalar] * 3, index=[1, 2, 3], dtype=dtype)
self.assert_series_equal(result, expected)
result = pd.Series(scalar, index=["foo"], dtype=dtype)
expected = pd.Series([scalar], index=["foo"], dtype=dtype)
self.assert_series_equal(result, expected)
class TestDtype(base.BaseDtypeTests):
pass
class TestGetitem(base.BaseGetitemTests):
def test_getitem_mask_raises(self, data):
mask = np.array([True, False])
msg = f"Boolean index has wrong length: 2 instead of {len(data)}"
with pytest.raises(IndexError, match=msg):
data[mask]
mask = pd.array(mask, dtype="boolean")
with pytest.raises(IndexError, match=msg):
data[mask]
class TestGroupby(base.BaseGroupbyTests):
@pytest.mark.xfail(run=True, reason="__iter__ / __len__ issue")
def test_groupby_apply_identity(self, data_for_grouping):
df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4], "B": data_for_grouping})
result = df.groupby("A").B.apply(lambda x: x.array)
expected = pd.Series(
[
df.B.iloc[[0, 1, 6]].array,
df.B.iloc[[2, 3]].array,
df.B.iloc[[4, 5]].array,
df.B.iloc[[7]].array,
],
index=pd.Index([1, 2, 3, 4], name="A"),
name="B",
)
self.assert_series_equal(result, expected)
class TestInterface(base.BaseInterfaceTests):
pass
class TestMethods(base.BaseMethodsTests):
@pytest.mark.filterwarnings("ignore::pint.UnitStrippedWarning")
# See test_setitem_mask_broadcast note
@pytest.mark.parametrize("dropna", [True, False])
def test_value_counts(self, all_data, dropna):
all_data = all_data[:10]
if dropna:
other = all_data[~all_data.isna()]
else:
other = all_data
result = pd.Series(all_data).value_counts(dropna=dropna).sort_index()
expected = pd.Series(other).value_counts(dropna=dropna).sort_index()
self.assert_series_equal(result, expected)
@pytest.mark.filterwarnings("ignore::pint.UnitStrippedWarning")
# See test_setitem_mask_broadcast note
@pytest.mark.parametrize("box", [pd.Series, lambda x: x])
@pytest.mark.parametrize("method", [lambda x: x.unique(), pd.unique])
def test_unique(self, data, box, method):
duplicated = box(data._from_sequence([data[0], data[0]]))
result = method(duplicated)
assert len(result) == 1
assert isinstance(result, type(data))
assert result[0] == duplicated[0]
@pytest.mark.xfail(run=True, reason="__iter__ / __len__ issue")
def test_fillna_copy_frame(self, data_missing):
arr = data_missing.take([1, 1])
df = pd.DataFrame({"A": arr})
filled_val = df.iloc[0, 0]
result = df.fillna(filled_val)
assert df.A.values is not result.A.values
@pytest.mark.xfail(run=True, reason="__iter__ / __len__ issue")
def test_fillna_copy_series(self, data_missing):
arr = data_missing.take([1, 1])
ser = pd.Series(arr)
filled_val = ser[0]
result = ser.fillna(filled_val)
assert ser._values is not result._values
assert ser._values is arr
@pytest.mark.xfail(run=True, reason="__iter__ / __len__ issue")
def test_searchsorted(self, data_for_sorting, as_series): # noqa: F811
b, c, a = data_for_sorting
arr = type(data_for_sorting)._from_sequence([a, b, c])
if as_series:
arr = pd.Series(arr)
assert arr.searchsorted(a) == 0
assert arr.searchsorted(a, side="right") == 1
assert arr.searchsorted(b) == 1
assert arr.searchsorted(b, side="right") == 2
assert arr.searchsorted(c) == 2
assert arr.searchsorted(c, side="right") == 3
result = arr.searchsorted(arr.take([0, 2]))
expected = np.array([0, 2], dtype=np.intp)
self.assert_numpy_array_equal(result, expected)
# sorter
sorter = np.array([1, 2, 0])
assert data_for_sorting.searchsorted(a, sorter=sorter) == 0
@pytest.mark.xfail(run=True, reason="__iter__ / __len__ issue")
def test_where_series(self, data, na_value, as_frame): # noqa: F811
assert data[0] != data[1]
cls = type(data)
a, b = data[:2]
ser = pd.Series(cls._from_sequence([a, a, b, b], dtype=data.dtype))
cond = np.array([True, True, False, False])
if as_frame:
ser = ser.to_frame(name="a")
cond = cond.reshape(-1, 1)
result = ser.where(cond)
expected = pd.Series(
cls._from_sequence([a, a, na_value, na_value], dtype=data.dtype)
)
if as_frame:
expected = expected.to_frame(name="a")
self.assert_equal(result, expected)
# array other
cond = np.array([True, False, True, True])
other = cls._from_sequence([a, b, a, b], dtype=data.dtype)
if as_frame:
other = pd.DataFrame({"a": other})
cond = pd.DataFrame({"a": cond})
result = ser.where(cond, other)
expected = pd.Series(cls._from_sequence([a, b, b, b], dtype=data.dtype))
if as_frame:
expected = expected.to_frame(name="a")
self.assert_equal(result, expected)
@pytest.mark.parametrize("ascending", [True, False])
def test_sort_values(self, data_for_sorting, ascending, sort_by_key):
ser = pd.Series(data_for_sorting)
result = ser.sort_values(ascending=ascending, key=sort_by_key)
expected = ser.iloc[[2, 0, 1]]
if not ascending:
expected = expected[::-1]
self.assert_series_equal(result, expected)
@pytest.mark.parametrize("ascending", [True, False])
def test_sort_values_missing(
self, data_missing_for_sorting, ascending, sort_by_key
):
ser = pd.Series(data_missing_for_sorting)
result = ser.sort_values(ascending=ascending, key=sort_by_key)
if ascending:
expected = ser.iloc[[2, 0, 1]]
else:
expected = ser.iloc[[0, 2, 1]]
self.assert_series_equal(result, expected)
class TestArithmeticOps(base.BaseArithmeticOpsTests):
def check_opname(self, s, op_name, other, exc=None):
op = self.get_op_from_name(op_name)
self._check_op(s, op, other, exc)
def _check_op(self, s, op, other, exc=None):
if exc is None:
result = op(s, other)
expected = s.combine(other, op)
self.assert_series_equal(result, expected)
else:
with pytest.raises(exc):
op(s, other)
def _check_divmod_op(self, s, op, other, exc=None):
# divmod has multiple return values, so check separately
if exc is None:
result_div, result_mod = op(s, other)
if op is divmod:
expected_div, expected_mod = s // other, s % other
else:
expected_div, expected_mod = other // s, other % s
self.assert_series_equal(result_div, expected_div)
self.assert_series_equal(result_mod, expected_mod)
else:
with pytest.raises(exc):
divmod(s, other)
def _get_exception(self, data, op_name):
if op_name in ["__pow__", "__rpow__"]:
return op_name, DimensionalityError
else:
return op_name, None
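    # Note (illustrative, not from the upstream suite): __pow__/__rpow__ are special-cased
    # above because pint only accepts dimensionless exponents, so raising one length
    # quantity to the power of another fails, e.g.
    #   >>> (2 * ureg.m) ** (2 * ureg.m)   # raises pint.DimensionalityError
    # which is why the arithmetic tests below expect DimensionalityError for pow.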
def test_arith_series_with_scalar(self, data, all_arithmetic_operators):
# series & scalar
op_name, exc = self._get_exception(data, all_arithmetic_operators)
s = pd.Series(data)
self.check_opname(s, op_name, s.iloc[0], exc=exc)
@pytest.mark.xfail(run=True, reason="__iter__ / __len__ issue")
def test_arith_frame_with_scalar(self, data, all_arithmetic_operators):
# frame & scalar
op_name, exc = self._get_exception(data, all_arithmetic_operators)
df = pd.DataFrame({"A": data})
self.check_opname(df, op_name, data[0], exc=exc)
@pytest.mark.xfail(run=True, reason="s.combine does not accept arrays")
def test_arith_series_with_array(self, data, all_arithmetic_operators):
# ndarray & other series
op_name, exc = self._get_exception(data, all_arithmetic_operators)
s = pd.Series(data)
self.check_opname(s, op_name, data, exc=exc)
# parameterise this to try divisor not equal to 1
def test_divmod(self, data):
s = pd.Series(data)
self._check_divmod_op(s, divmod, 1 * ureg.Mm)
self._check_divmod_op(1 * ureg.Mm, ops.rdivmod, s)
@pytest.mark.xfail(run=True, reason="Test is deleted in pd 1.3, pd GH #39386")
def test_error(self, data, all_arithmetic_operators):
# invalid ops
op = all_arithmetic_operators
s = pd.Series(data)
ops = getattr(s, op)
opa = getattr(data, op)
# invalid scalars
# TODO: work out how to make this more specific/test for the two
# different possible errors here
with pytest.raises(Exception):
ops("foo")
# TODO: work out how to make this more specific/test for the two
# different possible errors here
with pytest.raises(Exception):
ops(
|
pd.Timestamp("20180101")
|
pandas.Timestamp
|
import numpy as np
import pytest
from pandas._libs import join as _join
from pandas import Categorical, DataFrame, Index, merge
import pandas._testing as tm
class TestIndexer:
@pytest.mark.parametrize(
"dtype", ["int32", "int64", "float32", "float64", "object"]
)
def test_outer_join_indexer(self, dtype):
indexer = _join.outer_join_indexer
left = np.arange(3, dtype=dtype)
right = np.arange(2, 5, dtype=dtype)
empty = np.array([], dtype=dtype)
result, lindexer, rindexer = indexer(left, right)
assert isinstance(result, np.ndarray)
assert isinstance(lindexer, np.ndarray)
assert isinstance(rindexer, np.ndarray)
tm.assert_numpy_array_equal(result, np.arange(5, dtype=dtype))
exp = np.array([0, 1, 2, -1, -1], dtype=np.int64)
tm.assert_numpy_array_equal(lindexer, exp)
exp = np.array([-1, -1, 0, 1, 2], dtype=np.int64)
tm.assert_numpy_array_equal(rindexer, exp)
result, lindexer, rindexer = indexer(empty, right)
tm.assert_numpy_array_equal(result, right)
exp = np.array([-1, -1, -1], dtype=np.int64)
tm.assert_numpy_array_equal(lindexer, exp)
exp = np.array([0, 1, 2], dtype=np.int64)
tm.assert_numpy_array_equal(rindexer, exp)
result, lindexer, rindexer = indexer(left, empty)
tm.assert_numpy_array_equal(result, left)
exp = np.array([0, 1, 2], dtype=np.int64)
tm.assert_numpy_array_equal(lindexer, exp)
exp = np.array([-1, -1, -1], dtype=np.int64)
tm.assert_numpy_array_equal(rindexer, exp)
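# Note (sketch): the *_join_indexer helpers return (joined_values, left_indexer,
# right_indexer); a value of -1 in an indexer marks a joined position with no match on that
# side. For left=[0, 1, 2] and right=[2, 3, 4] the outer join yields [0, 1, 2, 3, 4] with
# rindexer [-1, -1, 0, 1, 2], exactly as asserted above.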
def test_left_join_indexer_unique():
a = np.array([1, 2, 3, 4, 5], dtype=np.int64)
b = np.array([2, 2, 3, 4, 4], dtype=np.int64)
result = _join.left_join_indexer_unique(b, a)
expected = np.array([1, 1, 2, 3, 3], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
def test_left_outer_join_bug():
left = np.array(
[
0,
1,
0,
1,
1,
2,
3,
1,
0,
2,
1,
2,
0,
1,
1,
2,
3,
2,
3,
2,
1,
1,
3,
0,
3,
2,
3,
0,
0,
2,
3,
2,
0,
3,
1,
3,
0,
1,
3,
0,
0,
1,
0,
3,
1,
0,
1,
0,
1,
1,
0,
2,
2,
2,
2,
2,
0,
3,
1,
2,
0,
0,
3,
1,
3,
2,
2,
0,
1,
3,
0,
2,
3,
2,
3,
3,
2,
3,
3,
1,
3,
2,
0,
0,
3,
1,
1,
1,
0,
2,
3,
3,
1,
2,
0,
3,
1,
2,
0,
2,
],
dtype=np.int64,
)
right = np.array([3, 1], dtype=np.int64)
max_groups = 4
lidx, ridx = _join.left_outer_join(left, right, max_groups, sort=False)
exp_lidx = np.arange(len(left), dtype=np.int64)
exp_ridx = -np.ones(len(left), dtype=np.int64)
exp_ridx[left == 1] = 1
exp_ridx[left == 3] = 0
tm.assert_numpy_array_equal(lidx, exp_lidx)
tm.assert_numpy_array_equal(ridx, exp_ridx)
def test_inner_join_indexer():
a = np.array([1, 2, 3, 4, 5], dtype=np.int64)
b = np.array([0, 3, 5, 7, 9], dtype=np.int64)
index, ares, bres = _join.inner_join_indexer(a, b)
index_exp = np.array([3, 5], dtype=np.int64)
tm.assert_almost_equal(index, index_exp)
aexp = np.array([2, 4], dtype=np.int64)
bexp = np.array([1, 2], dtype=np.int64)
tm.assert_almost_equal(ares, aexp)
tm.assert_almost_equal(bres, bexp)
a = np.array([5], dtype=np.int64)
b = np.array([5], dtype=np.int64)
index, ares, bres = _join.inner_join_indexer(a, b)
tm.assert_numpy_array_equal(index, np.array([5], dtype=np.int64))
tm.assert_numpy_array_equal(ares, np.array([0], dtype=np.int64))
tm.assert_numpy_array_equal(bres, np.array([0], dtype=np.int64))
def test_outer_join_indexer():
a = np.array([1, 2, 3, 4, 5], dtype=np.int64)
b = np.array([0, 3, 5, 7, 9], dtype=np.int64)
index, ares, bres = _join.outer_join_indexer(a, b)
index_exp = np.array([0, 1, 2, 3, 4, 5, 7, 9], dtype=np.int64)
tm.assert_almost_equal(index, index_exp)
aexp = np.array([-1, 0, 1, 2, 3, 4, -1, -1], dtype=np.int64)
bexp = np.array([0, -1, -1, 1, -1, 2, 3, 4], dtype=np.int64)
tm.assert_almost_equal(ares, aexp)
tm.assert_almost_equal(bres, bexp)
a = np.array([5], dtype=np.int64)
b = np.array([5], dtype=np.int64)
index, ares, bres = _join.outer_join_indexer(a, b)
tm.assert_numpy_array_equal(index, np.array([5], dtype=np.int64))
tm.assert_numpy_array_equal(ares, np.array([0], dtype=np.int64))
tm.assert_numpy_array_equal(bres, np.array([0], dtype=np.int64))
def test_left_join_indexer():
a = np.array([1, 2, 3, 4, 5], dtype=np.int64)
b = np.array([0, 3, 5, 7, 9], dtype=np.int64)
index, ares, bres = _join.left_join_indexer(a, b)
tm.assert_almost_equal(index, a)
aexp = np.array([0, 1, 2, 3, 4], dtype=np.int64)
bexp = np.array([-1, -1, 1, -1, 2], dtype=np.int64)
tm.assert_almost_equal(ares, aexp)
tm.assert_almost_equal(bres, bexp)
a = np.array([5], dtype=np.int64)
b = np.array([5], dtype=np.int64)
index, ares, bres = _join.left_join_indexer(a, b)
tm.assert_numpy_array_equal(index, np.array([5], dtype=np.int64))
tm.assert_numpy_array_equal(ares, np.array([0], dtype=np.int64))
tm.assert_numpy_array_equal(bres, np.array([0], dtype=np.int64))
def test_left_join_indexer2():
idx = Index([1, 1, 2, 5])
idx2 = Index([1, 2, 5, 7, 9])
res, lidx, ridx = _join.left_join_indexer(idx2.values, idx.values)
exp_res = np.array([1, 1, 2, 5, 7, 9], dtype=np.int64)
tm.assert_almost_equal(res, exp_res)
exp_lidx = np.array([0, 0, 1, 2, 3, 4], dtype=np.int64)
tm.assert_almost_equal(lidx, exp_lidx)
exp_ridx = np.array([0, 1, 2, 3, -1, -1], dtype=np.int64)
tm.assert_almost_equal(ridx, exp_ridx)
def test_outer_join_indexer2():
idx = Index([1, 1, 2, 5])
idx2 = Index([1, 2, 5, 7, 9])
res, lidx, ridx = _join.outer_join_indexer(idx2.values, idx.values)
exp_res = np.array([1, 1, 2, 5, 7, 9], dtype=np.int64)
tm.assert_almost_equal(res, exp_res)
exp_lidx = np.array([0, 0, 1, 2, 3, 4], dtype=np.int64)
tm.assert_almost_equal(lidx, exp_lidx)
exp_ridx = np.array([0, 1, 2, 3, -1, -1], dtype=np.int64)
tm.assert_almost_equal(ridx, exp_ridx)
def test_inner_join_indexer2():
idx = Index([1, 1, 2, 5])
idx2 = Index([1, 2, 5, 7, 9])
res, lidx, ridx = _join.inner_join_indexer(idx2.values, idx.values)
exp_res = np.array([1, 1, 2, 5], dtype=np.int64)
tm.assert_almost_equal(res, exp_res)
exp_lidx = np.array([0, 0, 1, 2], dtype=np.int64)
tm.assert_almost_equal(lidx, exp_lidx)
exp_ridx = np.array([0, 1, 2, 3], dtype=np.int64)
tm.assert_almost_equal(ridx, exp_ridx)
def test_merge_join_categorical_multiindex():
# From issue 16627
a = {
"Cat1": Categorical(["a", "b", "a", "c", "a", "b"], ["a", "b", "c"]),
"Int1": [0, 1, 0, 1, 0, 0],
}
a = DataFrame(a)
b = {
"Cat": Categorical(["a", "b", "c", "a", "b", "c"], ["a", "b", "c"]),
"Int": [0, 0, 0, 1, 1, 1],
"Factor": [1.1, 1.2, 1.3, 1.4, 1.5, 1.6],
}
b = DataFrame(b).set_index(["Cat", "Int"])["Factor"]
expected = merge(
a,
b.reset_index(),
left_on=["Cat1", "Int1"],
right_on=["Cat", "Int"],
how="left",
)
result = a.join(b, on=["Cat1", "Int1"])
expected = expected.drop(["Cat", "Int"], axis=1)
|
tm.assert_frame_equal(expected, result)
|
pandas._testing.assert_frame_equal
|
import pandas as pd
import numpy as np
# 12.1 Categorical Data
s = pd.Series([1, 2, 2, 3, 3, 3, 5])
s.unique()
pd.unique(s)
pd.value_counts(s)
# dimension table
values = pd.Series([0, 1, 0, 0] * 2)
dim =
|
pd.Series(["apple", "orange"])
|
pandas.Series
|
import pandas as __pd
import datetime as __dt
from multiprocessing import Pool as __Pool
import multiprocessing as __mp
from functools import reduce as __red
from seffaflik.__ortak.__araclar import make_requests as __make_requests
from seffaflik.__ortak import __dogrulama as __dogrulama
from seffaflik.elektrik import santraller as __santraller
__first_part_url = "production/"
def organizasyonlar():
"""
    Returns information on the organizations that can submit a Finalized Day-Ahead Production Plan (KGÜP).
    Parameters
    ----------
    Returns
    -------
    Organizations That Can Submit KGÜP (Id, Adı, EIC Kodu, Kısa Adı, Durum)
"""
try:
particular_url = __first_part_url + "dpp-organization"
json = __make_requests(particular_url)
df = __pd.DataFrame(json["body"]["organizations"])
df.rename(index=str,
columns={"organizationId": "Id", "organizationName": "Adı",
"organizationETSOCode": "EIC Kodu", "organizationShortName": "Kısa Adı",
"organizationStatus": "Durum"},
inplace=True)
df = df[["Id", "Adı", "EIC Kodu", "Kısa Adı", "Durum"]]
except (KeyError, TypeError):
return __pd.DataFrame()
else:
return df
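# Illustrative usage (sketch; requires access to the EPİAŞ transparency API, column list
# follows directly from the selection above):
#   >>> organizasyonlar().columns.tolist()
#   ['Id', 'Adı', 'EIC Kodu', 'Kısa Adı', 'Durum']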
def organizasyon_veris_cekis_birimleri(eic):
"""
    Returns the settlement-based injection/withdrawal unit (UEVÇB) information of the organization that can
    submit a Finalized Day-Ahead Production Plan (KGÜP) for the given EIC value.
    Parameters
    ----------
    eic : organization EIC code as a string
    Returns
    -------
    UEVÇB Information of the Organization That Can Submit KGÜP (Id, Adı, EIC Kodu)
"""
if __dogrulama.__kgup_girebilen_organizasyon_dogrulama(eic):
try:
particular_url = __first_part_url + "dpp-injection-unit-name?organizationEIC=" + eic
json = __make_requests(particular_url)
df_unit = __pd.DataFrame(json["body"]["injectionUnitNames"])
df_unit.rename(index=str, columns={"id": "Id", "name": "Adı", "eic": "EIC Kodu"}, inplace=True)
df_unit = df_unit[["Id", "Adı", "EIC Kodu"]]
except (KeyError, TypeError):
return __pd.DataFrame()
else:
return df_unit
def tum_organizasyonlar_veris_cekis_birimleri():
"""
    Returns all organizations that can submit a Finalized Day-Ahead Production Plan (KGÜP) together with the
    settlement-based injection/withdrawal unit (UEVÇB) information of these organizations.
    Parameters
    ----------
    Returns
    -------
    Organizations That Can Submit KGÜP and Their UEVÇB Information (Org Id, Org Adı, Org EIC Kodu, Org Kısa Adı,
    Org Durum, UEVÇB Id, UEVÇB Adı, UEVÇB EIC Kodu)
"""
list_org = organizasyonlar()[["Id", "Adı", "EIC Kodu", "Kısa Adı", "Durum"]].to_dict("records")
with __Pool(__mp.cpu_count()) as p:
list_df_unit = p.map(__organizasyon_cekis_birimleri, list_org, chunksize=1)
return __pd.concat(list_df_unit).reset_index(drop=True)
def kgup(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"),
bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"), organizasyon_eic="", uevcb_eic=""):
"""
    Returns the source-based Finalized Day-Ahead Production Plan (KGÜP) for the given date range.
    Note: if "organizasyon_eic" is given and "uevcb_eic" is not, the KGÜP is returned as the total over all
    UEVÇBs of that organization. If both values are given, the KGÜP of that organization's given UEVÇB is
    returned.
    Parameters
    ----------
    baslangic_tarihi : start date in YYYY-MM-DD format (default: today)
    bitis_tarihi : end date in YYYY-MM-DD format (default: today)
    organizasyon_eic : organization EIC code as a string (default: "")
    uevcb_eic : UEVÇB EIC code as a string (default: "")
    Returns
    -------
    KGUP (Tarih, Saat, Doğalgaz, Barajlı, Linyit, Akarsu, İthal Kömür, Rüzgar, Fuel Oil, Jeo Termal, Taş Kömür,
    Biyokütle, Nafta, Diğer, Toplam)
"""
if __dogrulama.__baslangic_bitis_tarih_dogrulama(baslangic_tarihi, bitis_tarihi):
try:
particular_url = __first_part_url + "dpp" + "?startDate=" + baslangic_tarihi + "&endDate=" + bitis_tarihi \
+ "&organizationEIC=" + organizasyon_eic + "&uevcbEIC=" + uevcb_eic
json = __make_requests(particular_url)
df = __pd.DataFrame(json["body"]["dppList"])
df["Saat"] = df["tarih"].apply(lambda h: int(h[11:13]))
df["Tarih"] = __pd.to_datetime(df["tarih"].apply(lambda d: d[:10]))
df.rename(index=str,
columns={"akarsu": "Akarsu", "barajli": "Barajlı", "biokutle": "Biyokütle", "diger": "Diğer",
"dogalgaz": "Doğalgaz", "fuelOil": "Fuel Oil", "ithalKomur": "İthal Kömür",
"jeotermal": "Jeo Termal", "linyit": "Linyit", "nafta": "Nafta",
"ruzgar": "Rüzgar", "tasKomur": "Taş Kömür", "toplam": "Toplam"}, inplace=True)
df = df[["Tarih", "Saat", "Doğalgaz", "Barajlı", "Linyit", "Akarsu", "İthal Kömür", "Rüzgar",
"Fuel Oil", "Jeo Termal", "Taş Kömür", "Biyokütle", "Nafta", "Diğer", "Toplam"]]
except (KeyError, TypeError):
return __pd.DataFrame()
else:
return df
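# Illustrative usage (sketch; dates are placeholders and the call hits the live API, so the
# values are not reproducible here):
#   >>> df = kgup(baslangic_tarihi="2021-01-01", bitis_tarihi="2021-01-02")
#   >>> list(df.columns)[:4]
#   ['Tarih', 'Saat', 'Doğalgaz', 'Barajlı']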
def tum_organizasyonlar_kgup(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"),
bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d")):
"""
    Returns the hourly KGÜP values of all organizations that can submit a Finalized Day-Ahead Production Plan
    (KGÜP) for the given date range.
    Parameters
    ----------
    baslangic_tarihi : start date in YYYY-MM-DD format (default: today)
    bitis_tarihi : end date in YYYY-MM-DD format (default: today)
    Returns
    -------
    KGÜP Values of the Organizations That Can Submit KGÜP (Tarih, Saat, KGUP)
"""
if __dogrulama.__baslangic_bitis_tarih_dogrulama(baslangic_tarihi, bitis_tarihi):
org = organizasyonlar()
list_org = org[["EIC Kodu", "Kısa Adı"]].to_dict("records")
org_len = len(list_org)
list_date_org_eic = list(zip([baslangic_tarihi] * org_len, [bitis_tarihi] * org_len, list_org))
list_date_org_eic = list(map(list, list_date_org_eic))
with __Pool(__mp.cpu_count()) as p:
list_df_unit = p.starmap(__kgup, list_date_org_eic, chunksize=1)
list_df_unit = list(filter(lambda x: len(x) > 0, list_df_unit))
df_unit = __red(lambda left, right: __pd.merge(left, right, how="outer", on=["Tarih", "Saat"], sort=True),
list_df_unit)
return df_unit
def tum_uevcb_kgup(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"),
bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d")):
"""
    Returns the hourly KGÜP values of the settlement-based injection/withdrawal units (UEVÇB) of all
    organizations that can submit a Finalized Day-Ahead Production Plan (KGÜP) for the given date range.
    Parameters
    ----------
    baslangic_tarihi : start date in YYYY-MM-DD format (default: today)
    bitis_tarihi : end date in YYYY-MM-DD format (default: today)
    Returns
    -------
    UEVÇB KGÜP Values of the Organizations That Can Submit KGÜP (Tarih, Saat, KGUP)
"""
if __dogrulama.__baslangic_bitis_tarih_dogrulama(baslangic_tarihi, bitis_tarihi):
org_uevcb = tum_organizasyonlar_veris_cekis_birimleri()
list_org_uevcb = org_uevcb[["Org EIC Kodu", "UEVÇB EIC Kodu", "UEVÇB Adı"]].to_dict("records")
list_org_uevcb_len = len(list_org_uevcb)
list_date_org_uevcb_eic = list(
zip([baslangic_tarihi] * list_org_uevcb_len, [bitis_tarihi] * list_org_uevcb_len, list_org_uevcb))
list_date_org_uevcb_eic = list(map(list, list_date_org_uevcb_eic))
with __Pool(__mp.cpu_count()) as p:
list_df_unit = p.starmap(__kgup_uevcb, list_date_org_uevcb_eic, chunksize=1)
list_df_unit = list(filter(lambda x: len(x) > 0, list_df_unit))
df_unit = __red(lambda left, right: __pd.merge(left, right, how="outer", on=["Tarih", "Saat"], sort=True),
list_df_unit)
return df_unit
def eak(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"),
bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"), organizasyon_eic="", uevcb_eic=""):
"""
    Returns the source-based Available Installed Capacity (EAK) for the given date range.
    Note: if "organizasyon_eic" is given and "uevcb_eic" is not, the EAK is returned as the total over all
    UEVÇBs of that organization. If both values are given, the EAK of that organization's given UEVÇB is
    returned.
    Parameters
    ----------
    baslangic_tarihi : start date in YYYY-MM-DD format (default: today)
    bitis_tarihi : end date in YYYY-MM-DD format (default: today)
    organizasyon_eic : organization EIC code as a string (default: "")
    uevcb_eic : UEVÇB EIC code as a string (default: "")
    Returns
    -------
    EAK (Tarih, Saat, Doğalgaz, Barajlı, Linyit, Akarsu, İthal Kömür, Rüzgar, Fuel Oil, Jeo Termal, Taş Kömür,
    Biyokütle, Nafta, Diğer, Toplam)
"""
if __dogrulama.__baslangic_bitis_tarih_dogrulama(baslangic_tarihi, bitis_tarihi):
try:
particular_url = __first_part_url + "aic" + "?startDate=" + baslangic_tarihi + "&endDate=" + bitis_tarihi \
+ "&organizationEIC=" + organizasyon_eic + "&uevcbEIC=" + uevcb_eic
json = __make_requests(particular_url)
df = __pd.DataFrame(json["body"]["aicList"])
df["Saat"] = df["tarih"].apply(lambda h: int(h[11:13]))
df["Tarih"] = __pd.to_datetime(df["tarih"].apply(lambda d: d[:10]))
df.rename(index=str,
columns={"akarsu": "Akarsu", "barajli": "Barajlı", "biokutle": "Biyokütle", "diger": "Diğer",
"dogalgaz": "Doğalgaz", "fuelOil": "Fuel Oil", "ithalKomur": "İthal Kömür",
"jeotermal": "Jeo Termal", "linyit": "Linyit", "nafta": "Nafta",
"ruzgar": "Rüzgar", "tasKomur": "Taş Kömür", "toplam": "Toplam"}, inplace=True)
df = df[["Tarih", "Saat", "Doğalgaz", "Barajlı", "Linyit", "Akarsu", "İthal Kömür", "Rüzgar",
"Fuel Oil", "Jeo Termal", "Taş Kömür", "Biyokütle", "Nafta", "Diğer", "Toplam"]]
except (KeyError, TypeError):
return __pd.DataFrame()
else:
return df
def tum_organizasyonlar_eak(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"),
bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d")):
"""
    Returns the hourly EAK values of all organizations that can submit Available Installed Capacity (EAK) for
    the given date range.
    Parameters
    ----------
    baslangic_tarihi : start date in YYYY-MM-DD format (default: today)
    bitis_tarihi : end date in YYYY-MM-DD format (default: today)
    Returns
    -------
    EAK Values of the Organizations That Can Submit EAK (Tarih, Saat, EAK)
"""
if __dogrulama.__baslangic_bitis_tarih_dogrulama(baslangic_tarihi, bitis_tarihi):
org = organizasyonlar()
list_org = org[["EIC Kodu", "Kısa Adı"]].to_dict("records")
org_len = len(list_org)
list_date_org_eic = list(zip([baslangic_tarihi] * org_len, [bitis_tarihi] * org_len, list_org))
list_date_org_eic = list(map(list, list_date_org_eic))
with __Pool(__mp.cpu_count()) as p:
list_df_unit = p.starmap(__eak, list_date_org_eic, chunksize=1)
list_df_unit = list(filter(lambda x: len(x) > 0, list_df_unit))
df_unit = __red(lambda left, right: __pd.merge(left, right, how="outer", on=["Tarih", "Saat"], sort=True),
list_df_unit)
return df_unit
def tum_uevcb_eak(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"),
bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d")):
"""
    Returns the hourly EAK values of the settlement-based injection/withdrawal units (UEVÇB) of all
    organizations that can submit Available Installed Capacity (EAK) for the given date range.
    Parameters
    ----------
    baslangic_tarihi : start date in YYYY-MM-DD format (default: today)
    bitis_tarihi : end date in YYYY-MM-DD format (default: today)
    Returns
    -------
    UEVÇB EAK Values of the Organizations That Can Submit EAK (Tarih, Saat, EAK)
"""
if __dogrulama.__baslangic_bitis_tarih_dogrulama(baslangic_tarihi, bitis_tarihi):
org_uevcb = tum_organizasyonlar_veris_cekis_birimleri()
list_org_uevcb = org_uevcb[["Org EIC Kodu", "UEVÇB EIC Kodu", "UEVÇB Adı"]].to_dict("records")
list_org_uevcb_len = len(list_org_uevcb)
list_date_org_uevcb_eic = list(
zip([baslangic_tarihi] * list_org_uevcb_len, [bitis_tarihi] * list_org_uevcb_len, list_org_uevcb))
list_date_org_uevcb_eic = list(map(list, list_date_org_uevcb_eic))
with __Pool(__mp.cpu_count()) as p:
list_df_unit = p.starmap(__eak_uevcb, list_date_org_uevcb_eic, chunksize=1)
list_df_unit = list(filter(lambda x: len(x) > 0, list_df_unit))
df_unit = __red(lambda left, right: __pd.merge(left, right, how="outer", on=["Tarih", "Saat"], sort=True),
list_df_unit)
return df_unit
def kudup(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"),
bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"), organizasyon_id="", uevcb_id=""):
"""
    Returns the source-based Finalized Settlement-Period Production Plan (KUDÜP), i.e. the plan updated after
    the intraday market closes, for the given date range.
    Note: if "organizasyon_id" is given and "uevcb_id" is not, the KUDÜP is returned as the total over all
    UEVÇBs of that organization. If both values are given, the KUDÜP of that organization's given UEVÇB is
    returned.
    Parameters
    ----------
    baslangic_tarihi : start date in YYYY-MM-DD format (default: today)
    bitis_tarihi : end date in YYYY-MM-DD format (default: today)
    organizasyon_id : organization id as a string (default: "")
    uevcb_id : UEVÇB id as a string (default: "")
    Returns
    -------
    KUDÜP (Tarih, Saat, Doğalgaz, Barajlı, Linyit, Akarsu, İthal Kömür, Rüzgar, Fuel Oil, Jeo Termal, Taş Kömür,
    Biyokütle, Nafta, Diğer, Toplam)
"""
if __dogrulama.__baslangic_bitis_tarih_dogrulama(baslangic_tarihi, bitis_tarihi):
try:
particular_url = __first_part_url + "sbfgp" + "?startDate=" + baslangic_tarihi + "&endDate=" + \
bitis_tarihi + "&organizationId=" + organizasyon_id + "&uevcbId=" + uevcb_id
json = __make_requests(particular_url)
df = __pd.DataFrame(json["body"]["dppList"])
df["Saat"] = df["tarih"].apply(lambda h: int(h[11:13]))
df["Tarih"] = __pd.to_datetime(df["tarih"].apply(lambda d: d[:10]))
df.rename(index=str,
columns={"akarsu": "Akarsu", "barajli": "Barajlı", "biokutle": "Biyokütle", "diger": "Diğer",
"dogalgaz": "Doğalgaz", "fuelOil": "Fuel Oil", "ithalKomur": "İthal Kömür",
"jeotermal": "Jeo Termal", "linyit": "Linyit", "nafta": "Nafta",
"ruzgar": "Rüzgar", "tasKomur": "Taş Kömür", "toplam": "Toplam"}, inplace=True)
df = df[["Tarih", "Saat", "Doğalgaz", "Barajlı", "Linyit", "Akarsu", "İthal Kömür", "Rüzgar",
"Fuel Oil", "Jeo Termal", "Taş Kömür", "Biyokütle", "Nafta", "Diğer", "Toplam"]]
except (KeyError, TypeError):
return __pd.DataFrame()
else:
return df
def uevm(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"),
bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d")):
"""
    Returns the hourly Settlement-Based Injection Quantity (UEVM) for the given date range.
    Parameters
    ----------
    baslangic_tarihi : start date in YYYY-MM-DD format (default: today)
    bitis_tarihi : end date in YYYY-MM-DD format (default: today)
    Returns
    -------
    Settlement-Based Injection Quantity (Tarih, Saat, UEVM)
"""
if __dogrulama.__baslangic_bitis_tarih_dogrulama(baslangic_tarihi, bitis_tarihi):
try:
particular_url = __first_part_url + "ssv-categorized" + "?startDate=" + baslangic_tarihi + "&endDate=" + \
bitis_tarihi
json = __make_requests(particular_url)
df = __pd.DataFrame(json["body"]["ssvList"])
df["Saat"] = df["date"].apply(lambda h: int(h[11:13]))
df["Tarih"] = __pd.to_datetime(df["date"].apply(lambda d: d[:10]))
df.rename(index=str,
columns={"asphaltite": "Asfaltit Kömür", "river": "Akarsu", "dam": "Barajlı",
"biomass": "Biyokütle", "naturalGas": "Doğalgaz", "fueloil": "Fuel Oil",
"importedCoal": "İthal Kömür", "geothermal": "Jeo Termal", "lignite": "Linyit",
"naphtha": "Nafta", "lng": "LNG", "wind": "Rüzgar", "stonecoal": "Taş Kömür",
"international": "Uluslararası", "total": "Toplam", "other": "Diğer"},
inplace=True)
df = df[
["Tarih", "Saat", "Doğalgaz", "Barajlı", "Linyit", "Akarsu", "<NAME>", "Rüzgar",
"Fuel Oil", "Jeo Termal", "Asfaltit Kömür", "Taş Kömür", "Biyokütle", "Nafta", "LNG", "Uluslararası",
"Diğer", "Toplam"]]
except (KeyError, TypeError):
return __pd.DataFrame()
else:
return df
def gerceklesen(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"),
bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"), santral_id=""):
"""
    Returns the total real-time generation of licensed power plants for the given date range.
    Note: if "santral_id" is given, the real-time generation of that plant is returned; otherwise the total
    real-time generation is returned.
    Parameters
    ----------
    baslangic_tarihi : start date in YYYY-MM-DD format (default: today)
    bitis_tarihi : end date in YYYY-MM-DD format (default: today)
    santral_id : plant id as a string or integer (default: "")
    Returns
    -------
    Real-Time Generation ("Tarih", "Saat", "Doğalgaz", "Barajlı", "Linyit", "Akarsu", "İthal Kömür", "Rüzgar",
    "Güneş", "Fuel Oil", "Jeo Termal", "Asfaltit Kömür", "Taş Kömür", "Biokütle", "Nafta", "LNG",
    "Uluslararası", "Toplam")
"""
if __dogrulama.__baslangic_bitis_tarih_id_dogrulama(baslangic_tarihi, bitis_tarihi, santral_id):
if santral_id == "":
return __gerceklesen(baslangic_tarihi, bitis_tarihi)
else:
return __santral_bazli_gerceklesen(baslangic_tarihi, bitis_tarihi, santral_id)
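# Illustrative usage (sketch; both calls hit the live API and the plant id below is
# hypothetical): without santral_id the system-wide real-time generation is returned, with a
# plant id only that plant's series is returned.
#   >>> gerceklesen("2021-01-01", "2021-01-01")
#   >>> gerceklesen("2021-01-01", "2021-01-01", santral_id=123)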
def tum_santraller_gerceklesen(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"),
bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d")):
"""
    Returns the real-time generation of all licensed power plants for the given date range.
    Parameters
    ----------
    baslangic_tarihi : start date in YYYY-MM-DD format (default: today)
    bitis_tarihi : end date in YYYY-MM-DD format (default: today)
    Returns
    -------
    Real-Time Generation Values of All Licensed Plants (Tarih, Saat, per-plant generation)
"""
if __dogrulama.__baslangic_bitis_tarih_dogrulama(baslangic_tarihi, bitis_tarihi):
sant = __santraller.gercek_zamanli_uretim_yapan_santraller()
list_sant = sant[["Id", "Kısa Adı"]].to_dict("records")
list_sant_len = len(list_sant)
list_sant = list(
zip([baslangic_tarihi] * list_sant_len, [bitis_tarihi] * list_sant_len, list_sant))
list_sant = list(map(list, list_sant))
with __Pool(__mp.cpu_count()) as p:
list_df_unit = p.starmap(__gerceklesen_santral, list_sant, chunksize=1)
list_df_unit = list(filter(lambda x: len(x) > 0, list_df_unit))
df_unit = __red(lambda left, right: __pd.merge(left, right, how="outer", on=["Tarih", "Saat"], sort=True),
list_df_unit)
return df_unit
def gddk(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"),
bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d")):
"""
    Returns the values of the retroactive adjustment item (GDDK) for the given date range.
    Parameters
    ----------
    baslangic_tarihi : start date in YYYY-MM-DD format (default: today)
    bitis_tarihi : end date in YYYY-MM-DD format (default: today)
    Returns
    -------
    GDDK per settlement period (₺)
"""
if __dogrulama.__baslangic_bitis_tarih_dogrulama(baslangic_tarihi, bitis_tarihi):
try:
particular_url = \
__first_part_url + "gddk-amount" + "?startDate=" + baslangic_tarihi + "&endDate=" + bitis_tarihi
json = __make_requests(particular_url)
df =
|
__pd.DataFrame(json["body"]["gddkAmountList"])
|
pandas.DataFrame
|
import pandas as pd
from tqdm import tqdm
import dgp
import design
from plan import Plan
import estimator as est
import evaluator as evalr
import numpy as np
def make_plan(designs):
plan = Plan()
for name, dgn, estr in designs:
plan.add_design(name, dgn, estr)
plan.add_evaluator('ATEError', evalr.ATEError)
plan.add_evaluator('ITEBias', evalr.ITEBias)
plan.add_evaluator('ITEMSE', evalr.ITEMSE)
plan.add_evaluator('CovariateMSE', evalr.CovariateMSE)
plan.add_evaluator('ATECovers', evalr.ATECovers)
plan.add_evaluator('CISize', evalr.CISize)
return plan
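# Note (sketch): each tuple passed to make_plan is (label, design class, estimator class);
# the same evaluator set is registered for every design, and plan.execute(dgp_factory,
# seed=...) returns a results DataFrame, which is how the loops below consume it before
# writing one results/*.csv.gz file per (dgp, sample_size, iteration).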
NUM_ITERS = 100
plan = make_plan([
('QuickBlock-B', design.QuickBlock, est.Blocking),
('QuickBlock-RF', design.QuickBlock, est.BlockingRF),
('OptBlock-B', design.OptBlock, est.Blocking),
('OptBlock-RF', design.OptBlock, est.BlockingRF),
('Randomization-RF', design.Bernoulli, est.OLSandRFT),
('Fixed Margins Randomization-RF', design.Complete, est.OLSandRFT),
('Rerandomization-RF', design.ReRandomization, est.OLSandRFT),
#('Rerandomization-KNN', design.ReRandomization, est.OLSandKNNT),
('Matched Pair-B', design.MatchedPair, est.MatchedPairBlocking),
('SoftBlock-L', design.SoftBlock, est.LaplacianNorm),
('SoftBlock-RF', design.SoftBlock, est.OLSandRFT),
('GreedyNeighbors-RF', design.GreedyNeighbors, est.OLSandRFT),
('GreedyNeighbors-L', design.GreedyNeighbors, est.LaplacianNorm),
#('KallusHeuristic-KNN', design.Heuristic, est.DMandKNNT),
('KallusHeuristic-RF', design.Heuristic, est.DMandRFT),
#('KallusPSOD-KNN', design.PSOD, est.DMandKNNT),
('KallusPSOD-RF', design.PSOD, est.DMandRFT),
])
dfs = []
dgp_factory_class_list = [dgp.LinearFactory, dgp.QuickBlockFactory, dgp.SinusoidalFactory, dgp.TwoCirclesFactory]
sample_sizes = [10, 20, 32, 50, 64, 72, 86, 100]
for sample_size in sample_sizes[::-1]:
print(f"Sample Size: {sample_size}")
dgp_factory_list = [factory(N=sample_size) for factory in dgp_factory_class_list]
for dgp_factory in dgp_factory_list:
dgp_name = type(dgp_factory.create_dgp()).__name__
print(f"DGP name: {dgp_name}")
for it in tqdm(range(NUM_ITERS)):
result = plan.execute(dgp_factory, seed=it * 1001)
result['iteration'] = it
result['sample_size'] = sample_size
result['dgp'] = dgp_name
filename = f"results/{dgp_name}_n{sample_size}_i{it}.csv.gz"
result.to_csv(filename, index=False)
dfs.append(result)
plan = make_plan([
('QuickBlock-B', design.QuickBlock, est.Blocking),
('QuickBlock-RF', design.QuickBlock, est.BlockingRF),
#('OptBlock-B', design.OptBlock, est.Blocking),
('OptBlock-RF', design.OptBlock, est.BlockingRF),
('SoftBlock-L', design.SoftBlock, est.LaplacianNorm),
('SoftBlock-RF', design.SoftBlock, est.OLSandRFT),
('GreedyNeighbors-RF', design.GreedyNeighbors, est.OLSandRFT),
('GreedyNeighbors-L', design.GreedyNeighbors, est.LaplacianNorm),
('Randomization-RF', design.Bernoulli, est.OLSandRFT),
    ('Fixed Margins Randomization-RF', design.Complete, est.OLSandRFT),
('Rerandomization-RF', design.ReRandomization, est.OLSandRFT),
#('Rerandomization-KNN', design.ReRandomization, est.OLSandKNNT),
#('KallusPSOD-KNN', design.PSOD, est.DMandKNNT),
('KallusPSOD-RF', design.PSOD, est.DMandRFT),
])
sample_sizes = [128, 150, 200, 250, 500, 1000, 2000, 3000]
for sample_size in sample_sizes[::-1]:
print(f"Sample Size: {sample_size}")
dgp_factory_list = [factory(N=sample_size) for factory in dgp_factory_class_list]
for dgp_factory in dgp_factory_list:
dgp_name = type(dgp_factory.create_dgp()).__name__
print(f"DGP name: {dgp_name}")
for it in tqdm(range(NUM_ITERS)):
result = plan.execute(dgp_factory, seed=it * 1001)
result['iteration'] = it
result['sample_size'] = sample_size
result['dgp'] = dgp_name
filename = f"results/{dgp_name}_n{sample_size}_i{it}.csv.gz"
result.to_csv(filename, index=False)
dfs.append(result)
plan = make_plan([
('SoftBlock-L', design.SoftBlock, est.LaplacianNorm),
('SoftBlock-RF', design.SoftBlock, est.OLSandRFT),
('GreedyNeighbors-RF', design.GreedyNeighbors, est.OLSandRFT),
('GreedyNeighbors-L', design.GreedyNeighbors, est.LaplacianNorm),
('QuickBlock-B', design.QuickBlock, est.Blocking),
('QuickBlock-RF', design.QuickBlock, est.BlockingRF),
('Randomization', design.Bernoulli, est.OLSandRFT),
('Fixed Margins Randomization-RF', design.Complete, est.OLSandRFT),
('Rerandomization-RF', design.ReRandomization, est.OLSandRFT),
# ('Rerandomization-KNN', design.ReRandomization, est.OLSandKNNT),
])
NUM_ITERS = 25
sample_sizes = [4000, 5000, 7500, 10000, 12500, 15000, 20000, 25000, 30000, 50000, 100000]
for sample_size in sample_sizes:
print(f"Sample Size: {sample_size}")
dgp_factory_list = [factory(N=sample_size) for factory in dgp_factory_class_list]
for dgp_factory in dgp_factory_list:
dgp_name = type(dgp_factory.create_dgp()).__name__
print(f"DGP name: {dgp_name}")
for it in tqdm(range(NUM_ITERS)):
result = plan.execute(dgp_factory, seed=it * 1001)
result['iteration'] = it
result['sample_size'] = sample_size
result['dgp'] = dgp_name
filename = f"results/{dgp_name}_n{sample_size}_i{it}.csv.gz"
result.to_csv(filename, index=False)
dfs.append(result)
NUM_ITERS = 5
sample_sizes = [50000, 100000, 500000, 1000000]
for sample_size in sample_sizes:
print(f"Sample Size: {sample_size}")
dgp_factory_list = [factory(N=sample_size) for factory in dgp_factory_class_list]
for dgp_factory in dgp_factory_list:
dgp_name = type(dgp_factory.create_dgp()).__name__
print(f"DGP name: {dgp_name}")
for it in tqdm(range(NUM_ITERS)):
result = plan.execute(dgp_factory, seed=it * 1001)
result['iteration'] = it
result['sample_size'] = sample_size
result['dgp'] = dgp_name
filename = f"results/{dgp_name}_n{sample_size}_i{it}.csv.gz"
result.to_csv(filename, index=False)
dfs.append(result)
results =
|
pd.concat(dfs)
|
pandas.concat
|
import pandas as pd
tmtids = pd.read_csv('tmtids.csv')
tmtvac = pd.read_csv('tmt_speedvac_group.csv')
hphdate = pd.read_csv('hphdate.csv')
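# NOTE: `isodate` is used in the first merge below but is not defined in this snippet; it is
# presumably loaded from another CSV keyed by a 'TMT Set' column, e.g. (hypothetical filename):
# isodate = pd.read_csv('isodate.csv')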
tmt_merged = pd.merge(tmtids, isodate, left_on='TMT-10 Set', right_on='TMT Set')
tmt_merged = pd.merge(tmt_merged, tmtvac, on='TMT Set')
tmt_merged = pd.merge(tmt_merged, hphdate, on='TMT Set')
tmt_merged = tmt_merged.drop('TMT-10 Set',axis=1)
prepids = pd.read_csv('prepids.csv')
neg20group = pd.read_csv('neg20group.csv')
gilsongroup = pd.read_csv('gilsongroup.csv')
lysegroup = pd.read_csv('lysegroup.csv')
prepdat = pd.read_csv('prep_data.csv')
prep_merged = pd.merge(prepids, neg20group, on='Prep Set')
prep_merged = pd.merge(prep_merged, gilsongroup, on='Prep Set')
prep_merged =
|
pd.merge(prep_merged, lysegroup, on='Prep Set')
|
pandas.merge
|
import numpy as np
import pytest
from pandas import Categorical, Series
import pandas._testing as tm
@pytest.mark.parametrize(
"keep, expected",
[
("first",
|
Series([False, False, False, False, True, True, False])
|
pandas.Series
|
import pandas as pd
import seaborn as sns
import statsmodels.api as sm
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from numpy.polynomial.polynomial import polyfit
from scipy.stats import shapiro
from scipy.stats import ttest_ind as tt
from scipy.stats import spearmanr as corrp
import numpy as np
from statsmodels.graphics.gofplots import qqplot
font = {'family' : 'sans-serif',
'weight' : 'light',
'size' : 16}
matplotlib.rc('font', **font)
bad_indices=[]
sr_data=pd.read_csv('self_report_study2.csv') #load self-report data
mb_agnostic=pd.read_csv('mb_scores_rares_empirical_best.csv')
mb_scores=mb_agnostic['MB_behav']
state_t1=pd.read_csv('Gillan_TL_full_lrT.csv',header=None) #load state transition lrs
state_t=pd.read_csv('Gillan_Or_full_lrT_decay.csv',header=None) #load state transition lrs
print(len(state_t1))
r,p=corrp(state_t1[0],state_t[0])
print('CORREL ST TL both models : {}, p {}'.format(r,p))
it_mb=pd.read_csv('Gillan_Or_full_MB_decay.csv',header=None) #load MB beta
# it_mb=np.log(it_mb)
mf1=pd.read_csv('Gillan_Or_full_MF1_decay.csv',header=None)
mf2=pd.read_csv('Gillan_Or_full_mf2_decay.csv',header=None)
lr1=pd.read_csv('Gillan_Or_full_lr1_decay.csv',header=None)
# lr2=pd.read_csv('Gillan_O_full_lr2_decay.csv',header=None)
st=
|
pd.read_csv('Gillan_Or_full_st_decay.csv',header=None)
|
pandas.read_csv
|
import pandas as pd
import pytest
from kartothek.core.cube.cube import Cube
from kartothek.core.dataset import DatasetMetadata
from kartothek.core.index import ExplicitSecondaryIndex, PartitionIndex
from kartothek.io.eager_cube import build_cube
from kartothek.io.testing.utils import assert_num_row_groups
from kartothek.io_components.cube.write import MultiTableCommitAborted
from kartothek.io_components.metapartition import SINGLE_TABLE
from kartothek.serialization._parquet import ParquetSerializer
__all__ = (
"existing_cube",
"test_compression_is_compatible_on_extend_cube",
"test_fail_all_empty",
"test_fail_no_store_factory",
"test_fail_not_a_df",
"test_fail_wrong_dataset_ids",
"test_fails_incompatible_dtypes",
"test_fails_metadata_nested_wrong_type",
"test_fails_metadata_unknown_id",
"test_fails_metadata_wrong_type",
"test_fails_overlapping_payload_enrich",
"test_fails_overlapping_payload_overwrite",
"test_fails_overlapping_payload_partial",
"test_fails_overlapping_payload_seed",
"test_fails_seed_dataset",
"test_overwrite_move_columns",
"test_overwrite_single",
"test_rowgroups_are_applied_when_df_serializer_is_passed_to_extend_cube",
"test_simple",
"test_single_rowgroup_when_df_serializer_is_not_passed_to_extend_cube",
)
@pytest.fixture
def existing_cube(function_store):
df_source = pd.DataFrame(
{"x": [0, 1, 2, 3], "p": [0, 0, 1, 1], "v1": [10, 11, 12, 13]}
)
df_enrich = pd.DataFrame(
{"x": [0, 1, 2, 3], "p": [0, 0, 1, 1], "v2": [10, 11, 12, 13]}
)
cube = Cube(
dimension_columns=["x"],
partition_columns=["p"],
uuid_prefix="cube",
seed_dataset="source",
index_columns=["i1", "i2", "i3"],
)
build_cube(
data={"source": df_source, "enrich": df_enrich}, cube=cube, store=function_store
)
return cube
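# Note: the fixture above creates a cube whose seed dataset "source" carries payload column
# v1 and whose second dataset "enrich" carries v2, both keyed by the dimension column "x"
# and partition column "p"; the extend tests below add further datasets (e.g. "extra") on
# top of this layout.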
def test_simple(driver, function_store, existing_cube):
"""
Simple integration test w/ single extra dataset.
"""
df = pd.DataFrame(
{
"x": [0, 1, 2, 3],
"p": [0, 0, 1, 1],
"v3": [10, 11, 12, 13],
"i3": [100, 101, 102, 103],
}
)
result = driver(data={"extra": df}, cube=existing_cube, store=function_store)
assert set(result.keys()) == {"extra"}
ds = list(result.values())[0]
ds = ds.load_all_indices(function_store())
assert ds.uuid == existing_cube.ktk_dataset_uuid("extra")
assert len(ds.partitions) == 2
assert set(ds.indices.keys()) == {"p", "i3"}
assert isinstance(ds.indices["p"], PartitionIndex)
assert isinstance(ds.indices["i3"], ExplicitSecondaryIndex)
assert ds.table_name == SINGLE_TABLE
@pytest.mark.parametrize("chunk_size", [None, 2])
def test_rowgroups_are_applied_when_df_serializer_is_passed_to_extend_cube(
driver, function_store, existing_cube, chunk_size
):
"""
Test that the dataset is split into row groups depending on the chunk size
"""
df_extra = pd.DataFrame(
data={"x": [0, 1, 2, 3], "p": [0, 1, 1, 1]}, columns=["x", "p"],
)
result = driver(
data={"extra": df_extra},
cube=existing_cube,
store=function_store,
df_serializer=ParquetSerializer(chunk_size=chunk_size),
)
dataset = result["extra"].load_all_indices(function_store())
part_num_rows = {0: 1, 1: 3}
part_chunk_size = {0: chunk_size, 1: chunk_size}
assert len(dataset.partitions) == 2
assert_num_row_groups(function_store(), dataset, part_num_rows, part_chunk_size)
def test_single_rowgroup_when_df_serializer_is_not_passed_to_extend_cube(
driver, function_store, existing_cube
):
"""
    Test that the dataset has a single row group by default
"""
df_extra = pd.DataFrame(
data={"x": [0, 1, 2, 3], "p": [0, 1, 1, 1]}, columns=["x", "p"],
)
result = driver(data={"extra": df_extra}, cube=existing_cube, store=function_store,)
dataset = result["extra"].load_all_indices(function_store())
part_num_rows = {0: 1, 1: 3}
part_chunk_size = {0: None, 1: None}
assert len(dataset.partitions) == 2
assert_num_row_groups(function_store(), dataset, part_num_rows, part_chunk_size)
def test_compression_is_compatible_on_extend_cube(driver, function_store):
"""
    Test that partitions written with different compression algorithms are compatible.
The compression algorithms are not parametrized because their availability depends
on the arrow build. 'SNAPPY' and 'GZIP' are already assumed to be available in parts
of the code. A fully parametrized test would also increase runtime and test complexity
unnecessarily.
"""
# Build cube
df = pd.DataFrame(data={"x": [0, 1, 2, 3], "p": [0, 0, 1, 1]}, columns=["x", "p"],)
cube = Cube(dimension_columns=["x"], partition_columns=["p"], uuid_prefix="rg-cube")
build_cube(
data=df,
cube=cube,
store=function_store,
df_serializer=ParquetSerializer(compression="SNAPPY"),
)
df_extra = pd.DataFrame(
data={"x": [0, 1, 2, 3], "p": [0, 1, 1, 1]}, columns=["x", "p"],
)
result = driver(
data={"extra": df_extra},
cube=cube,
store=function_store,
df_serializer=ParquetSerializer(compression="GZIP"),
)
dataset = result["extra"].load_all_indices(function_store())
assert len(dataset.partitions) == 2
def test_fails_incompatible_dtypes(driver, function_store, existing_cube):
"""
Should also cross check w/ seed dataset.
"""
df = pd.DataFrame(
{
"x": [0.0, 1.0, 2.0, 3.0],
"p": [0, 0, 1, 1],
"v3": [10, 11, 12, 13],
"i3": [100, 101, 102, 103],
}
)
with pytest.raises(MultiTableCommitAborted) as exc_info:
driver(data={"extra": df}, cube=existing_cube, store=function_store)
cause = exc_info.value.__cause__
assert isinstance(cause, ValueError)
assert 'Found incompatible entries for column "x"' in str(cause)
assert not DatasetMetadata.exists(
existing_cube.ktk_dataset_uuid("extra"), function_store()
)
def test_fails_seed_dataset(driver, function_store, existing_cube):
"""
    Users cannot overwrite the seed dataset since it is used for consistency checks.
"""
pre_keys = set(function_store().keys())
df = pd.DataFrame({"x": [0, 1, 2, 3], "p": [0, 0, 1, 1], "v1": [10, 11, 12, 13]})
with pytest.raises(ValueError) as exc:
driver(
data={existing_cube.seed_dataset: df},
cube=existing_cube,
store=function_store,
)
assert 'Seed data ("source") cannot be written during extension.' in str(exc.value)
post_keys = set(function_store().keys())
assert pre_keys == post_keys
def test_fails_overlapping_payload_seed(driver, function_store, existing_cube):
"""
Forbidden by spec, results in problems during query.
"""
pre_keys = set(function_store().keys())
df = pd.DataFrame({"x": [0, 1, 2, 3], "p": [0, 0, 1, 1], "v1": [10, 11, 12, 13]})
with pytest.raises(ValueError) as exc:
driver(data={"extra": df}, cube=existing_cube, store=function_store)
assert 'Payload written in "extra" is already present in cube: v1' in str(exc.value)
assert not DatasetMetadata.exists(
existing_cube.ktk_dataset_uuid("extra"), function_store()
)
post_keys = set(function_store().keys())
assert pre_keys == post_keys
def test_fails_overlapping_payload_enrich(driver, function_store, existing_cube):
"""
Forbidden by spec, results in problems during query.
"""
pre_keys = set(function_store().keys())
df = pd.DataFrame({"x": [0, 1, 2, 3], "p": [0, 0, 1, 1], "v2": [10, 11, 12, 13]})
with pytest.raises(ValueError) as exc:
driver(data={"extra": df}, cube=existing_cube, store=function_store)
assert 'Payload written in "extra" is already present in cube: v2' in str(exc.value)
assert not DatasetMetadata.exists(
existing_cube.ktk_dataset_uuid("extra"), function_store()
)
post_keys = set(function_store().keys())
assert pre_keys == post_keys
def test_fails_overlapping_payload_partial(driver, function_store, existing_cube):
"""
Forbidden by spec, results in problems during query.
"""
pre_keys = set(function_store().keys())
df1 = pd.DataFrame({"x": [0, 1, 2, 3], "p": [0, 0, 1, 1], "v2": [10, 11, 12, 13]})
df2 = pd.DataFrame({"x": [0, 1, 2, 3], "p": [0, 0, 1, 1], "v3": [10, 11, 12, 13]})
with pytest.raises(ValueError) as exc:
driver(
data={"extra1": df1, "extra2": df2},
cube=existing_cube,
store=function_store,
)
assert 'Payload written in "extra1" is already present in cube: v2' in str(
exc.value
)
assert not DatasetMetadata.exists(
existing_cube.ktk_dataset_uuid("extra1"), function_store()
)
# extra2 might exist, depending on the compute graph
    # extra2 keys might be present; only check that extra1 is absent
post_keys = set(function_store().keys())
extra_keys = post_keys - pre_keys
extra1_keys = {k for k in extra_keys if "extra1" in k}
assert extra1_keys == set()
def test_fails_overlapping_payload_overwrite(driver, function_store, existing_cube):
"""
Forbidden by spec, results in problems during query.
"""
pre_keys = set(function_store().keys())
df =
|
pd.DataFrame({"x": [0, 1, 2, 3], "p": [0, 0, 1, 1], "v1": [10, 11, 12, 13]})
|
pandas.DataFrame
|
import os
import audiofile
import audiofile as af
import numpy as np
import pandas as pd
import pytest
import audinterface
import audformat
def signal_duration(signal, sampling_rate):
return signal.shape[1] / sampling_rate
def signal_max(signal, sampling_rate):
return np.max(signal)
SEGMENT = audinterface.Segment(
process_func=lambda x, sr:
audinterface.utils.signal_index(
pd.to_timedelta(0),
pd.to_timedelta(x.shape[1] / sr, unit='s') / 2,
)
)
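# Note: SEGMENT returns an index from 0 to half the signal duration, i.e. it restricts
# processing to the first half of each signal; this is why the segmented test cases below
# expect only the first half of the input (e.g. [1., 2.] from [1., 2., 3., 4.]).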
def signal_modification(signal, sampling_rate, subtract=False):
if subtract:
signal -= 0.1 * signal
else:
signal += 0.1 * signal
return signal
@pytest.mark.parametrize(
'process_func, segment, signal, sampling_rate, start, end, keep_nat, '
'channels, mixdown, expected_output',
[
(
signal_max,
None,
np.ones((1, 3)),
8000,
None,
None,
False,
None,
False,
1,
),
(
signal_max,
SEGMENT,
np.ones((1, 8000)),
8000,
None,
None,
False,
None,
False,
1,
),
(
signal_max,
None,
np.ones(3),
8000,
None,
None,
False,
0,
False,
1,
),
(
signal_max,
None,
np.array([[0., 0., 0.], [1., 1., 1.]]),
8000,
None,
None,
False,
0,
False,
0,
),
(
signal_max,
None,
np.array([[0., 0., 0.], [1., 1., 1.]]),
8000,
None,
None,
False,
0,
False,
0,
),
(
signal_max,
None,
np.array([[0., 0., 0.], [1., 1., 1.]]),
8000,
None,
None,
False,
1,
False,
1,
),
(
signal_max,
None,
np.array([[0., 0., 0.], [1., 1., 1.]]),
8000,
None,
None,
False,
None,
True,
0.5,
),
(
signal_max,
None,
np.array([[-1., -1., -1.], [0., 0., 0.], [1., 1., 1.]]),
8000,
None,
None,
False,
[1, 2],
True,
0.5,
),
# invalid channel selection
pytest.param(
signal_max,
None,
np.ones((1, 3)),
8000,
None,
None,
False,
1,
False,
1,
marks=pytest.mark.xfail(raises=ValueError),
),
(
signal_duration,
None,
np.zeros((1, 24000)),
8000,
None,
None,
False,
None,
False,
3.0,
),
(
signal_duration,
None,
np.zeros((1, 24000)),
8000,
pd.NaT,
pd.NaT,
False,
None,
False,
3.0,
),
(
signal_duration,
None,
np.zeros((1, 24000)),
8000,
pd.NaT,
pd.NaT,
True,
None,
False,
3.0,
),
(
signal_duration,
None,
np.zeros((1, 24000)),
8000,
pd.to_timedelta('1s'),
None,
False,
None,
False,
2.0,
),
(
signal_duration,
None,
np.zeros((1, 24000)),
8000,
pd.to_timedelta('1s'),
pd.NaT,
False,
None,
False,
2.0,
),
(
signal_duration,
None,
np.zeros((1, 24000)),
8000,
None,
pd.to_timedelta('2s'),
False,
None,
False,
2.0,
),
(
signal_duration,
None,
np.zeros((1, 24000)),
8000,
pd.NaT,
pd.to_timedelta('2s'),
False,
None,
False,
2.0,
),
(
signal_duration,
None,
np.zeros((1, 24000)),
8000,
pd.to_timedelta('1s'),
pd.to_timedelta('2s'),
False,
None,
False,
1.0,
),
(
signal_duration,
None,
np.zeros((1, 24000)),
8000,
'1s',
'2s',
False,
None,
False,
1.0,
),
(
signal_duration,
None,
np.zeros((1, 24000)),
8000,
'1000ms',
'2000ms',
False,
None,
False,
1.0,
),
(
signal_duration,
None,
np.zeros((1, 24000)),
8000,
1,
2,
False,
None,
False,
1.0,
),
(
signal_duration,
None,
np.zeros((1, 24000)),
8000,
1.0,
2.0,
False,
None,
False,
1.0,
),
],
)
def test_process_file(
tmpdir,
process_func,
segment,
signal,
sampling_rate,
start,
end,
keep_nat,
channels,
mixdown,
expected_output,
):
process = audinterface.Process(
process_func=process_func,
sampling_rate=sampling_rate,
resample=False,
channels=channels,
mixdown=mixdown,
segment=segment,
keep_nat=keep_nat,
verbose=False,
)
# create test file
root = str(tmpdir.mkdir('wav'))
file = 'file.wav'
path = os.path.join(root, file)
af.write(path, signal, sampling_rate)
# test absolute path
y = process.process_file(
path,
start=start,
end=end,
)
np.testing.assert_almost_equal(
y.values, expected_output, decimal=4,
)
# test relative path
y = process.process_file(
file,
start=start,
end=end,
root=root,
)
np.testing.assert_almost_equal(
y.values, expected_output, decimal=4,
)
@pytest.mark.parametrize(
'process_func, num_files, signal, sampling_rate, starts, ends, '
'expected_output',
[
(
signal_duration,
2,
np.zeros((1, 24000)),
8000,
None,
None,
[3.0] * 2,
),
(
signal_duration,
2,
np.zeros((1, 24000)),
8000,
'1s',
'2s',
[1.0] * 2,
),
(
signal_duration,
2,
np.zeros((1, 24000)),
8000,
1,
2,
[1.0] * 2,
),
(
signal_duration,
2,
np.zeros((1, 24000)),
8000,
[None, 1],
[None, 2],
[3.0, 1.0],
),
(
signal_duration,
2,
np.zeros((1, 24000)),
8000,
[None, '1s'],
[None, '2s'],
[3.0, 1.0],
),
(
signal_duration,
3,
np.zeros((1, 24000)),
8000,
[None, '1s'],
[None, '2s', None],
[3.0, 1.0],
),
(
signal_duration,
1,
np.zeros((1, 24000)),
8000,
[None],
[None, '2s'],
[3.0],
),
],
)
def test_process_files(
tmpdir,
process_func,
num_files,
signal,
sampling_rate,
starts,
ends,
expected_output,
):
process = audinterface.Process(
process_func=process_func,
sampling_rate=sampling_rate,
resample=False,
verbose=False,
)
# create files
files = []
paths = []
root = tmpdir
for idx in range(num_files):
file = f'file{idx}.wav'
path = os.path.join(root, file)
af.write(path, signal, sampling_rate)
files.append(file)
paths.append(path)
# test absolute paths
output = process.process_files(
paths,
starts=starts,
ends=ends,
)
np.testing.assert_almost_equal(
output.values,
expected_output,
decimal=4,
)
# test relative paths
output = process.process_files(
files,
starts=starts,
ends=ends,
root=root,
)
np.testing.assert_almost_equal(
output.values,
expected_output,
decimal=4,
)
@pytest.mark.parametrize(
'num_files, segment, num_workers, multiprocessing',
[
(3, None, 1, False, ),
(3, None, 2, False, ),
(3, None, None, False, ),
(3, SEGMENT, 1, False, ),
]
)
def test_process_folder(
tmpdir,
num_files,
segment,
num_workers,
multiprocessing,
):
process = audinterface.Process(
process_func=None,
sampling_rate=None,
resample=False,
segment=segment,
num_workers=num_workers,
multiprocessing=multiprocessing,
verbose=False,
)
sampling_rate = 8000
path = str(tmpdir.mkdir('wav'))
files = [
os.path.join(path, f'file{n}.wav') for n in range(num_files)
]
for file in files:
signal = np.random.uniform(-1.0, 1.0, (1, sampling_rate))
af.write(file, signal, sampling_rate)
y = process.process_folder(path)
pd.testing.assert_series_equal(
y,
process.process_files(files),
)
def test_process_func_args():
def process_func(s, sr, arg1, arg2):
assert arg1 == 'foo'
assert arg2 == 'bar'
audinterface.Process(
process_func=process_func,
process_func_args={
'arg1': 'foo',
'arg2': 'bar',
}
)
with pytest.warns(UserWarning):
audinterface.Process(
process_func=process_func,
arg1='foo',
arg2='bar',
)
@pytest.mark.parametrize(
'num_workers, multiprocessing',
[
(1, False, ),
(2, False, ),
(None, False, ),
]
)
def test_process_index(tmpdir, num_workers, multiprocessing):
process = audinterface.Process(
process_func=None,
sampling_rate=None,
resample=False,
num_workers=num_workers,
multiprocessing=multiprocessing,
verbose=False,
)
sampling_rate = 8000
signal = np.random.uniform(-1.0, 1.0, (1, 3 * sampling_rate))
# create file
root = str(tmpdir.mkdir('wav'))
file = 'file.wav'
path = os.path.join(root, file)
af.write(path, signal, sampling_rate)
# empty index
index = audformat.segmented_index()
y = process.process_index(index)
assert y.empty
# segmented index with absolute paths
index = audformat.segmented_index(
[path] * 3,
pd.timedelta_range('0s', '2s', 3),
pd.timedelta_range('1s', '3s', 3),
)
y = process.process_index(index)
for (path, start, end), value in y.items():
signal, sampling_rate = audinterface.utils.read_audio(
path, start=start, end=end
)
np.testing.assert_equal(signal, value)
# filewise index with absolute paths
index = audformat.filewise_index(path)
y = process.process_index(index)
for (path, start, end), value in y.items():
signal, sampling_rate = audinterface.utils.read_audio(
path, start=start, end=end
)
np.testing.assert_equal(signal, value)
# segmented index with relative paths
index = audformat.segmented_index(
[file] * 3,
pd.timedelta_range('0s', '2s', 3),
pd.timedelta_range('1s', '3s', 3),
)
y = process.process_index(index, root=root)
for (file, start, end), value in y.items():
signal, sampling_rate = audinterface.utils.read_audio(
file, start=start, end=end, root=root
)
np.testing.assert_equal(signal, value)
# filewise index with relative paths
    index = audformat.filewise_index(file)
y = process.process_index(index, root=root)
for (file, start, end), value in y.items():
signal, sampling_rate = audinterface.utils.read_audio(
file, start=start, end=end, root=root
)
np.testing.assert_equal(signal, value)
@pytest.mark.parametrize(
'process_func, process_func_args, segment, signal, '
'sampling_rate, file, start, end, keep_nat, expected_signal',
[
(
None,
{},
None,
np.array([1., 2., 3.]),
44100,
None,
None,
None,
False,
np.array([1., 2., 3.]),
),
(
None,
{},
None,
np.array([1., 2., 3.]),
44100,
'file',
None,
None,
False,
np.array([1., 2., 3.]),
),
(
None,
{},
SEGMENT,
np.array([1., 2., 3., 4.]),
44100,
None,
None,
None,
False,
np.array([1., 2.]),
),
(
None,
{},
SEGMENT,
np.array([1., 2., 3., 4.]),
44100,
'file',
None,
None,
False,
np.array([1., 2.]),
),
(
signal_max,
{},
None,
np.array([1., 2., 3.]),
44100,
None,
None,
None,
False,
3.0,
),
(
signal_duration,
{},
None,
np.array([1., 2., 3.]),
3,
None,
None,
None,
False,
1.0,
),
(
signal_duration,
{},
None,
np.array([1., 2., 3.]),
1,
None,
pd.to_timedelta('2s'),
None,
False,
1.0,
),
(
signal_duration,
{},
None,
np.array([1., 2., 3.]),
1,
None,
None,
pd.to_timedelta('1s'),
False,
1.0,
),
(
signal_duration,
{},
None,
np.array([1., 2., 3.]),
1,
None,
None,
pd.NaT,
False,
3.0,
),
(
signal_duration,
{},
None,
np.array([1., 2., 3.]),
1,
None,
None,
pd.NaT,
True,
3.0,
),
(
signal_duration,
{},
None,
np.array([1., 2., 3.]),
1,
None,
pd.to_timedelta('1s'),
pd.to_timedelta('2s'),
False,
1.0,
),
(
signal_duration,
{},
None,
np.array([1., 2., 3.]),
1,
None,
1,
2,
False,
1.0,
),
(
signal_duration,
{},
None,
np.array([1., 2., 3.]),
1,
None,
1.0,
2.0,
False,
1.0,
),
(
signal_duration,
{},
None,
np.array([1., 2., 3.]),
1,
None,
'1s',
'2s',
False,
1.0,
),
(
signal_duration,
{},
None,
np.array([1., 2., 3.]),
1,
'file',
pd.to_timedelta('1s'),
pd.to_timedelta('2s'),
False,
1.0,
),
(
signal_modification,
{},
None,
np.array([1., 1., 1.]),
44100,
None,
None,
None,
False,
np.array([[1.1, 1.1, 1.1]]),
),
(
signal_modification,
{'subtract': False},
None,
np.array([1., 1., 1.]),
44100,
None,
None,
None,
False,
np.array([[1.1, 1.1, 1.1]]),
),
(
signal_modification,
{'subtract': True},
None,
np.array([1., 1., 1.]),
44100,
None,
None,
None,
False,
np.array([[0.9, 0.9, 0.9]]),
),
],
)
def test_process_signal(
process_func,
process_func_args,
segment,
signal,
sampling_rate,
file,
start,
end,
keep_nat,
expected_signal,
):
process = audinterface.Process(
process_func=process_func,
sampling_rate=None,
resample=False,
segment=segment,
keep_nat=keep_nat,
verbose=False,
process_func_args=process_func_args,
)
x = process.process_signal(
signal,
sampling_rate,
file=file,
start=start,
end=end,
)
signal = np.atleast_2d(signal)
if start is None or pd.isna(start):
start = pd.to_timedelta(0)
elif isinstance(start, (int, float)):
start = pd.to_timedelta(start, 's')
elif isinstance(start, str):
start = pd.to_timedelta(start)
if end is None or (pd.isna(end) and not keep_nat):
end = pd.to_timedelta(
signal.shape[1] / sampling_rate,
unit='s',
)
elif isinstance(end, (int, float)):
end = pd.to_timedelta(end, 's')
elif isinstance(end, str):
end = pd.to_timedelta(end)
if segment is not None:
index = segment.process_signal(
signal,
sampling_rate,
start=start,
end=end,
)
start = index[0][0]
end = index[0][1]
if file is None:
y = pd.Series(
[expected_signal],
index=audinterface.utils.signal_index(start, end),
)
else:
y = pd.Series(
[expected_signal],
index=audformat.segmented_index(file, start, end),
)
pd.testing.assert_series_equal(x, y)
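# Sketch (not part of the original test suite): the parametrisations above mix
# numbers (1, 1.0), strings ('1s', '1000ms') and pandas Timedelta objects for
# start/end. The helper below, assuming pandas is importable, illustrates the
# equivalences that the normalisation in test_process_signal relies on.
def example_start_end_normalisation():
    import pandas as pd
    assert pd.to_timedelta(1, unit='s') == pd.to_timedelta('1s')
    assert pd.to_timedelta('1000ms') == pd.to_timedelta('1s')
    assert pd.isna(pd.NaT)  # NaT marks a missing boundary (kept if keep_nat=True)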
@pytest.mark.parametrize(
'num_workers, multiprocessing',
[
(1, False, ),
(2, False, ),
(None, False, ),
]
)
@pytest.mark.parametrize(
'process_func, signal, sampling_rate, index',
[
(
None,
np.random.random(5 * 44100),
44100,
audinterface.utils.signal_index(),
),
(
None,
np.random.random(5 * 44100),
44100,
audinterface.utils.signal_index(
pd.timedelta_range('0s', '3s', 3),
pd.timedelta_range('1s', '4s', 3)
),
),
(
signal_max,
np.random.random(5 * 44100),
44100,
audinterface.utils.signal_index(
pd.timedelta_range('0s', '3s', 3),
pd.timedelta_range('1s', '4s', 3),
),
),
(
signal_max,
np.random.random(5 * 44100),
44100,
audinterface.utils.signal_index(),
),
pytest.param(
signal_max,
np.random.random(5 * 44100),
44100,
pd.MultiIndex.from_arrays(
[
pd.timedelta_range('0s', '3s', 3),
],
),
marks=pytest.mark.xfail(raises=ValueError)
),
pytest.param(
signal_max,
np.random.random(5 * 44100),
44100,
pd.MultiIndex.from_arrays(
[
['wrong', 'data', 'type'],
pd.timedelta_range('1s', '4s', 3),
],
),
marks=pytest.mark.xfail(raises=ValueError)
),
pytest.param(
signal_max,
np.random.random(5 * 44100),
44100,
pd.MultiIndex.from_arrays(
[
pd.timedelta_range('0s', '3s', 3),
['wrong', 'data', 'type'],
],
),
marks=pytest.mark.xfail(raises=ValueError)
),
],
)
def test_process_signal_from_index(
num_workers,
multiprocessing,
process_func,
signal,
sampling_rate,
index,
):
process = audinterface.Process(
process_func=process_func,
sampling_rate=None,
resample=False,
num_workers=num_workers,
multiprocessing=multiprocessing,
verbose=False,
)
result = process.process_signal_from_index(signal, sampling_rate, index)
expected = []
for start, end in index:
expected.append(
process.process_signal(signal, sampling_rate, start=start, end=end)
)
if not expected:
pd.testing.assert_series_equal(
result,
pd.Series([], index, dtype=float),
)
else:
pd.testing.assert_series_equal(
result,
pd.concat(expected, names=['start', 'end']),
)
@pytest.mark.parametrize(
'segment',
[
audinterface.Segment(
process_func=lambda x, sr: audinterface.utils.signal_index()
),
audinterface.Segment(
process_func=lambda x, sr:
audinterface.utils.signal_index(
pd.to_timedelta(0),
pd.to_timedelta(x.shape[1] / sr, unit='s') / 2,
)
),
audinterface.Segment(
process_func=lambda x, sr:
audinterface.utils.signal_index(
pd.to_timedelta(x.shape[1] / sr, unit='s') / 2,
|
pd.to_timedelta(x.shape[1] / sr, unit='s')
|
pandas.to_timedelta
|
# coding: utf-8
"""
"""
import pandas as pd
import numpy as np
import re
import csv
import io
import time
import traceback
import logging
logging.basicConfig(format="%(asctime)s %(levelname)s: %(message)s", level=logging.DEBUG)
def cal_jlr_zengzhanglv(data, col1, col2):
kuisun_count = 0
if not (data.iat[0] > 0 and data.iat[-1] > 0):
fhzzl = np.nan
else:
        # compound growth rate (CAGR)
fhzzl = ((data.iat[0] / data.iat[-1]) ** (1.0 / (len(data) - 1)) - 1) * 100
for d in data[:-1]:
if d < 0:
kuisun_count += 1
return pd.Series({col1: fhzzl, col2: kuisun_count})
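# Worked example (a sketch, not part of the original script, assuming the
# series is ordered newest value first as in cal_jlr_zengzhanglv): four annual
# profits of 133.1, 121.0, 110.0, 100.0 give (133.1 / 100.0) ** (1 / 3) - 1 = 0.10,
# i.e. a compound growth rate of 10%, and kuisun_count counts how many of the
# newer periods were loss-making.
def example_cal_jlr_zengzhanglv():
    data = pd.Series([133.1, 121.0, 110.0, 100.0])
    out = cal_jlr_zengzhanglv(data, 'fhzzl', 'kuisun_count')
    assert abs(out['fhzzl'] - 10.0) < 1e-6
    assert out['kuisun_count'] == 0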
def cal_PEG(data, col1, col2):
# data.iat[0] is PE
if not (data.iat[0] > 0 and data.iat[1] > 0 and data.iat[-1] > 0):
peg = np.nan
fhzzl = np.nan
else:
        # compound growth rate (CAGR)
fhzzl = ((data.iat[1] / data.iat[-1]) ** (1.0 / (len(data) - 2)) - 1) * 100
if fhzzl == 0:
peg = np.nan
else:
peg = data.iat[0] / fhzzl
return pd.Series({col1: fhzzl, col2: peg})
def generate_date_label(start_label):
dongtai_label = start_label
if pd.Timestamp(dongtai_label).is_year_end:
jingtai_label = dongtai_label
else:
jingtai_label = (pd.Timestamp(dongtai_label)- pd.offsets.YearEnd(1)).strftime("%Y%m%d")
# q4 ttm = q4 + last q4 - last q4
# q3 ttm = q3 + last q4 - last q3
# q2 ttm = q2 + last q4 - last q2
# q1 ttm = q1 + last q4 - last q1
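    # illustrative example (not in the original code), assuming quarterly
    # report dates: for dongtai_label = '20210630' (Q2),
    #   ttm = value('20210630') + value('20201231') - value('20200630')
    # i.e. ttm_label1 is the current quarter, ttm_label2 the previous year end
    # and ttm_label3 the same quarter one year earlier.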
ttm_label1 = dongtai_label
ttm_label2 = (pd.Timestamp(dongtai_label) -
pd.offsets.YearEnd(1)).strftime("%Y%m%d")
ttm_label3 = (pd.Timestamp(dongtai_label) -
|
pd.offsets.DateOffset(years=1)
|
pandas.offsets.DateOffset
|
"""
Tests that work on both the Python and C engines but do not have a
specific classification into the other test modules.
"""
from datetime import datetime
from io import StringIO
import os
import pytest
from pandas import DataFrame, Index, MultiIndex
import pandas._testing as tm
@pytest.mark.parametrize(
"data,kwargs,expected",
[
(
"""foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
""",
{"index_col": 0, "names": ["index", "A", "B", "C", "D"]},
DataFrame(
[
[2, 3, 4, 5],
[7, 8, 9, 10],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
],
index=Index(["foo", "bar", "baz", "qux", "foo2", "bar2"], name="index"),
columns=["A", "B", "C", "D"],
),
),
(
"""foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
""",
{"index_col": [0, 1], "names": ["index1", "index2", "A", "B", "C", "D"]},
DataFrame(
[
[2, 3, 4, 5],
[7, 8, 9, 10],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
],
index=MultiIndex.from_tuples(
[
("foo", "one"),
("foo", "two"),
("foo", "three"),
("bar", "one"),
("bar", "two"),
],
names=["index1", "index2"],
),
columns=["A", "B", "C", "D"],
),
),
],
)
def test_pass_names_with_index(all_parsers, data, kwargs, expected):
parser = all_parsers
result = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("index_col", [[0, 1], [1, 0]])
def test_multi_index_no_level_names(all_parsers, index_col):
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
headless_data = "\n".join(data.split("\n")[1:])
names = ["A", "B", "C", "D"]
parser = all_parsers
result = parser.read_csv(
StringIO(headless_data), index_col=index_col, header=None, names=names
)
expected = parser.read_csv(StringIO(data), index_col=index_col)
# No index names in headless data.
expected.index.names = [None] * 2
tm.assert_frame_equal(result, expected)
def test_multi_index_no_level_names_implicit(all_parsers):
parser = all_parsers
data = """A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
result = parser.read_csv(StringIO(data))
expected = DataFrame(
[
[2, 3, 4, 5],
[7, 8, 9, 10],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
],
columns=["A", "B", "C", "D"],
index=MultiIndex.from_tuples(
[
("foo", "one"),
("foo", "two"),
("foo", "three"),
("bar", "one"),
("bar", "two"),
]
),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"data,expected,header",
[
("a,b",
|
DataFrame(columns=["a", "b"])
|
pandas.DataFrame
|
import os
import json
import time
import gc
import torch
import shutil
import pandas as pd
import numpy as np
from deepbiosphere.scripts import GEOCLEF_Utils as utils
from deepbiosphere.scripts import GEOCLEF_Dataset as dataset
from deepbiosphere.scripts import GEOCLEF_Config as config
from deepbiosphere.scripts.GEOCLEF_Config import paths
import deepbiosphere.scripts.GEOCLEF_Run as run
import sklearn.metrics as metrics
EXTRA_COLUMNS = [
'NA_L1NAME',
'NA_L2NAME',
'NA_L3NAME',
'US_L3NAME',
'city',
'lat',
'lon',
'region',
'test'
]
# this assumes that all models we're running metrics on use the same dataset
# and the same number of top species (num_species)
def run_metrics_and_longform(args):
# get directory for all these runs
save_dir = config.get_res_dir(args.base_dir)
base_dir = args.base_dir
    # take a relative path to a file within GeoCELF2020 that contains a dict of the configs
# then, move this file into the results directory so that the names are saved with the resulting dataframes
cfg_pth = args.base_dir + args.config_path
filename = cfg_pth.split('/')[-1]
shutil.copyfile(cfg_pth, save_dir + filename)
cfg_pth = save_dir + filename
with open(cfg_pth, 'r') as fp:
cfgs = json.load(fp)
# load configs in
# print(cfgs)
# print("---")
models = cfgs['models']
params = load_configs(models, base_dir)
g_t = load_configs({"ground_truth" : cfgs['ground_truth']}, base_dir)
prm = list(params.values())[0]
num_specs = args.num_species
    dset = run.setup_dataset(
        prm.params.observation, args.base_dir, prm.params.organism,
        prm.params.region, prm.params.normalize, prm.params.no_altitude,
        prm.params.dataset, prm.params.threshold, num_species=num_specs)
# load data from configs
all_df, ground_truth = load_data(params, g_t['ground_truth'], args.which_taxa)
# add threshold parameter to file so it's saved for future use
if 'pres_threshold' not in cfgs.keys():
        cfgs['pres_threshold'] = args.pres_threshold
with open(cfg_pth, 'w') as fp:
json.dump(cfgs, fp)
fp.close()
threshold = args.pres_threshold
else:
threshold = cfgs['pres_threshold']
if 'ecoregion' not in cfgs.keys():
cfgs['ecoregion'] = args.ecoregion
with open(cfg_pth, 'w') as fp:
json.dump(cfgs, fp)
fp.close()
ecoregion = args.ecoregion
else:
ecoregion = cfgs['ecoregion']
device = args.device
# check what indices are train / test
# check column labels are correct
# get rid of extra columns laying around
for name, dic in all_df.items():
for mod, df in dic.items():
if 'Unnamed: 0' in df.columns:
df.pop( 'Unnamed: 0')
for taxa, df in ground_truth.items():
if 'Unnamed: 0' in df.columns:
df.pop( 'Unnamed: 0')
# get labels of each unique taxa in data
# print(all_df.keys(), "---")
taxa_names = check_colum_names(dset.num_specs, dset.num_gens, dset.num_fams, all_df)
# convert logits and probabilities to presences and absences based on probabilities
pres_df = pred_2_pres(all_df, taxa_names, device, threshold)
# metrics to use
overall_mets = [
metrics.precision_score,
metrics.recall_score,
metrics.f1_score,
metrics.accuracy_score,
#metrics.roc_auc_score # takes too long to run
]
mets = [
metrics.precision_score,
metrics.recall_score,
metrics.f1_score,
metrics.accuracy_score
]
# TODO: handle these bad boys
# mets_extra = [
# metrics.roc_curve, # proba
# metrics.confusion_matrix # pres-abs
# ]
# run all per-label metrics globally
per_spec_glob_mets = sklearn_per_taxa_overall(pres_df, ground_truth, overall_mets, taxa_names)
pth = save_dir + "per_species_overall_{}.csv".format(cfgs['exp_id'])
    print("saving to:", pth)
per_spec_glob_mets.to_csv(pth)
print("global metrics done")
# run all per-label metrics within ecoregions
# ecoregion = args.ecoregion
per_spec_eco_mets = sklearn_per_taxa_ecoregion(pres_df, ground_truth, mets, taxa_names, ecoregion)
pth = save_dir + "per_species_eco_{}_{}.csv".format(ecoregion, cfgs['exp_id'])
per_spec_eco_mets.to_csv(pth)
print("per-ecoregion metrics done")
# run all per-label metrics and preserve for all labels
for taxa in pres_df.keys():
per_spec_all = sklearn_per_taxa_individual(pres_df[taxa], ground_truth[taxa], taxa_names[taxa])
pth = save_dir + "per_{}_by_{}_{}.csv".format(taxa, taxa, cfgs['exp_id'])
per_spec_all.to_csv(pth)
print("per-species metrics done")
# run all observation
for taxa in pres_df.keys():
# print(taxa_names[taxa])
per_obs_all = inhouse_per_observation(pres_df[taxa], ground_truth[taxa], taxa_names[taxa], args.device)
pth = save_dir + "per_obs_by_{}_{}.csv".format(taxa, cfgs['exp_id'])
print("saving to ", pth)
per_obs_all.to_csv(pth)
print("per-observation metrics done")
# TODO: add mets_extra
def load_configs(cfgs, base_dir):
# get these configs
params = {}
for name, cfg in cfgs.items():
# print(cfg)
param = config.Run_Params(base_dir = base_dir, cfg_path = cfg)
params[name] = param
return params
def load_data(params, ground_truth, which_taxa):
# load these configs' inference data
print("loading ground truth")
spt, gent, famt = ground_truth.get_most_recent_inference()
if which_taxa == 'spec_gen_fam':
g_t = {
'species' : pd.read_csv(spt),
'genus' : pd.read_csv(gent),
'family' : pd.read_csv(famt)
}
elif which_taxa =='spec_only':
g_t = {
'species' : pd.read_csv(spt)
}
else:
        raise NotImplementedError("not yet implemented for ", which_taxa)
data_s = {}
data_g = {}
data_f = {}
ground_truth ={}
for name, param in params.items():
print("loading model ", name)
tick = time.time()
if which_taxa == 'spec_only':
sp = param.get_most_recent_inference(which_taxa=which_taxa)
# do spec only
data_s[name] = pd.read_csv(sp)
elif which_taxa == 'spec_gen_fam':
sp, gen, fam = param.get_most_recent_inference(which_taxa=which_taxa)
# do spgenfam
data_s[name] = pd.read_csv(sp)
data_g[name] = pd.read_csv(gen)
data_f[name] = pd.read_csv(fam)
else:
raise NotImplementedError('inference not yet implemented for ', which_taxa)
tock = time.time()
print("loading {} took {} minutes".format(name, ((tock-tick)/60)))
if which_taxa == 'spec_only':
all_df = {
'species' : data_s,
}
elif which_taxa == 'spec_gen_fam':
all_df = {
'species' : data_s,
'genus' : data_g,
'family' : data_f
}
else:
raise NotImplementedError('inference not yet implemented for ', which_taxa)
return all_df, g_t
def check_colum_names(num_specs, num_gens, num_fams, all_df, dset=None):
species_columns, genus_columns, family_columns = None, None, None
for name, taxa in all_df.items():
# print("name is ", name)
# print("----")
if dset is not None:
num_specs = dset.num_specs
num_gens = dset.num_gens
num_fams = dset.num_fams
col_labels = []
for n, model in taxa.items():
col_labels.append(list(model.columns))
to_comp = col_labels[0]
for i in col_labels:
assert to_comp == i, "column names are not consistent!"
for i in col_labels:
if name == 'species':
specs = set(i) - set(EXTRA_COLUMNS)
# print(len(specs), dset.num_specs)
assert len(specs) == num_specs, "unexpected columns present in species inference file!"
species_columns = specs
elif name == 'genus':
specs = set(i) - set(EXTRA_COLUMNS)
# print(len(specs), dset.num_gens)
assert len(specs) == num_gens, "unexpected columns present in genus inference file!"
genus_columns = specs
else:
specs = set(i) - set(EXTRA_COLUMNS)
# print(len(specs), dset.num_fams)
assert len(specs) == num_fams, "unexpected columns present in family inference file!"
family_columns = specs
taxa_names = {
'species' : species_columns,
'genus' : genus_columns,
'family' : family_columns
}
return taxa_names
# 1. convert predictions to presence / absence
def pred_2_pres(all_df, taxa_names, device, threshold):
pres_df = {}
for taxa, dic in all_df.items():
new_dict = {}
for name, df in dic.items():
cols_2_keep = list(taxa_names[taxa])
obs = df[cols_2_keep].values
obs = torch.tensor(obs)
            obs = obs.to(device)
if name == 'random_forest' or name == 'maxent':
# already a probability, just threshold
binn = (obs > threshold).float()
else:
# first convert logit to probability
obs = torch.sigmoid(obs)
# then threshold probability
                # sneaky: NaN is a very small double, but converting it to float32
                # with .float() rounds it to a very negative number that no longer
                # maps to NaN; therefore the bool array has to be converted to a
                # double so that the NaN status gets carried over and preserved
binn = (obs > threshold).double()
# sketchy, but the below line essentially converts over the nans if inference
# only run on either test or train data, so that metrics aren't accidentally
# calculated for portions of the dataset inference wasn't actually run on
binn[torch.isnan(obs)] = obs[torch.isnan(obs)]
# convert back to numpy and df
out = binn.cpu().numpy()
# now this new df will have NaN values for any observations that inference wasn't run on
new_dict[name] = utils.numpy_2_df(out, taxa_names[taxa], df, EXTRA_COLUMNS)
pres_df[taxa] = new_dict
return pres_df
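# Minimal sketch (not part of the original pipeline) illustrating the
# thresholding trick used in pred_2_pres above: logits are squashed with
# sigmoid, thresholded in double precision, and NaN entries (observations
# inference was never run on) are copied back so they stay NaN instead of
# silently becoming 0/1.
def example_logits_to_presence(logits, threshold=0.5):
    obs = torch.sigmoid(torch.tensor(logits, dtype=torch.float64))
    binn = (obs > threshold).double()
    binn[torch.isnan(obs)] = obs[torch.isnan(obs)]
    return binn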
# 1. convert predictions to probabilities
def pred_2_proba(all_df, taxa_names, device):
prob_df = {}
for taxa, dic in all_df.items():
new_dict = {}
for name, df in dic.items():
if name != 'random_forest' and name != 'maxent':
col_2_keep = list(taxa_names[taxa])
obs = df[col_2_keep].values
obs = torch.tensor(obs)
                obs = obs.to(device)
obs = torch.sigmoid(obs)
# convert back to numpy and df
out = obs.cpu().numpy()
new_dict[name] = utils.numpy_2_df(out, col_2_keep, df, EXTRA_COLUMNS)
else:
# random forest and maxent are already probabilities, just pass on
new_dict[name] = df
prob_df[taxa] = new_dict
return prob_df
# per-species dataset-wide
# 2. sklearn statistics
# test
# to get per-class: micro/macro/weighted to get per-sample: samples
# model test taxa met1, met2, ... metn
# maxent T spec 1.2 3.4 3.3
# maxent F fam 1.2 3.4 3.3
# tresnet F spec 1.2 3.4 3.3
# ...
def sklearn_per_taxa_overall(pres_df, ground_truth, mets, taxa_names):
num_models = len(list(pres_df.values())[0].keys())
num_taxa = len(pres_df)
num_rows = num_models * num_taxa *2 # the two is one for test, one for train
score_name = [s.__name__ for s in mets]
extra = ['model', 'taxa', 'test']
score_idx = {score : (i + len(extra)) for score, i in zip(score_name, range(len(score_name)))}
df_names = extra + score_name
model_idx, taxa_idx, test_idx = 0, 1, 2
filler = np.zeros([num_rows, len(df_names)])
results_df = pd.DataFrame(filler, columns=df_names)
    # TODO: will break if there aren't observations for train points...
i = 0
# one entry per-taxa
for taxa, dic in pres_df.items():
# one entry per-model
for name, df in dic.items():
col_2_keep = list(taxa_names[taxa])
gt = ground_truth[taxa]
# grab test predictions
curr_df = df[df.test]
obs = curr_df[col_2_keep].values
curr_gt = gt[gt.test]
curr_yt = curr_gt[col_2_keep].values
results_df = run_metrics(results_df, curr_yt, obs, mets, score_idx, i)
results_df.iloc[i, taxa_idx] = taxa
results_df.iloc[i, test_idx] = 'test'
results_df.iloc[i, model_idx] = name
i += 1
# train
curr_df = df[~df.test]
obs = curr_df[col_2_keep].values
curr_gt = gt[~gt.test]
curr_yt = curr_gt[col_2_keep].values
# presence / absence metrics
results_df = run_metrics(results_df, curr_yt, obs, mets, score_idx, i)
results_df.iloc[i, taxa_idx] = taxa
results_df.iloc[i, test_idx] = 'train'
results_df.iloc[i, model_idx] = name
i += 1
return results_df
def run_metrics(df, y_true, y_obs, mets, score_idx, i):
# probability weight metrics
for met in mets:
tick = time.time()
        if met.__name__ == 'accuracy_score' or met.__name__ == 'roc_auc_score':
out = met(y_true, y_obs)
else:
out = met(y_true, y_obs, average='weighted')
idx = score_idx[met.__name__]
df.iloc[i, idx] = out
tock = time.time()
print("{} took {} minutes".format(met.__name__, ((tock-tick)/60)))
return df
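# Illustrative sketch (not from the original code): run_metrics passes
# average='weighted' to the per-label scores, i.e. the per-label values are
# averaged weighted by each label's support; accuracy_score accepts no
# average argument, hence the special case above.
def example_weighted_f1(y_true, y_pred):
    return metrics.f1_score(y_true, y_pred, average='weighted')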
# per-taxa across ecoregions
# 2. sklearn statistics
# test
# to get per-class: micro/macro/weighted to get per-sample: samples
# model eco test taxa met1, met2, ... metn
# maxent E1 T spec 1.2 3.4 3.3
# maxent E2 F fam 1.2 3.4 3.3
# tresnet E1 F spec 1.2 3.4 3.3
# ...
def sklearn_per_taxa_ecoregion(pres_df, ground_truth, mets, taxa_names, econame):
# setting up dataframe
num_models = len(list(pres_df.values())[0].keys())
num_taxa = len(pres_df)
num_ecoregions = len(list(list(pres_df.values())[0].values())[0][econame].unique())
num_rows = num_models * num_taxa *2 * num_ecoregions # the two is one for test, one for train
score_name = [s.__name__ for s in mets]
extra = ['model', 'taxa', 'test', 'ecoregion']
score_idx = {score : (i + len(extra)) for score, i in zip(score_name, range(len(score_name)))}
df_names = extra + score_name
model_idx, taxa_idx, test_idx, eco_idx = 0, 1, 2, 3
filler = np.full([num_rows, len(df_names)], -1)
results_df_reg = pd.DataFrame(filler, columns=df_names)
# fill in dataframe
i = 0
# one entry per-taxa
for taxa, dic in pres_df.items():
# one entry per-model
for name, dff in dic.items():
for ecoregion, df in dff.groupby(econame):
col_2_keep = list(taxa_names[taxa])
gt = ground_truth[taxa]
# test
curr_df = df[df.test]
obs = curr_df[col_2_keep].values
# if no observations in the given ecoregion, move on
if len(obs) < 1:
continue
curr_gt = gt[gt.test]
curr_gt = curr_gt[curr_gt[econame] == ecoregion]
curr_yt = curr_gt[col_2_keep].values
results_df_reg = run_metrics(results_df_reg, curr_yt, obs, mets, score_idx, i)
results_df_reg.iloc[i, taxa_idx] = taxa
results_df_reg.iloc[i, test_idx] = 'test'
results_df_reg.iloc[i, model_idx] = name
results_df_reg.iloc[i, eco_idx] = ecoregion
i += 1
# copying code, bad but here we are..
# train
curr_df = df[~df.test]
obs = curr_df[col_2_keep].values
if len(obs) < 1:
continue
curr_gt = gt[~gt.test]
curr_gt = curr_gt[curr_gt[econame] == ecoregion]
curr_yt = curr_gt[col_2_keep].values
results_df_reg = run_metrics(results_df_reg, curr_yt, obs, mets, score_idx, i)
results_df_reg.iloc[i, taxa_idx] = taxa
results_df_reg.iloc[i, test_idx] = 'train'
results_df_reg.iloc[i, model_idx] = name
results_df_reg.iloc[i, eco_idx] = ecoregion
i += 1
return results_df_reg
# 3. per-species statistics
# have one df for each taxa
# model support test metric sp1 sp2 ...
# random T T f1 .2 .23
# add one line for each of the above
# gotta manually go taxa at a time
def sklearn_per_taxa_individual(yo, yt, taxa_names):
# one entry per-model
num_models = len(yo.keys())
num_rows = num_models *2 * 4 # the two is one for test, one for train
score_name = ['metric', 'model', 'support', 'test']
col_2_keep = list(taxa_names)
df_names = score_name + col_2_keep
metric_idx, model_idx, support_idx, test_idx = 0, 1, 2, 3
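    # the long placeholder string presumably reserves a wide fixed-width
    # string dtype for the array, so that the metric values written below
    # are not truncated when stored as strings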
filler = np.full([num_rows, len(df_names)], 'NaNNANANANANANANANANAANANANANANANANAN')
yt_test = yt[yt.test]
yt_test = yt_test[col_2_keep]
yt_test = yt_test.values
yt_train = yt[~yt.test]
yt_train = yt_train[col_2_keep]
    yt_train = yt_train.values
i = 0
m_names = ["precision", "recall", "fbeta"]
for name, df in yo.items():
tick = time.time()
# test
# get data
obs_test = df[df.test]
obs_test = obs_test[col_2_keep].values
# calculate metrics using sklearn
precision, recall, fbeta, support = metrics.precision_recall_fscore_support(yt_test, obs_test)
# save results in dataframe
for mt in m_names:
filler[i, metric_idx] = mt
filler[i, model_idx] = name
filler[i, support_idx] = support
filler[i, test_idx] = 'test'
filler[i, (test_idx+1):] = locals()[mt]
i += 1
# train
# get train data
obs_train = df[~df.test]
obs_train = obs_train[col_2_keep].values
# calculate metrics using sklearn
precision, recall, fbeta, support = metrics.precision_recall_fscore_support(yt_train, obs_train)
# save results in dataframe
for mt in m_names:
filler[i, metric_idx] = mt
filler[i, model_idx] = name
filler[i, support_idx] = support
filler[i, test_idx] = 'train'
filler[i, (test_idx+1):] = locals()[mt]
i += 1
tock = time.time()
print("{} took {} minutes".format(name, ((tock-tick)/60)))
per_spec_df =
|
pd.DataFrame(filler, columns=df_names)
|
pandas.DataFrame
|
import unittest
import dolphindb as ddb
import numpy as np
import pandas as pd
from setup import HOST, PORT, WORK_DIR, DATA_DIR
from numpy.testing import assert_array_equal, assert_array_almost_equal
from pandas.testing import assert_series_equal
from pandas.testing import assert_frame_equal
class TestBasicDataTypes(unittest.TestCase):
@classmethod
def setUp(cls):
cls.s = ddb.session()
cls.s.connect(HOST, PORT, "admin", "123456")
@classmethod
def tearDownClass(cls):
pass
def test_int_scalar(self):
re = self.s.run("100")
self.assertEqual(re, 100)
re = self.s.run("int()")
self.assertIsNone(re)
def test_bool_scalar(self):
re = self.s.run("true")
self.assertEqual(re, True)
re = self.s.run("bool()")
self.assertIsNone(re)
def test_char_scalar(self):
re = self.s.run("'a'")
self.assertEqual(re, 97)
re = self.s.run("char()")
self.assertIsNone(re)
def test_short_scalar(self):
re = self.s.run("112h")
self.assertEqual(re, 112)
re = self.s.run("short()")
self.assertIsNone(re)
def test_long_scalar(self):
re = self.s.run("22l")
self.assertEqual(re, 22)
re = self.s.run("long()")
self.assertIsNone(re)
def test_date_scalar(self):
re = self.s.run("2012.06.12")
self.assertEqual(re, np.datetime64('2012-06-12'))
re = self.s.run("date()")
self.assertIsNone(re)
def test_month_scalar(self):
re = self.s.run("2012.06M")
self.assertEqual(re, np.datetime64('2012-06'))
re = self.s.run("month()")
self.assertIsNone(re)
def test_time_scalar(self):
re = self.s.run("12:30:00.008")
self.assertEqual(re, np.datetime64('1970-01-01T12:30:00.008'))
re = self.s.run("time()")
self.assertIsNone(re)
def test_minute_scalar(self):
re = self.s.run("12:30m")
self.assertEqual(re, np.datetime64('1970-01-01T12:30'))
re = self.s.run("minute()")
self.assertIsNone(re)
def test_second_scalar(self):
re = self.s.run("12:30:10")
self.assertEqual(re, np.datetime64('1970-01-01T12:30:10'))
re = self.s.run("second()")
self.assertIsNone(re)
def test_datetime_scalar(self):
re = self.s.run('2012.06.13 13:30:10')
self.assertEqual(re, np.datetime64('2012-06-13T13:30:10'))
re = self.s.run("datetime()")
self.assertIsNone(re)
def test_timestamp_scalar(self):
re = self.s.run('2012.06.13 13:30:10.008')
self.assertEqual(re, np.datetime64('2012-06-13T13:30:10.008'))
re = self.s.run("timestamp()")
self.assertIsNone(re)
def test_nanotime_scalar(self):
re = self.s.run('13:30:10.008007006')
self.assertEqual(re, np.datetime64('1970-01-01T13:30:10.008007006'))
re = self.s.run("nanotime()")
self.assertIsNone(re)
def test_nanotimestamp_scalar(self):
re = self.s.run('2012.06.13 13:30:10.008007006')
self.assertEqual(re, np.datetime64('2012-06-13T13:30:10.008007006'))
re = self.s.run("nanotimestamp()")
self.assertIsNone(re)
def test_float_scalar(self):
re = self.s.run('2.1f')
self.assertEqual(round(re), 2)
re = self.s.run("float()")
self.assertIsNone(re)
def test_double_scalar(self):
re = self.s.run('2.1')
self.assertEqual(re, 2.1)
re = self.s.run("double()")
self.assertIsNone(re)
def test_string_scalar(self):
re = self.s.run('"abc"')
self.assertEqual(re, 'abc')
re = self.s.run("string()")
self.assertIsNone(re)
def test_uuid_scalar(self):
re = self.s.run("uuid('5d212a78-cc48-e3b1-4235-b4d91473ee87')")
self.assertEqual(re, '5d212a78-cc48-e3b1-4235-b4d91473ee87')
re = self.s.run("uuid()")
self.assertIsNone(re)
def test_ipaddr_sclar(self):
re = self.s.run("ipaddr('192.168.1.135')")
self.assertEqual(re, '192.168.1.135')
re = self.s.run("ipaddr()")
self.assertIsNone(re)
def test_int128_scalar(self):
re = self.s.run("int128('e1671797c52e15f763380b45e841ec32')")
self.assertEqual(re, 'e1671797c52e15f763380b45e841ec32')
re = self.s.run("int128()")
self.assertIsNone(re)
def test_python_datetime64_dolphindb_date_scalar(self):
ts = np.datetime64('2019-01-01T20:01:01.1223461')
self.assertEqual(self.s.run('date', ts), np.datetime64('2019-01-01'))
def test_python_datetime64_dolphindb_month_scalar(self):
ts = np.datetime64('2019-01-01T20:01:01.1223461')
self.assertEqual(self.s.run('month', ts), np.datetime64('2019-01'))
def test_python_datetime64_dolphindb_time_scalar(self):
ts = np.datetime64('2019-01-01T20:01:01.1223461')
self.assertEqual(self.s.run('time', ts), np.datetime64('1970-01-01T20:01:01.122'))
def test_python_datetime64_dolphindb_minute_scalar(self):
ts = np.datetime64('2019-01-01T20:01:01.1223461')
self.assertEqual(self.s.run('minute', ts), np.datetime64('1970-01-01T20:01'))
def test_python_datetime64_dolphindb_second_scalar(self):
ts = np.datetime64('2019-01-01T20:01:01.1223461')
self.assertEqual(self.s.run('second', ts), np.datetime64('1970-01-01T20:01:01'))
def test_python_datetime64_dolphindb_datetime_scalar(self):
ts = np.datetime64('2019-01-01T20:01:01.1223461')
self.assertEqual(self.s.run('datetime', ts), np.datetime64('2019-01-01T20:01:01'))
def test_python_datetime64_dolphindb_timestamp_scalar(self):
ts = np.datetime64('2019-01-01T20:01:01.1223461')
self.assertEqual(self.s.run('timestamp', ts), np.datetime64('2019-01-01T20:01:01.122'))
def test_python_datetime64_dolphindb_nanotime_scalar(self):
ts = np.datetime64('2019-01-01T20:01:01.1223461')
self.assertEqual(self.s.run('nanotime', ts), np.datetime64('1970-01-01T20:01:01.122346100'))
def test_python_datetime64_dolphindb_nanotimestamp_scalar(self):
ts = np.datetime64('2019-01-01T20:01:01.1223461')
self.assertEqual(self.s.run('nanotimestamp', ts), np.datetime64('2019-01-01T20:01:01.122346100'))
def test_string_vector(self):
re = self.s.run("`IBM`GOOG`YHOO")
self.assertEqual((re == ['IBM', 'GOOG', 'YHOO']).all(), True)
re = self.s.run("['IBM', string(), 'GOOG']")
self.assertEqual((re==['IBM', '', 'GOOG']).all(), True)
re = self.s.run("[string(), string(), string()]")
self.assertEqual((re==['','','']).all(), True)
def test_function_def(self):
re = self.s.run("def f(a,b){return a+b}")
re = self.s.run("f(1, 2)")
self.assertEqual(re, 3)
def test_symbol_vector(self):
re = self.s.run("symbol(`IBM`MSFT`GOOG`BIDU)")
self.assertEqual((re == ['IBM', 'MSFT', 'GOOG', 'BIDU']).all(), True)
re = self.s.run("symbol(['IBM', '', 'GOOG'])")
self.assertEqual((re==['IBM', '', 'GOOG']).all(), True)
re = self.s.run("symbol(['', '', ''])")
self.assertEqual((re==['', '', '']).all(), True)
def test_char_vector(self):
re = self.s.run("['a', 'b', 'c']")
expected = [97, 98, 99]
self.assertEqual((re==expected).all(), True)
re = self.s.run("['a', char(), 'c']")
expected = [97.0, np.nan, 99.0]
assert_array_almost_equal(re, expected)
def test_bool_vector(self):
re = self.s.run("[true, false, true]")
expected = [True, False, True]
assert_array_equal(re, expected)
re = self.s.run("[true, false, bool()]")
assert_array_equal(re[0:2], [True, False])
self.assertTrue(np.isnan(re[2]))
re = self.s.run("[bool(), bool(), bool()]")
self.assertTrue(np.isnan(re[0]))
self.assertTrue(np.isnan(re[1]))
self.assertTrue(np.isnan(re[2]))
def test_int_vector(self):
re = self.s.run("2938 2920 54938 1999 2333")
self.assertEqual((re == [2938, 2920, 54938, 1999, 2333]).all(), True)
re = self.s.run("[2938, int(), 6552]")
expected = [2938.0, np.nan, 6552.0]
assert_array_almost_equal(re, expected, 1)
re = self.s.run("[int(), int(), int()]")
expected = [np.nan, np.nan, np.nan]
assert_array_almost_equal(re, expected)
def test_short_vector(self):
re = self.s.run("[10h, 11h, 12h]")
expected = [10, 11, 12]
assert_array_equal(re, expected)
re = self.s.run("[10h, short(), 12h]")
expected = [10.0, np.nan, 12.0]
assert_array_almost_equal(re, expected)
re = self.s.run("[short(), short(), short()]")
expected = [np.nan, np.nan, np.nan]
assert_array_almost_equal(re, expected)
def test_long_vector(self):
re = self.s.run("[10l, 11l, 12l]")
expected = [10, 11, 12]
assert_array_equal(re, expected)
re = self.s.run("[10l, long(), 12l]")
expected = [10.0, np.nan, 12.0]
assert_array_almost_equal(re, expected)
re = self.s.run("[long(), long(), long()]")
expected = [np.nan, np.nan, np.nan]
assert_array_almost_equal(re, expected)
def test_double_vector(self):
re = self.s.run("rand(10.0,10)")
self.assertEqual(len(re), 10)
re = self.s.run("[12.5, 26.0, double()]")
expected = [12.5, 26.0, np.nan]
assert_array_almost_equal(re, expected)
re = self.s.run("[double(), double(), double()]")
expected = [np.nan, np.nan, np.nan]
assert_array_almost_equal(re, expected)
def test_float_vector(self):
re = self.s.run("[12.5f, 26.34f, 25.896f]")
expected = [12.5, 26.34, 25.896]
assert_array_almost_equal(re, expected, 3)
re = self.s.run("[12.5f, float(), 25.896f]")
expected = [12.5, np.nan, 25.896]
assert_array_almost_equal(re, expected, 3)
re = self.s.run("[float(), float(), float()]")
expected = [np.nan, np.nan, np.nan]
assert_array_almost_equal(re, expected)
def test_date_vector(self):
re = self.s.run("2012.10.01 +1..3")
expected = np.array(['2012-10-02','2012-10-03','2012-10-04'], dtype="datetime64")
self.assertEqual((re == expected).all(), True)
re = self.s.run("[2012.06.01, date(), 2012.06.03]")
expected = np.array(['2012-06-01', 'NaT', '2012-06-03'], dtype="datetime64")
assert_array_equal(re, expected)
re = self.s.run("[date(), date(), date()]")
expected = [np.datetime64('NaT'), np.datetime64('NaT'), np.datetime64('NaT')]
assert_array_equal(re, expected)
def test_month_vector(self):
re = self.s.run("[2012.06M, 2012.07M, 2012.08M]")
expected = [np.datetime64('2012-06'), np.datetime64('2012-07'), np.datetime64('2012-08')]
assert_array_equal(re, expected)
re = self.s.run("[2012.06M, month(), 2012.08M]")
expected = [np.datetime64('2012-06'), np.datetime64('NaT'), np.datetime64('2012-08')]
assert_array_equal(re, expected)
re = self.s.run("take(month(), 3)")
expected = [np.datetime64('NaT'), np.datetime64('NaT'), np.datetime64('NaT')]
assert_array_equal(re, expected)
def test_time_vector(self):
re = self.s.run("[12:30:10.008, 12:30:10.009, 12:30:10.010]")
expected = [np.datetime64('1970-01-01T12:30:10.008'), np.datetime64('1970-01-01T12:30:10.009'), np.datetime64('1970-01-01T12:30:10.010')]
assert_array_equal(re, expected)
re = self.s.run("[12:30:10.008, NULL, 12:30:10.010]")
expected = [np.datetime64('1970-01-01T12:30:10.008'), np.datetime64('NaT'), np.datetime64('1970-01-01T12:30:10.010')]
assert_array_equal(re, expected)
re = self.s.run("take(time(), 3)")
expected = [np.datetime64('NaT'), np.datetime64('NaT'), np.datetime64('NaT')]
assert_array_equal(re, expected)
def test_minute_vector(self):
re = self.s.run("[13:30m, 13:34m, 13:35m]")
expected = [np.datetime64('1970-01-01T13:30'), np.datetime64('1970-01-01T13:34'), np.datetime64('1970-01-01T13:35')]
assert_array_equal(re, expected)
re = self.s.run("[13:30m, minute(), 13:35m]")
expected = [np.datetime64('1970-01-01T13:30'), np.datetime64('NaT'), np.datetime64('1970-01-01T13:35')]
assert_array_equal(re, expected)
re = self.s.run("take(minute(), 3)")
expected = [np.datetime64('NaT'), np.datetime64('NaT'), np.datetime64('NaT')]
assert_array_equal(re, expected)
def test_second_vector(self):
re = self.s.run("[13:30:10, 13:30:11, 13:30:12]")
expected = [np.datetime64('1970-01-01T13:30:10'), np.datetime64('1970-01-01T13:30:11'), np.datetime64('1970-01-01T13:30:12')]
assert_array_equal(re, expected)
re = self.s.run("[13:30:10, second(), 13:30:12]")
expected = [np.datetime64('1970-01-01T13:30:10'), np.datetime64('NaT'), np.datetime64('1970-01-01T13:30:12')]
assert_array_equal(re, expected)
re = self.s.run("take(second(), 3)")
expected = [np.datetime64('NaT'), np.datetime64('NaT'), np.datetime64('NaT')]
assert_array_equal(re, expected)
def test_datetime_vector(self):
re = self.s.run("2012.10.01T15:00:04 + 2009..2011")
expected = np.array(['2012-10-01T15:33:33', '2012-10-01T15:33:34', '2012-10-01T15:33:35'], dtype="datetime64")
self.assertEqual((re == expected).all(), True)
re = self.s.run("[2012.06.01T12:30:00, datetime(), 2012.06.02T12:30:00]")
expected = np.array(['2012-06-01T12:30:00', 'NaT', '2012-06-02T12:30:00'], dtype="datetime64")
assert_array_equal(re, expected)
def test_timestamp_vector(self):
re = self.s.run("[2012.06.13T13:30:10.008, 2012.06.13T13:30:10.009, 2012.06.13T13:30:10.010]")
expected = [np.datetime64('2012-06-13T13:30:10.008'), np.datetime64('2012-06-13T13:30:10.009'), np.datetime64('2012-06-13T13:30:10.010')]
assert_array_equal(re, expected)
re = self.s.run("[2012.06.13T13:30:10.008, NULL, 2012.06.13T13:30:10.010]")
expected = [np.datetime64('2012-06-13T13:30:10.008'), np.datetime64('NaT'), np.datetime64('2012-06-13T13:30:10.010')]
assert_array_equal(re, expected)
re = self.s.run("take(timestamp(), 3)")
expected = [np.datetime64('NaT'), np.datetime64('NaT'), np.datetime64('NaT')]
assert_array_equal(re, expected)
def test_nanotime_vector(self):
re = self.s.run("[13:30:10.008007006, 13:30:10.008007007, 13:30:10.008007008]")
expected = [np.datetime64('1970-01-01T13:30:10.008007006'), np.datetime64('1970-01-01T13:30:10.008007007'), np.datetime64('1970-01-01T13:30:10.008007008')]
assert_array_equal(re, expected)
re = self.s.run("[13:30:10.008007006, NULL, 13:30:10.008007008]")
expected = [np.datetime64('1970-01-01T13:30:10.008007006'), np.datetime64('NaT'), np.datetime64('1970-01-01T13:30:10.008007008')]
assert_array_equal(re, expected)
re = self.s.run("take(nanotime(), 3)")
expected = [np.datetime64('NaT'), np.datetime64('NaT'), np.datetime64('NaT')]
assert_array_equal(re, expected)
def test_nanotimestamp_vector(self):
re = self.s.run("[2012.06.13T13:30:10.008007006, 2012.06.13T13:30:10.008007007, 2012.06.13T13:30:10.008007008]")
expected = [np.datetime64('2012-06-13T13:30:10.008007006'), np.datetime64('2012-06-13T13:30:10.008007007'), np.datetime64('2012-06-13T13:30:10.008007008')]
assert_array_equal(re, expected)
re = self.s.run("[2012.06.13T13:30:10.008007006, NULL, 2012.06.13T13:30:10.008007008]")
expected = [np.datetime64('2012-06-13T13:30:10.008007006'), np.datetime64('NaT'), np.datetime64('2012-06-13T13:30:10.008007008')]
assert_array_equal(re, expected)
re = self.s.run("take(nanotimestamp(), 3)")
expected = [np.datetime64('NaT'), np.datetime64('NaT'), np.datetime64('NaT')]
assert_array_equal(re, expected)
def test_uuid_vector(self):
re = self.s.run("uuid(['5d212a78-cc48-e3b1-4235-b4d91473ee87', '5d212a78-cc48-e3b1-4235-b4d91473ee88', '5d212a78-cc48-e3b1-4235-b4d91473ee89'])")
expected = ['5d212a78-cc48-e3b1-4235-b4d91473ee87', '5d212a78-cc48-e3b1-4235-b4d91473ee88', '5d212a78-cc48-e3b1-4235-b4d91473ee89']
assert_array_equal(re, expected)
re = self.s.run("uuid(['5d212a78-cc48-e3b1-4235-b4d91473ee87', '', '5d212a78-cc48-e3b1-4235-b4d91473ee89'])")
expected = ['5d212a78-cc48-e3b1-4235-b4d91473ee87', '00000000-0000-0000-0000-000000000000', '5d212a78-cc48-e3b1-4235-b4d91473ee89']
assert_array_equal(re, expected)
re = self.s.run("uuid(['', '', ''])")
expected = ['00000000-0000-0000-0000-000000000000', '00000000-0000-0000-0000-000000000000', '00000000-0000-0000-0000-000000000000']
assert_array_equal(re, expected)
def test_ipaddr_vector(self):
re = self.s.run("ipaddr(['192.168.1.135', '192.168.1.124', '192.168.1.14'])")
expected = ['192.168.1.135', '192.168.1.124', '192.168.1.14']
assert_array_equal(re, expected)
re = self.s.run("ipaddr(['192.168.1.135', '', '192.168.1.14'])")
expected = ['192.168.1.135', '0.0.0.0', '192.168.1.14']
assert_array_equal(re, expected)
re = self.s.run("ipaddr(['', '', ''])")
expected = ['0.0.0.0', '0.0.0.0', '0.0.0.0']
assert_array_equal(re, expected)
def test_int128_vector(self):
re = self.s.run("int128(['e1671797c52e15f763380b45e841ec32', 'e1671797c52e15f763380b45e841ec33', 'e1671797c52e15f763380b45e841ec34'])")
expected = ['e1671797c52e15f763380b45e841ec32', 'e1671797c52e15f763380b45e841ec33', 'e1671797c52e15f763380b45e841ec34']
assert_array_equal(re, expected)
re = self.s.run("int128(['e1671797c52e15f763380b45e841ec32', '', 'e1671797c52e15f763380b45e841ec34'])")
expected = ['e1671797c52e15f763380b45e841ec32', '00000000000000000000000000000000', 'e1671797c52e15f763380b45e841ec34']
assert_array_equal(re, expected)
re = self.s.run("int128(['', '', ''])")
expected = ['00000000000000000000000000000000', '00000000000000000000000000000000', '00000000000000000000000000000000']
assert_array_equal(re, expected)
def test_int_matrix(self):
re = self.s.run("1..6$3:2")
expected = np.array([[1, 4], [2, 5], [3, 6]])
assert_array_equal(re[0], expected)
self.assertIsNone(re[1])
self.assertIsNone(re[2])
def test_short_matrix(self):
re = self.s.run("short(1..6)$3:2")
expected = np.array([[1, 4], [2, 5], [3, 6]])
assert_array_equal(re[0], expected)
self.assertIsNone(re[1])
self.assertIsNone(re[2])
def test_long_matrix(self):
re = self.s.run("long(1..6)$3:2")
expected = np.array([[1, 4], [2, 5], [3, 6]])
assert_array_equal(re[0], expected)
self.assertIsNone(re[1])
self.assertIsNone(re[2])
def test_double_matrix(self):
re = self.s.run("[1.1, 1.2, 1.3, 1.4, 1.5, 1.6]$3:2")
expected = [[1.1, 1.4], [1.2, 1.5], [1.3, 1.6]]
assert_array_almost_equal(re[0], expected)
self.assertIsNone(re[1])
self.assertIsNone(re[2])
def test_float_matrix(self):
re = self.s.run("[1.1f, 1.2f, 1.3f, 1.4f, 1.5f, 1.6f]$3:2")
expected = [[1.1, 1.4], [1.2, 1.5], [1.3, 1.6]]
assert_array_almost_equal(re[0], expected)
self.assertIsNone(re[1])
self.assertIsNone(re[2])
def test_symbol_matrix(self):
re = self.s.run('symbol("A"+string(1..9))$3:3')
expected = np.array([["A1","A4","A7"], ["A2","A5","A8"], ["A3","A6","A9"]])
assert_array_equal(re[0], expected)
self.assertIsNone(re[1])
self.assertIsNone(re[2])
def test_huge_matrix(self):
re = self.s.run('matrix(loop(take{, 3000}, 1..3000))')
expected = np.arange(1, 3001)
for i in np.arange(0, 3000):
assert_array_equal(re[0][i], expected)
self.assertIsNone(re[1])
self.assertIsNone(re[2])
def test_one_column_matrix(self):
re = self.s.run('matrix(1..3000000)')
for i in np.arange(0, 3000000):
assert_array_equal(re[0][i], [i+1])
self.assertIsNone(re[1])
self.assertIsNone(re[2])
def test_one_row_matrix(self):
re = self.s.run("matrix(take(1, 5000)).transpose()")
assert_array_equal(re[0], [np.repeat(1, 5000)])
self.assertIsNone(re[1])
self.assertIsNone(re[2])
def test_zero_column_matrix(self):
re = self.s.run("matrix(INT, 3, 0)")
expected = [[] for i in range(3)]
assert_array_equal(re[0], expected)
re = self.s.run("matrix(BOOL,3,0)")
assert_array_equal(re[0], expected)
self.assertEqual(re[0].dtype,bool )
re = self.s.run("matrix(CHAR,3,0)")
assert_array_equal(re[0], expected)
self.assertEqual(re[0].dtype,'int8')
re = self.s.run("matrix(SHORT,3,0)")
assert_array_equal(re[0], expected)
self.assertEqual(re[0].dtype,'int16')
re = self.s.run("matrix(LONG,3,0)")
assert_array_equal(re[0], expected)
self.assertEqual(re[0].dtype,'int64')
re = self.s.run("matrix(DATE,3,0)")
expected = np.empty((3,0),dtype="datetime64[ns]")
assert_array_equal(re[0], expected)
self.assertEqual(re[0].dtype,'datetime64[ns]')
re = self.s.run("matrix(MONTH,3,0)")
expected = np.empty((3,0),dtype="datetime64[M]")
assert_array_equal(re[0], expected)
self.assertEqual(re[0].dtype,'datetime64[M]')
re = self.s.run("matrix(TIME,3,0)")
expected = np.empty((3,0),dtype="datetime64[ns]")
assert_array_equal(re[0], expected)
self.assertEqual(re[0].dtype,'datetime64[ns]')
re = self.s.run("matrix(MINUTE,3,0)")
expected = np.empty((3,0),dtype="datetime64[ns]")
assert_array_equal(re[0], expected)
self.assertEqual(re[0].dtype,'datetime64[ns]')
re = self.s.run("matrix(SECOND,3,0)")
expected = np.empty((3,0),dtype="datetime64[ns]")
assert_array_equal(re[0], expected)
re = self.s.run("matrix(DATETIME,3,0)")
expected = np.empty((3,0),dtype="datetime64[ns]")
assert_array_equal(re[0], expected)
self.assertEqual(re[0].dtype,'datetime64[ns]')
re = self.s.run("matrix(TIMESTAMP,3,0)")
assert_array_equal(re[0], expected)
self.assertEqual(re[0].dtype,'datetime64[ns]')
re = self.s.run("matrix(NANOTIME,3,0)")
assert_array_equal(re[0], expected)
re = self.s.run("matrix(NANOTIMESTAMP,3,0)")
assert_array_equal(re[0], expected)
re = self.s.run("matrix(FLOAT,3,0)")
expected = np.empty((3,0),dtype="float32")
assert_array_equal(re[0], expected)
re = self.s.run("matrix(DOUBLE,3,0)")
assert_array_equal(re[0], expected)
self.assertEqual(re[0].dtype,"float64")
re = self.s.run("matrix(SYMBOL,3,0)")
assert_array_equal(re[0], expected)
def test_zero_row_matrix(self):
re = self.s.run("matrix(INT, 0, 3)")
expected = np.empty((0,3),dtype="int32")
assert_array_equal(re[0], expected)
re = self.s.run("matrix(BOOL,0,3)")
expected = np.empty((0,3),dtype="bool")
assert_array_equal(re[0], expected)
re = self.s.run("matrix(CHAR,0,3)")
assert_array_equal(re[0], expected)
re = self.s.run("matrix(SHORT,0,3)")
assert_array_equal(re[0], expected)
re = self.s.run("matrix(LONG,0,3)")
assert_array_equal(re[0], expected)
re = self.s.run("matrix(DATE,0,3)")
expected = np.empty((0,3),dtype="datetime64[ns]")
assert_array_equal(re[0], expected)
re = self.s.run("matrix(MONTH,0,3)")
assert_array_equal(re[0], expected)
re = self.s.run("matrix(TIME,0,3)")
assert_array_equal(re[0], expected)
re = self.s.run("matrix(MINUTE,0,3)")
assert_array_equal(re[0], expected)
re = self.s.run("matrix(SECOND,0,3)")
assert_array_equal(re[0], expected)
re = self.s.run("matrix(DATETIME,0,3)")
assert_array_equal(re[0], expected)
re = self.s.run("matrix(TIMESTAMP,0,3)")
assert_array_equal(re[0], expected)
re = self.s.run("matrix(NANOTIME,0,3)")
assert_array_equal(re[0], expected)
re = self.s.run("matrix(NANOTIMESTAMP,0,3)")
assert_array_equal(re[0], expected)
re = self.s.run("matrix(FLOAT,0,3)")
expected = np.empty((0,3),dtype="float32")
assert_array_equal(re[0], expected)
re = self.s.run("matrix(DOUBLE,0,3)")
assert_array_equal(re[0], expected)
re = self.s.run("matrix(SYMBOL,0,3)")
assert_array_equal(re[0], expected)
def test_all_null_matrix(self):
re = self.s.run("take(int(), 12)$3:4")
expected=[[np.NaN, np.NaN, np.NaN, np.NaN], [np.NaN, np.NaN, np.NaN, np.NaN], [np.NaN, np.NaN, np.NaN, np.NaN]]
assert_array_equal(re[0], expected)
re = self.s.run("[1, 2, NULL, 3, NULL, 4]$2:3")
expected=[[1, np.NaN, np.NaN], [2., 3., 4.]]
assert_array_equal(re[0], expected)
re = self.s.run("symbol(take(string(), 12))$3:4")
assert_array_equal(re[0][0], ['','','',''])
assert_array_equal(re[0][1], ['','','',''])
assert_array_equal(re[0][2], ['','','',''])
re = self.s.run("symbol(['AA', 'BB', NULL, 'CC', NULL, 'DD'])$2:3")
assert_array_equal(re[0][0], ['AA','',''])
assert_array_equal(re[0][1], ['BB','CC','DD'])
def test_huge_symbol_matrix(self):
re = self.s.run("m = symbol(string(1..1000000))$200:5000;m.rename!(1..200,1..5000);m")
assert_array_equal(re[1], np.arange(1, 201))
assert_array_equal(re[2], np.arange(1, 5001))
re = self.s.run("m = symbol(string(1..1000000))$200:5000;m.rename!(1..200,1..5000);table(m.rowNames() as label, m)")
assert_array_equal(re["label"], np.arange(1, 201))
j=1
for i in np.arange(1, 5001):
assert_series_equal(re.iloc[:,i], pd.Series([str(x) for x in np.arange(j, j+200)], index=np.arange(0, 200)),check_names=False)
j+=200
def test_int_matrix_with_label(self):
re = self.s.run("cross(add,1..5,1..10)")
expected = np.array(
[[2, 3, 4, 5, 6, 7, 8, 9, 10, 11], [3, 4, 5, 6, 7, 8, 9, 10, 11, 12], [4, 5, 6, 7, 8, 9, 10, 11, 12, 13],
[5, 6, 7, 8, 9, 10, 11, 12, 13, 14], [6, 7, 8, 9, 10, 11, 12, 13, 14, 15]])
#self.assertEqual((re == expected).all(), True)
assert_array_equal(re[0], expected)
assert_array_equal(re[1], np.array([1, 2, 3, 4, 5]))
assert_array_equal(re[2], np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]))
def test_matrix_only_with_row_label(self):
re = self.s.run("m=1..6$3:2;m.rename!([0, 1, 2],);m")
expected = [[1, 4], [2, 5], [3, 6]]
assert_array_equal(re[0], expected)
assert_array_equal(re[1], [0, 1, 2])
self.assertIsNone(re[2])
def test_matrix_only_with_col_label(self):
re = self.s.run("m=1..6$3:2;m.rename!([0, 1]);m")
expected = [[1, 4], [2, 5], [3, 6]]
assert_array_equal(re[0], expected)
self.assertIsNone(re[1])
assert_array_equal(re[2], [0, 1])
def test_matrix_label_date_symbol(self):
script='''
m=matrix([2200, 1300, 2500, 8800], [6800, 5400, NULL, NULL], [1900, 2100, 3200, NULL]).rename!(2012.01.01..2012.01.04, symbol(`C`IBM`MS));
m
'''
re = self.s.run(script)
expected=[[2200., 6800.,1900.],[1300.,5400.,2100.],[2500.,np.NaN,3200.],[8800.,np.NaN, np.NaN]]
assert_array_almost_equal(re[0], expected)
assert_array_equal(re[1], np.array(['2012-01-01T00:00:00.000000000', '2012-01-02T00:00:00.000000000','2012-01-03T00:00:00.000000000', '2012-01-04T00:00:00.000000000'], dtype="datetime64"))
assert_array_equal(re[2], ['C', 'IBM', 'MS'])
def test_matrix_label_second_symbol(self):
script='''
m=matrix([2200, 1300, 2500, 8800], [6800, 5400, NULL, NULL], [1900, 2100, 3200, NULL]).rename!([09:30:00, 10:00:00, 10:30:00, 11:00:00], `C`IBM`MS)
m
'''
re = self.s.run(script)
expected=[[2200., 6800.,1900.],[1300.,5400.,2100.],[2500.,np.NaN,3200.],[8800.,np.NaN, np.NaN]]
assert_array_almost_equal(re[0], expected)
assert_array_equal(re[1], np.array(['1970-01-01T09:30:00.000000000', '1970-01-01T10:00:00.000000000','1970-01-01T10:30:00.000000000', '1970-01-01T11:00:00.000000000'], dtype="datetime64"))
assert_array_equal(re[2], ['C', 'IBM', 'MS'])
def test_matrix_label_symbol_date(self):
script='''
m=matrix([2200, 1300, 2500, 8800], [6800, 5400, NULL, NULL], [1900, 2100, 3200, NULL]).rename!(`C`IBM`MS`ZZ, 2012.01.01..2012.01.03)
m
'''
re = self.s.run(script)
expected=[[2200., 6800.,1900.],[1300.,5400.,2100.],[2500.,np.NaN,3200.],[8800.,np.NaN, np.NaN]]
assert_array_almost_equal(re[0], expected)
assert_array_equal(re[1], ['C', 'IBM', 'MS', 'ZZ'])
assert_array_equal(re[2], np.array(['2012-01-01T00:00:00.000000000', '2012-01-02T00:00:00.000000000',
'2012-01-03T00:00:00.000000000'],dtype="datetime64"))
def test_table(self):
script = '''n=20;
syms=`IBM`C`MS`MSFT`JPM`ORCL`BIDU`SOHU`GE`EBAY`GOOG`FORD`GS`PEP`USO`GLD`GDX`EEM`FXI`SLV`SINA`BAC`AAPL`PALL`YHOO`KOH`TSLA`CS`CISO`SUN;
mytrades=table(09:30:00+rand(18000,n) as timestamp,rand(syms,n) as sym, 10*(1+rand(100,n)) as qty,5.0+rand(100.0,n) as price);
select qty,price from mytrades where sym==`IBM;'''
re = self.s.run(script)
self.assertEqual(re.shape[1], 2)
def test_dictionary(self):
script = '''dict(1 2 3,`IBM`MSFT`GOOG)'''
re = self.s.run(script)
expected = {2: 'MSFT', 3: 'GOOG', 1: 'IBM'}
self.assertDictEqual(re, expected)
def test_any_vector(self):
re = self.s.run("([1], [2],[1,3, 5],[0.9, 0.8])")
self.assertEqual((re[0] == [1]).all(), True)
self.assertEqual((re[1] == [2]).all(), True)
self.assertEqual((re[2] == [1, 3, 5]).all(), True)
def test_set(self):
re = self.s.run("set(1+3*1..3)")
self.assertSetEqual(re, {10, 4, 7})
def test_pair(self):
re = self.s.run("3:4")
self.assertListEqual(re, list([3, 4]))
def test_any_dictionary(self):
re = self.s.run("{a:1,b:2}")
expected = {'a': 1, 'b': 2}
self.assertDictEqual(re, expected)
def test_upload_matrix(self):
a = self.s.run("cross(+, 1..5, 1..5)")
b = self.s.run("1..25$5:5")
self.s.upload({'a': a, 'b': b})
re = self.s.run('a+b')
# print(re)
# self.assertEqual((re[0] == [3, 9, 15, 21, 27]).all(), True)
# self.assertEqual((re[1] == [5, 11, 17, 23, 29]).all(), True)
# self.assertEqual((re[2] == [7, 13, 19, 25, 31]).all(), True)
# self.assertEqual((re[3] == [9, 15, 21, 27, 33]).all(), True)
# self.assertEqual((re[4] == [11, 17, 23, 29, 35]).all(), True)
def test_run_plot(self):
script = '''
x=1..10
t = table(x as sin, x+100 as cos)
plot(t)
'''
re = self.s.run(script)
assert_array_equal(re['data'][0], np.array([[1, 101], [2, 102], [3, 103], [4, 104], [5, 105], [6, 106], [7, 107], [8, 108], [9, 109], [10, 110]]))
self.assertIsNone(re['data'][1])
assert_array_equal(re['data'][2], np.array(['sin', 'cos']))
assert_array_equal(re['title'], np.array(['', '', '']))
def test_table_datatypes(self):
script='''
n = 200
a = 100
v1 = string(1..n)
v2 = string(1..n)
v3 = take(int128("fcc69bca9885b51962660c23d08c124a"),n-a).join(take(int128("a428d55098d8e41e8adc4b7d04d8ede1"),a))
v4 = take(uuid("407c628e-d319-25c1-17ee-e5a73500a010"),n-a).join(take(uuid("d7a39280-1b18-8f56-160c-beabd428c934"),a))
v5 = take(ipaddr("fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b"),n-a).join(take(ipaddr("fc00:db20:35b:7399::5"),a))
t = table(n:n,`val1`val2`val3`val4`val5,[SYMBOL,STRING,INT128,UUID,IPADDR])
t[`val1] = v1
t[`val2] = v2
t[`val3] = v3
t[`val4] = v4
t[`val5] = v5
'''
self.s.run(script)
df1 = self.s.run("select val1 from t")
df2 = self.s.run("select val2 from t")
df3 = self.s.run("select val3 from t")
df4 = self.s.run("select val4 from t")
df5 = self.s.run("select val5 from t")
df = self.s.run("select * from t")
n = 200
a = 100
data1 = np.array(range(1,n+1),dtype="str")
data2 = np.append(np.repeat("fcc69bca9885b51962660c23d08c124a",n-a),np.repeat("a428d55098d8e41e8adc4b7d04d8ede1",a))
data3 = np.append(np.repeat("407c628e-d319-25c1-17ee-e5a73500a010",n-a),np.repeat("d7a39280-1b18-8f56-160c-beabd428c934",a))
data4 = np.append(np.repeat("fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b",n-a),np.repeat("3fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b",a))
ex1 = pd.DataFrame({"val1":data1})
ex2 = pd.DataFrame({"val2":data1})
ex3 = pd.DataFrame({"val3":data2})
ex4 = pd.DataFrame({"val4":data3})
ex5 = pd.DataFrame({"val5":data4})
ex = pd.DataFrame({"val1":data1,"val2":data1,"val3":data2,"val4":data3,"val5":data4})
assert_frame_equal(df1, ex1)
assert_frame_equal(df2, ex2)
assert_frame_equal(df3, ex3)
assert_frame_equal(df4, ex4)
assert_frame_equal(df5, ex5)
assert_frame_equal(df, ex)
def test_table_datatypes_with_null(self):
script='''
n = 100
a = 50
v1=string(1..(n-a)).join(take(string(),a))
v2 = v1
v3 = take(int128("fcc69bca9885b51962660c23d08c124a"),n-a).join(take(int128(),a))
v4 = take(uuid("407c628e-d319-25c1-17ee-e5a73500a010"),n-a).join(take(uuid(),a))
v5 = take(ipaddr("fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b"),n-a).join(take(ipaddr(),a))
t = table(n:n,`val1`val2`val3`val4`val5,[SYMBOL,STRING,INT128,UUID,IPADDR])
t[`val1] = v1
t[`val2] = v2
t[`val3] = v3
t[`val4] = v4
t[`val5] = v5
'''
self.s.run(script)
df1 = self.s.run("select val1 from t")
df2 = self.s.run("select val2 from t")
df3 = self.s.run("select val3 from t")
df4 = self.s.run("select val4 from t")
df5 = self.s.run("select val5 from t")
df = self.s.run("select * from t")
n = 100
a = 50
arr1 = np.append(np.array(range(1,n-a+1),dtype="str"),np.repeat("",a))
arr2 = np.append(np.repeat("fcc69bca9885b51962660c23d08c124a",n-a),np.repeat("00000000000000000000000000000000",a))
arr3 = np.append(np.repeat("407c628e-d319-25c1-17ee-e5a73500a010",n-a),np.repeat("00000000-0000-0000-0000-000000000000",a))
arr4 = np.append(np.repeat("4fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b",n-a),np.repeat("0.0.0.0",a))
ex1 = pd.DataFrame(arr1,columns=["val1"])
ex2 = pd.DataFrame(arr1,columns=["val2"])  # completion; api: pandas.DataFrame
# -*- coding: utf-8 -*-
"""
Important Variable Selection with SNPs
Created on Fri Jan 31 16:31:01 2020
@author: <NAME>
"""
# Import the libraries
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.svm import SVR
from sklearn.linear_model import MultiTaskLassoCV, MultiTaskElasticNetCV, LassoCV, ElasticNetCV, MultiTaskElasticNet, MultiTaskLasso
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import r2_score, mean_squared_error
# Using chunk size to read rice data
def read_x_cont():
chunksize = 100
X_ct = pd.DataFrame()
for chunk in pd.read_csv("X_cont_ls_el.csv",low_memory=False, chunksize=chunksize, memory_map=True):
X_ct = pd.concat([X_ct, chunk])
return(X_ct)
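# A minimal alternative sketch (not used by the pipeline): collecting the chunks in a
# list and concatenating once avoids re-allocating the growing DataFrame on every
# iteration; the result is the same as read_x_cont().
def _read_x_cont_single_concat(path="X_cont_ls_el.csv", chunksize=100):
    chunks = list(pd.read_csv(path, low_memory=False, chunksize=chunksize, memory_map=True))
    return pd.concat(chunks)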
# Function of data preprocessing
def process_variable(X, y):
# Drop 'IID' columns
X = X.drop('IID', axis = 1)
# Split data to training and testing set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=10)
# Convert from integer to float
X_train= X_train.astype(float, 32)
X_test = X_test.astype(float, 32)
# Apply the same scaling to both datasets
scaler = StandardScaler()
X_train_scl = scaler.fit_transform(X_train)
X_test_scl = scaler.transform(X_test) # we transform rather than fit_transform
return(X_train_scl, X_test_scl, y_train, y_test)
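# Illustrative usage sketch (hypothetical file names, not part of the original run):
# process_variable expects a predictor table with an 'IID' column and a 1-D response.
def _demo_process_variable():
    X_demo = pd.read_csv("X_demo.csv", header='infer')
    y_demo = pd.read_csv("y_demo.csv", header=None).values.ravel()
    X_tr, X_te, y_tr, y_te = process_variable(X_demo, y_demo)
    return X_tr.shape, X_te.shape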
"""Random Forest Regressor"""
#Function to run random forest with grid search and k-fold cross-validation.
def get_rf_model(X_train, y_train, X_test, y_test):
# Hyperparameters search grid
rf_param_grid = {'bootstrap': [False, True],
'n_estimators': [60, 70, 80, 90, 100],
'max_features': [0.6, 0.65, 0.7, 0.75, 0.8],
'min_samples_leaf': [1],
'min_samples_split': [2]
}
# Instantiate random forest regressor
rf_estimator = RandomForestRegressor(random_state=None)
# Create the GridSearchCV object
rf_model = GridSearchCV(estimator=rf_estimator, param_grid=rf_param_grid, cv=10, scoring='neg_mean_squared_error', n_jobs=-1, iid = True)
# Train the regressor
rf_model.fit(X_train, y_train)
# Get the best model
rf_model_best = rf_model.best_estimator_
# Make predictions using the optimised parameters
rf_pred = rf_model_best.predict(X_test)
# Find mean squared error
mse = mean_squared_error(y_test, rf_pred)
# Find r-squared
r2 = r2_score(y_test, rf_pred)
best_prs = rf_model.best_params_
print("Best Parameters:\n", rf_model.best_params_)
print("Best Score:\n", 'mse:', mse, 'r2:', r2)
return(mse, r2, best_prs)
"""Support Vector Regressor"""
#Function to run support vector machine with grid search and k-fold cross-validation.
def get_svm_model(X_train, y_train, X_test, y_test):
# Parameter grid
svm_param_grid = {'C': [0.1, 1, 10, 100], 'gamma': [1, 0.1, 0.01, 0.001, 0.0001, 10], "kernel": ["rbf"]}
# Create SVM grid search regressor
svm_grid = GridSearchCV(estimator = SVR(), param_grid= svm_param_grid, cv=10, scoring='neg_mean_squared_error', n_jobs=-1, iid = True)
# Train the regressor
svm_grid.fit(X_train, y_train)
# Get the best model
svm_model_best = svm_grid.best_estimator_
# Make predictions using the optimised parameters
svm_pred = svm_model_best.predict(X_test)
# Find mean squared error
mse = mean_squared_error(y_test, svm_pred)
# Find r-squared
r2 = r2_score(y_test, svm_pred)
best_prs = svm_grid.best_params_
print("Best Parameters:\n", svm_grid.best_params_)
print("Best Score:\n", 'mse:', mse, 'r2:', r2)
return(mse, r2, best_prs)
"""Lasso and Multi Task Lasso"""
#Lasso
def get_lasso_cv(X_train, y_train, X_test, y_test, cols):
# Create Lasso CV
ls_grid = LassoCV(cv = 10, random_state = 0, n_jobs = -1)
# Train the regressor
ls_grid.fit(X_train, y_train)
# Make predictions using the optimised parameters
ls_pred = ls_grid.predict(X_test)
# Find mean squared error
mse = mean_squared_error(y_test, ls_pred)
# Find r-squared
r2 = r2_score(y_test, ls_pred)
best_prs = ls_grid.alpha_
print("Best Parameters:\n", best_prs)
print("Best Score:\n", 'mse:', mse, 'r2:', r2)
# Get coefficients of the model
coef = pd.DataFrame(ls_grid.coef_.T, index = cols)
var = list(coef[coef[0] != 0].index)
print(coef.head())
print("Lasso picked " + str(sum(coef[0] != 0)) + " variables and eliminated the other " + str(sum(coef[0] == 0)) + " variables")
return(mse, r2, var, best_prs)
# Multi-task Lasso
def get_multitask_lasso_cv(X_train, y_train, X_test, y_test, cols):
# Create Multi-task Lasso CV
ls_grid = MultiTaskLassoCV(cv = 10, random_state = 0, n_jobs = -1)
# Train the regressor
ls_grid.fit(X_train, y_train)
# Make predictions using the optimised parameters
ls_pred = ls_grid.predict(X_test)
# Find mean squared error
mse = mean_squared_error(y_test, ls_pred)
# Find r-squared
r2 = r2_score(y_test, ls_pred)
best_prs = ls_grid.alpha_
print("Best Parameters:\n", best_prs)
print("Best Score:\n", 'mse:', mse, 'r2:', r2)
# Get coefficients of the model
coef = pd.DataFrame(ls_grid.coef_.T, index = cols)
var = list(coef[coef[0] != 0].index)
print(coef.head())
print("Multit-task Lasso picked " + str(sum(coef[0] != 0)) + " variables and eliminated the other " + str(sum(coef[0] == 0)) + " variables")
return(mse, r2, var, best_prs)
"""Elastic Net and Multi Task Elastic Net"""
# Elastic Net
def get_elasticnet_cv(X_train, y_train, X_test, y_test, cols):
# Create Elastic Net CV
el_grid = ElasticNetCV(cv = 10, random_state = 0, n_jobs = -1)
# Train the regressor
el_grid.fit(X_train, y_train)
# Make predictions using the optimised parameters
el_pred = el_grid.predict(X_test)
# Find mean squared error
mse = mean_squared_error(y_test, el_pred)
# Find r-squared
r2 = r2_score(y_test, el_pred)
best_prs = [el_grid.alpha_]
best_prs.append(el_grid.l1_ratio_)
print("Best Parameters:\n", best_prs)
print("Best Score:\n", 'mse:', mse, 'r-squared:', r2)
# Get coefficients of the model
coef = pd.DataFrame(el_grid.coef_.T, index = cols)
var = list(coef[coef[0] != 0].index)
print(coef.head())
print("ElasticNet picked " + str(sum(coef[0] != 0)) + " variables and eliminated the other " + str(sum(coef[0] == 0)) + " variables")
return(mse, r2, var, best_prs)
# Multi-task Elastic Net
def get_multitask_elasticnet_cv(X_train, y_train, X_test, y_test, cols):
# Create Multi Task Elastic Net CV
el_grid = MultiTaskElasticNetCV(cv = 10, random_state = 0, n_jobs = -1)
# Train the regressor
el_grid.fit(X_train, y_train)
# Make predictions using the optimised parameters
el_pred = el_grid.predict(X_test)
# Find mean squared error
mse = mean_squared_error(y_test, el_pred)
# Find r-squared
r2 = r2_score(y_test, el_pred)
best_prs = [el_grid.alpha_]
best_prs.append(el_grid.l1_ratio_)
print("Best Parameters:\n", best_prs)
print("Best Score:\n", 'mse:', mse, 'r-squared:', r2)
# Get coefficients of the model
coef = pd.DataFrame(el_grid.coef_.T, index = cols)
var = list(coef[coef[0] != 0].index)
print(coef.head())
print("Multi-task ElasticNet picked " + str(sum(coef[0] != 0)) + " variables and eliminated the other " + str(sum(coef[0] == 0)) + " variables")
return(mse, r2, var, best_prs)
# Evaluate each trait separately with multi-task Lasso
def eval_mtls_split_trait(alpha, X_train, Y_train, X_test, Y_test):
# Create Multi-Task Lasso
ls_tfl_grw = MultiTaskLasso(alpha, random_state = 0)
# Train the regressor
ls_tfl_grw.fit(X_train, Y_train)
# Make predictions using the optimised parameters
ls_pred = ls_tfl_grw.predict(X_test)
# Find mean squared error
mse_tfl = mean_squared_error(Y_test[:, 0], ls_pred[:, 0])
mse_grw= mean_squared_error(Y_test[:, 1], ls_pred[:, 1])
# Find r-squared
r2_tfl = r2_score(Y_test[:, 0], ls_pred[:, 0])
r2_grw = r2_score(Y_test[:, 1], ls_pred[:, 1])
return(mse_tfl, mse_grw, r2_tfl, r2_grw)
# Evaluate each trait separately with multi-task Elastic Net
def eval_mtel_split_trait(alpha, l1_ratio, X_train, Y_train, X_test, Y_test):
# Create Multi-Task Elastic Net
el_tfl_grw = MultiTaskElasticNet(alpha, l1_ratio, random_state = 0)
# Train the regressor
el_tfl_grw.fit(X_train, Y_train)
# Make predictions using the optimised parameters
el_pred = el_tfl_grw.predict(X_test)
# Find mean squared error
mse_tfl = mean_squared_error(Y_test[:, 0], el_pred[:, 0])
mse_grw= mean_squared_error(Y_test[:, 1], el_pred[:, 1])
# Find r-squared
r2_tfl = r2_score(Y_test[:, 0], el_pred[:, 0])
r2_grw = r2_score(Y_test[:, 1], el_pred[:, 1])
return(mse_tfl, mse_grw, r2_tfl, r2_grw)
if __name__ == '__main__':
print("")
print("")
print("|============================================================================|")
print("| |")
print("| ----- IMPORTANT VARIABLE SELECTION WITH SNPS ----- |")
print("| |")
print("|============================================================================|")
print("")
print("")
print("********************************* INPUT DATA *********************************")
print("")
print("Import data may take several minutes, please wait...")
print("")
# Import data
X_cont = read_x_cont()
cols = X_cont.columns[1::]
# Load data after pre-processing
y_tfl = pd.read_csv("y_tfl.csv", header=None)
y_grw = pd.read_csv("y_grw.csv", header=None)
y_tfl_grw = pd.read_csv("y_tfl_grw.csv", header=None)
X_grw_2 = pd.read_csv("X_grw_2.csv", header='infer')
X_grw_3 = pd.read_csv("X_grw_3.csv", header='infer')
X_grw_4 = pd.read_csv("X_grw_4.csv", header='infer')
X_grw_5 = pd.read_csv("X_grw_5.csv", header='infer')
X_tfl_2 = pd.read_csv("X_tfl_2.csv", header='infer')
X_tfl_3 = pd.read_csv("X_tfl_3.csv", header='infer')
X_tfl_4 = pd.read_csv("X_tfl_4.csv", header='infer')
X_tfl_5 = pd.read_csv("X_tfl_5.csv", header='infer')
X_tfl_6 = pd.read_csv("X_tfl_6.csv", header='infer')
X_tfl_grw_2 = pd.read_csv("X_tfl_grw_2.csv", header='infer')
X_tfl_grw_25 = pd.read_csv("X_tfl_grw_25.csv", header='infer')
X_tfl_grw_1 = pd.read_csv("X_tfl_grw_1.csv", header='infer')
X_tfl_grw_75 = pd.read_csv("X_tfl_grw_75.csv", header='infer')
X_tfl_grw_3 = pd.read_csv("X_tfl_grw_3.csv", header='infer')
print("")
# Transform response variables to matrix type.
y_tfl = y_tfl.values.ravel()
y_grw = y_grw.values.ravel()
y_tfl_grw = y_tfl_grw.values
# Normalize rice data
X_grw_2_train, X_grw_2_test, y_grw_2_train, y_grw_2_test = process_variable(X_grw_2, y_grw)
X_grw_3_train, X_grw_3_test, y_grw_3_train, y_grw_3_test = process_variable(X_grw_3, y_grw)
X_grw_4_train, X_grw_4_test, y_grw_4_train, y_grw_4_test = process_variable(X_grw_4, y_grw)
X_grw_5_train, X_grw_5_test, y_grw_5_train, y_grw_5_test = process_variable(X_grw_5, y_grw)
X_tfl_2_train, X_tfl_2_test, y_tfl_2_train, y_tfl_2_test = process_variable(X_tfl_2, y_tfl)
X_tfl_3_train, X_tfl_3_test, y_tfl_3_train, y_tfl_3_test = process_variable(X_tfl_3, y_tfl)
X_tfl_4_train, X_tfl_4_test, y_tfl_4_train, y_tfl_4_test = process_variable(X_tfl_4, y_tfl)
X_tfl_5_train, X_tfl_5_test, y_tfl_5_train, y_tfl_5_test = process_variable(X_tfl_5, y_tfl)
X_tfl_6_train, X_tfl_6_test, y_tfl_6_train, y_tfl_6_test = process_variable(X_tfl_6, y_tfl)
X_tfl_grw_2_train, X_tfl_grw_2_test, y_tfl_grw_2_train, y_tfl_grw_2_test = process_variable(X_tfl_grw_2, y_tfl_grw)
X_tfl_grw_25_train, X_tfl_grw_25_test, y_tfl_grw_25_train, y_tfl_grw_25_test = process_variable(X_tfl_grw_25, y_tfl_grw)
X_tfl_grw_1_train, X_tfl_grw_1_test, y_tfl_grw_1_train, y_tfl_grw_1_test = process_variable(X_tfl_grw_1, y_tfl_grw)
X_tfl_grw_75_train, X_tfl_grw_75_test, y_tfl_grw_75_train, y_tfl_grw_75_test = process_variable(X_tfl_grw_75, y_tfl_grw)
X_tfl_grw_3_train, X_tfl_grw_3_test, y_tfl_grw_3_train, y_tfl_grw_3_test = process_variable(X_tfl_grw_3, y_tfl_grw)
X_grw_train, X_grw_test, y_grw_train, y_grw_test = process_variable(X_cont, y_grw)
X_tfl_train, X_tfl_test, y_tfl_train, y_tfl_test = process_variable(X_cont, y_tfl)
X_tfl_grw_train, X_tfl_grw_test, y_tfl_grw_train, y_tfl_grw_test = process_variable(X_cont, y_tfl_grw)
print("")
print("******************************* TRAINING MODELS *****************************")
print("")
rf_grw_mse = []
rf_grw_r2 = []
rf_tfl_mse = []
rf_tfl_r2 = []
rf_grw_prs = []
rf_tfl_prs = []
rf_tfl_grw_mse_0 = []
rf_tfl_grw_r2_0 = []
rf_tfl_grw_prs_0 = []
rf_tfl_grw_mse_1 = []
rf_tfl_grw_r2_1 = []
rf_tfl_grw_prs_1 = []
svr_grw_mse = []
svr_grw_r2 = []
svr_tfl_mse = []
svr_tfl_r2 = []
svr_grw_prs = []
svr_tfl_prs = []
svr_tfl_grw_mse_0 = []
svr_tfl_grw_r2_0 = []
svr_tfl_grw_prs_0 = []
svr_tfl_grw_mse_1 = []
svr_tfl_grw_r2_1 = []
svr_tfl_grw_prs_1 = []
# Filtering variables by p_value.
p_value = ['<=5e-6', '<=5e-5', '<=5e-4', '<=5e-3', '<=5e-2']
p_value_2 = ['<=5e-3','<=7.5e-3', '<=1e-2', '<=2.5e-2', '<=5e-2']
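# Note (inferred from how the result lists are reversed and indexed below): the numeric
# suffix of each X_* file encodes the GWAS p-value cut-off used to pre-filter SNPs,
# e.g. X_tfl_6 <-> '<=5e-6', X_tfl_2 <-> '<=5e-2', and X_tfl_grw_75 <-> '<=7.5e-3'.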
print("Find mse and r-squared for random forest model of grain weight...")
rf_grw_mse_2, rf_grw_r2_2, rf_grw_prs_2 = get_rf_model(X_grw_2_train, y_grw_2_train, X_grw_2_test, y_grw_2_test)
rf_grw_mse.append(rf_grw_mse_2)
rf_grw_r2.append(rf_grw_r2_2)
rf_grw_prs.append(rf_grw_prs_2)
rf_grw_mse_3, rf_grw_r2_3, rf_grw_prs_3 = get_rf_model(X_grw_3_train, y_grw_3_train, X_grw_3_test, y_grw_3_test)
rf_grw_mse.append(rf_grw_mse_3)
rf_grw_r2.append(rf_grw_r2_3)
rf_grw_prs.append(rf_grw_prs_3)
rf_grw_mse_4, rf_grw_r2_4, rf_grw_prs_4 = get_rf_model(X_grw_4_train, y_grw_4_train, X_grw_4_test, y_grw_4_test)
rf_grw_mse.append(rf_grw_mse_4)
rf_grw_r2.append(rf_grw_r2_4)
rf_grw_prs.append(rf_grw_prs_4)
rf_grw_mse_5, rf_grw_r2_5, rf_grw_prs_5 = get_rf_model(X_grw_5_train, y_grw_5_train, X_grw_5_test, y_grw_5_test)
rf_grw_mse.append(rf_grw_mse_5)
rf_grw_r2.append(rf_grw_r2_5)
rf_grw_prs.append(rf_grw_prs_5)
rf_grw = pd.DataFrame({'rf_grw_mse':rf_grw_mse[::-1], 'rf_grw_r2':rf_grw_r2[::-1], 'rf_grw_prs':rf_grw_prs[::-1]})
rf_grw.set_index(pd.Index(p_value[1:5]), 'p_value', inplace = True)
rf_grw.to_csv('rf_grw.csv')
print('RF of grain weight is saved')
print("Find mse and r-squared for random forest model of time to flowering...")
rf_tfl_mse_2, rf_tfl_r2_2, rf_tfl_prs_2 = get_rf_model(X_tfl_2_train, y_tfl_2_train, X_tfl_2_test, y_tfl_2_test)
rf_tfl_mse.append(rf_tfl_mse_2)
rf_tfl_r2.append(rf_tfl_r2_2)
rf_tfl_prs.append(rf_tfl_prs_2)
rf_tfl_mse_3, rf_tfl_r2_3, rf_tfl_prs_3 = get_rf_model(X_tfl_3_train, y_tfl_3_train, X_tfl_3_test, y_tfl_3_test)
rf_tfl_mse.append(rf_tfl_mse_3)
rf_tfl_r2.append(rf_tfl_r2_3)
rf_tfl_prs.append(rf_tfl_prs_3)
rf_tfl_mse_4, rf_tfl_r2_4, rf_tfl_prs_4 = get_rf_model(X_tfl_4_train, y_tfl_4_train, X_tfl_4_test, y_tfl_4_test)
rf_tfl_mse.append(rf_tfl_mse_4)
rf_tfl_r2.append(rf_tfl_r2_4)
rf_tfl_prs.append(rf_tfl_prs_4)
rf_tfl_mse_5, rf_tfl_r2_5, rf_tfl_prs_5 = get_rf_model(X_tfl_5_train, y_tfl_5_train, X_tfl_5_test, y_tfl_5_test)
rf_tfl_mse.append(rf_tfl_mse_5)
rf_tfl_r2.append(rf_tfl_r2_5)
rf_tfl_prs.append(rf_tfl_prs_5)
rf_tfl_mse_6, rf_tfl_r2_6, rf_tfl_prs_6 = get_rf_model(X_tfl_6_train, y_tfl_6_train, X_tfl_6_test, y_tfl_6_test)
rf_tfl_mse.append(rf_tfl_mse_6)
rf_tfl_r2.append(rf_tfl_r2_6)
rf_tfl_prs.append(rf_tfl_prs_6)
rf_tfl = pd.DataFrame({'rf_tfl_mse':rf_tfl_mse[::-1], 'rf_tfl_r2':rf_tfl_r2[::-1], 'rf_tfl_prs':rf_tfl_prs[::-1]})
rf_tfl.set_index(pd.Index(p_value), 'p_value', inplace = True)
rf_tfl.to_csv('rf_tfl.csv')
print('RF of time to flowering is saved')
print("Find mse and r-squared for random forest model of time to flowering and grain weight...")
# Output is time to flowering
rf_tfl_grw_mse_2_0, rf_tfl_grw_r2_2_0, rf_tfl_grw_prs_2_0 = get_rf_model(X_tfl_grw_2_train, y_tfl_grw_2_train[:, 0], X_tfl_grw_2_test, y_tfl_grw_2_test[:, 0])
rf_tfl_grw_mse_0.append(rf_tfl_grw_mse_2_0)
rf_tfl_grw_r2_0.append(rf_tfl_grw_r2_2_0)
rf_tfl_grw_prs_0.append(rf_tfl_grw_prs_2_0)
rf_tfl_grw_mse_25_0, rf_tfl_grw_r2_25_0, rf_tfl_grw_prs_25_0 = get_rf_model(X_tfl_grw_25_train, y_tfl_grw_25_train[:, 0], X_tfl_grw_25_test, y_tfl_grw_25_test[:, 0])
rf_tfl_grw_mse_0.append(rf_tfl_grw_mse_25_0)
rf_tfl_grw_r2_0.append(rf_tfl_grw_r2_25_0)
rf_tfl_grw_prs_0.append(rf_tfl_grw_prs_25_0)
rf_tfl_grw_mse_1_0, rf_tfl_grw_r2_1_0, rf_tfl_grw_prs_1_0 = get_rf_model(X_tfl_grw_1_train, y_tfl_grw_1_train[:, 0], X_tfl_grw_1_test, y_tfl_grw_1_test[:, 0])
rf_tfl_grw_mse_0.append(rf_tfl_grw_mse_1_0)
rf_tfl_grw_r2_0.append(rf_tfl_grw_r2_1_0)
rf_tfl_grw_prs_0.append(rf_tfl_grw_prs_1_0)
rf_tfl_grw_mse_75_0, rf_tfl_grw_r2_75_0, rf_tfl_grw_prs_75_0 = get_rf_model(X_tfl_grw_75_train, y_tfl_grw_75_train[:, 0], X_tfl_grw_75_test, y_tfl_grw_75_test[:, 0])
rf_tfl_grw_mse_0.append(rf_tfl_grw_mse_75_0)
rf_tfl_grw_r2_0.append(rf_tfl_grw_r2_75_0)
rf_tfl_grw_prs_0.append(rf_tfl_grw_prs_75_0)
rf_tfl_grw_mse_3_0, rf_tfl_grw_r2_3_0, rf_tfl_grw_prs_3_0 = get_rf_model(X_tfl_grw_3_train, y_tfl_grw_3_train[:, 0], X_tfl_grw_3_test, y_tfl_grw_3_test[:, 0])
rf_tfl_grw_mse_0.append(rf_tfl_grw_mse_3_0)
rf_tfl_grw_r2_0.append(rf_tfl_grw_r2_3_0)
rf_tfl_grw_prs_0.append(rf_tfl_grw_prs_3_0)
rf_tfl_grw_0 = pd.DataFrame({'rf_tfl_grw_mse_0':rf_tfl_grw_mse_0[::-1], 'rf_tfl_grw_r2_0':rf_tfl_grw_r2_0[::-1], 'rf_tfl_grw_prs_0':rf_tfl_grw_prs_0[::-1]})
rf_tfl_grw_0.set_index(pd.Index(p_value_2), 'p_value', inplace = True)
rf_tfl_grw_0.to_csv('rf_tfl_grw_0.csv')
# Output is grain weight
rf_tfl_grw_mse_2_1, rf_tfl_grw_r2_2_1, rf_tfl_grw_prs_2_1 = get_rf_model(X_tfl_grw_2_train, y_tfl_grw_2_train[:, 1], X_tfl_grw_2_test, y_tfl_grw_2_test[:, 1])
rf_tfl_grw_mse_1.append(rf_tfl_grw_mse_2_1)
rf_tfl_grw_r2_1.append(rf_tfl_grw_r2_2_1)
rf_tfl_grw_prs_1.append(rf_tfl_grw_prs_2_1)
rf_tfl_grw_mse_25_1, rf_tfl_grw_r2_25_1, rf_tfl_grw_prs_25_1 = get_rf_model(X_tfl_grw_25_train, y_tfl_grw_25_train[:, 1], X_tfl_grw_25_test, y_tfl_grw_25_test[:, 1])
rf_tfl_grw_mse_1.append(rf_tfl_grw_mse_25_1)
rf_tfl_grw_r2_1.append(rf_tfl_grw_r2_25_1)
rf_tfl_grw_prs_1.append(rf_tfl_grw_prs_25_1)
rf_tfl_grw_mse_1_1, rf_tfl_grw_r2_1_1, rf_tfl_grw_prs_1_1 = get_rf_model(X_tfl_grw_1_train, y_tfl_grw_1_train[:, 1], X_tfl_grw_1_test, y_tfl_grw_1_test[:, 1])
rf_tfl_grw_mse_1.append(rf_tfl_grw_mse_1_1)
rf_tfl_grw_r2_1.append(rf_tfl_grw_r2_1_1)
rf_tfl_grw_prs_1.append(rf_tfl_grw_prs_1_1)
rf_tfl_grw_mse_75_1, rf_tfl_grw_r2_75_1, rf_tfl_grw_prs_75_1 = get_rf_model(X_tfl_grw_75_train, y_tfl_grw_75_train[:, 1], X_tfl_grw_75_test, y_tfl_grw_75_test[:, 1])
rf_tfl_grw_mse_1.append(rf_tfl_grw_mse_75_1)
rf_tfl_grw_r2_1.append(rf_tfl_grw_r2_75_1)
rf_tfl_grw_prs_1.append(rf_tfl_grw_prs_75_1)
rf_tfl_grw_mse_3_1, rf_tfl_grw_r2_3_1, rf_tfl_grw_prs_3_1 = get_rf_model(X_tfl_grw_3_train, y_tfl_grw_3_train[:, 1], X_tfl_grw_3_test, y_tfl_grw_3_test[:, 1])
rf_tfl_grw_mse_1.append(rf_tfl_grw_mse_3_1)
rf_tfl_grw_r2_1.append(rf_tfl_grw_r2_3_1)
rf_tfl_grw_prs_1.append(rf_tfl_grw_prs_3_1)
rf_tfl_grw_1 = pd.DataFrame({'rf_tfl_grw_mse_1':rf_tfl_grw_mse_1[::-1], 'rf_tfl_grw_r2_1':rf_tfl_grw_r2_1[::-1], 'rf_tfl_grw_prs_1':rf_tfl_grw_prs_1[::-1]})
rf_tfl_grw_1.set_index(pd.Index(p_value_2), 'p_value', inplace = True)
rf_tfl_grw_1.to_csv('rf_tfl_grw_1.csv')
print('RF of time to flowering and grain weight is saved')
print("Find mse and r-squared for svm model of grain weight...")
svr_grw_mse_2, svr_grw_r2_2, svr_grw_prs_2 = get_svm_model(X_grw_2_train, y_grw_2_train, X_grw_2_test, y_grw_2_test)
svr_grw_mse.append(svr_grw_mse_2)
svr_grw_r2.append(svr_grw_r2_2)
svr_grw_prs.append(svr_grw_prs_2)
svr_grw_mse_3, svr_grw_r2_3, svr_grw_prs_3 = get_svm_model(X_grw_3_train, y_grw_3_train, X_grw_3_test, y_grw_3_test)
svr_grw_mse.append(svr_grw_mse_3)
svr_grw_r2.append(svr_grw_r2_3)
svr_grw_prs.append(svr_grw_prs_3)
svr_grw_mse_4, svr_grw_r2_4, svr_grw_prs_4 = get_svm_model(X_grw_4_train, y_grw_4_train, X_grw_4_test, y_grw_4_test)
svr_grw_mse.append(svr_grw_mse_4)
svr_grw_r2.append(svr_grw_r2_4)
svr_grw_prs.append(svr_grw_prs_4)
svr_grw_mse_5, svr_grw_r2_5, svr_grw_prs_5 = get_svm_model(X_grw_5_train, y_grw_5_train, X_grw_5_test, y_grw_5_test)
svr_grw_mse.append(svr_grw_mse_5)
svr_grw_r2.append(svr_grw_r2_5)
svr_grw_prs.append(svr_grw_prs_5)
svr_grw = pd.DataFrame({'svr_grw_mse':svr_grw_mse[::-1], 'svr_grw_r2':svr_grw_r2[::-1], 'svr_grw_prs':svr_grw_prs[::-1]})
svr_grw.set_index(pd.Index(p_value[1:5]), 'p_value', inplace = True)
svr_grw.to_csv('svr_grw.csv')
print('SVR of grain weight is saved')
print("Find mse and r-squared for svm model of time to flowering...")
svr_tfl_mse_2, svr_tfl_r2_2, svr_tfl_prs_2 = get_svm_model(X_tfl_2_train, y_tfl_2_train, X_tfl_2_test, y_tfl_2_test)
svr_tfl_mse.append(svr_tfl_mse_2)
svr_tfl_r2.append(svr_tfl_r2_2)
svr_tfl_prs.append(svr_tfl_prs_2)
svr_tfl_mse_3, svr_tfl_r2_3, svr_tfl_prs_3 = get_svm_model(X_tfl_3_train, y_tfl_3_train, X_tfl_3_test, y_tfl_3_test)
svr_tfl_mse.append(svr_tfl_mse_3)
svr_tfl_r2.append(svr_tfl_r2_3)
svr_tfl_prs.append(svr_tfl_prs_3)
svr_tfl_mse_4, svr_tfl_r2_4, svr_tfl_prs_4 = get_svm_model(X_tfl_4_train, y_tfl_4_train, X_tfl_4_test, y_tfl_4_test)
svr_tfl_mse.append(svr_tfl_mse_4)
svr_tfl_r2.append(svr_tfl_r2_4)
svr_tfl_prs.append(svr_tfl_prs_4)
svr_tfl_mse_5, svr_tfl_r2_5, svr_tfl_prs_5 = get_svm_model(X_tfl_5_train, y_tfl_5_train, X_tfl_5_test, y_tfl_5_test)
svr_tfl_mse.append(svr_tfl_mse_5)
svr_tfl_r2.append(svr_tfl_r2_5)
svr_tfl_prs.append(svr_tfl_prs_5)
svr_tfl_mse_6, svr_tfl_r2_6, svr_tfl_prs_6 = get_svm_model(X_tfl_6_train, y_tfl_6_train, X_tfl_6_test, y_tfl_6_test)
svr_tfl_mse.append(svr_tfl_mse_6)
svr_tfl_r2.append(svr_tfl_r2_6)
svr_tfl_prs.append(svr_tfl_prs_6)
svr_tfl = pd.DataFrame({'svr_tfl_mse':svr_tfl_mse[::-1], 'svr_tfl_r2':svr_tfl_r2[::-1], 'svr_tfl_prs':svr_tfl_prs[::-1]})
svr_tfl.set_index(pd.Index(p_value), 'p_value', inplace = True)
svr_tfl.to_csv('svr_tfl.csv')
print('SVR of time to flowering is saved')
print("Find mse and r-squared for svm model of time to flowering and grain weight... ")
# Output is time to flowering
svr_tfl_grw_mse_2_0, svr_tfl_grw_r2_2_0, svr_tfl_grw_prs_2_0 = get_svm_model(X_tfl_grw_2_train, y_tfl_grw_2_train[:, 0], X_tfl_grw_2_test, y_tfl_grw_2_test[:, 0])
svr_tfl_grw_mse_0.append(svr_tfl_grw_mse_2_0)
svr_tfl_grw_r2_0.append(svr_tfl_grw_r2_2_0)
svr_tfl_grw_prs_0.append(svr_tfl_grw_prs_2_0)
svr_tfl_grw_mse_25_0, svr_tfl_grw_r2_25_0, svr_tfl_grw_prs_25_0 = get_svm_model(X_tfl_grw_25_train, y_tfl_grw_25_train[:, 0], X_tfl_grw_25_test, y_tfl_grw_25_test[:, 0])
svr_tfl_grw_mse_0.append(svr_tfl_grw_mse_25_0)
svr_tfl_grw_r2_0.append(svr_tfl_grw_r2_25_0)
svr_tfl_grw_prs_0.append(svr_tfl_grw_prs_25_0)
svr_tfl_grw_mse_1_0, svr_tfl_grw_r2_1_0, svr_tfl_grw_prs_1_0 = get_svm_model(X_tfl_grw_1_train, y_tfl_grw_1_train[:, 0], X_tfl_grw_1_test, y_tfl_grw_1_test[:, 0])
svr_tfl_grw_mse_0.append(svr_tfl_grw_mse_1_0)
svr_tfl_grw_r2_0.append(svr_tfl_grw_r2_1_0)
svr_tfl_grw_prs_0.append(svr_tfl_grw_prs_1_0)
svr_tfl_grw_mse_75_0, svr_tfl_grw_r2_75_0, svr_tfl_grw_prs_75_0 = get_svm_model(X_tfl_grw_75_train, y_tfl_grw_75_train[:, 0], X_tfl_grw_75_test, y_tfl_grw_75_test[:, 0])
svr_tfl_grw_mse_0.append(svr_tfl_grw_mse_75_0)
svr_tfl_grw_r2_0.append(svr_tfl_grw_r2_75_0)
svr_tfl_grw_prs_0.append(svr_tfl_grw_prs_75_0)
svr_tfl_grw_mse_3_0, svr_tfl_grw_r2_3_0, svr_tfl_grw_prs_3_0 = get_svm_model(X_tfl_grw_3_train, y_tfl_grw_3_train[:, 0], X_tfl_grw_3_test, y_tfl_grw_3_test[:, 0])
svr_tfl_grw_mse_0.append(svr_tfl_grw_mse_3_0)
svr_tfl_grw_r2_0.append(svr_tfl_grw_r2_3_0)
svr_tfl_grw_prs_0.append(svr_tfl_grw_prs_3_0)
svr_tfl_grw_0 = pd.DataFrame({'svr_tfl_grw_mse_0':svr_tfl_grw_mse_0[::-1], 'svr_tfl_grw_r2_0':svr_tfl_grw_r2_0[::-1], 'svr_tfl_grw_prs_0':svr_tfl_grw_prs_0[::-1]})
svr_tfl_grw_0.set_index(pd.Index(p_value_2), 'p_value', inplace = True)
svr_tfl_grw_0.to_csv('svr_tfl_grw_0.csv')
# Output is grain weight
svr_tfl_grw_mse_2_1, svr_tfl_grw_r2_2_1, svr_tfl_grw_prs_2_1 = get_svm_model(X_tfl_grw_2_train, y_tfl_grw_2_train[:, 1], X_tfl_grw_2_test, y_tfl_grw_2_test[:, 1])
svr_tfl_grw_mse_1.append(svr_tfl_grw_mse_2_1)
svr_tfl_grw_r2_1.append(svr_tfl_grw_r2_2_1)
svr_tfl_grw_prs_1.append(svr_tfl_grw_prs_2_1)
svr_tfl_grw_mse_25_1, svr_tfl_grw_r2_25_1, svr_tfl_grw_prs_25_1 = get_svm_model(X_tfl_grw_25_train, y_tfl_grw_25_train[:, 1], X_tfl_grw_25_test, y_tfl_grw_25_test[:, 1])
svr_tfl_grw_mse_1.append(svr_tfl_grw_mse_25_1)
svr_tfl_grw_r2_1.append(svr_tfl_grw_r2_25_1)
svr_tfl_grw_prs_1.append(svr_tfl_grw_prs_25_1)
svr_tfl_grw_mse_1_1, svr_tfl_grw_r2_1_1, svr_tfl_grw_prs_1_1 = get_svm_model(X_tfl_grw_1_train, y_tfl_grw_1_train[:, 1], X_tfl_grw_1_test, y_tfl_grw_1_test[:, 1])
svr_tfl_grw_mse_1.append(svr_tfl_grw_mse_1_1)
svr_tfl_grw_r2_1.append(svr_tfl_grw_r2_1_1)
svr_tfl_grw_prs_1.append(svr_tfl_grw_prs_1_1)
svr_tfl_grw_mse_75_1, svr_tfl_grw_r2_75_1, svr_tfl_grw_prs_75_1 = get_svm_model(X_tfl_grw_75_train, y_tfl_grw_75_train[:, 1], X_tfl_grw_75_test, y_tfl_grw_75_test[:, 1])
svr_tfl_grw_mse_1.append(svr_tfl_grw_mse_75_1)
svr_tfl_grw_r2_1.append(svr_tfl_grw_r2_75_1)
svr_tfl_grw_prs_1.append(svr_tfl_grw_prs_75_1)
svr_tfl_grw_mse_3_1, svr_tfl_grw_r2_3_1, svr_tfl_grw_prs_3_1 = get_svm_model(X_tfl_grw_3_train, y_tfl_grw_3_train[:, 1], X_tfl_grw_3_test, y_tfl_grw_3_test[:, 1])
svr_tfl_grw_mse_1.append(svr_tfl_grw_mse_3_1)
svr_tfl_grw_r2_1.append(svr_tfl_grw_r2_3_1)
svr_tfl_grw_prs_1.append(svr_tfl_grw_prs_3_1)
svr_tfl_grw_1 = pd.DataFrame({'svr_tfl_grw_mse_1':svr_tfl_grw_mse_1[::-1], 'svr_tfl_grw_r2_1':svr_tfl_grw_r2_1[::-1], 'svr_tfl_grw_prs_1':svr_tfl_grw_prs_1[::-1]})
svr_tfl_grw_1.set_index(pd.Index(p_value_2), 'p_value', inplace = True)
svr_tfl_grw_1.to_csv('svr_tfl_grw_1.csv')
print("")
print("Create data frames...")
print("")
grw_mse = pd.DataFrame({'rf_grw_mse':rf_grw_mse[::-1], 'svr_grw_mse':svr_grw_mse[::-1]})
grw_mse.set_index(pd.Index(p_value[1:5]), 'p_value', inplace = True)
grw_r2 = pd.DataFrame({'rf_grw_r2':rf_grw_r2[::-1], 'svr_grw_r2':svr_grw_r2[::-1]})
grw_r2.set_index(pd.Index(p_value[1:5]), 'p_value', inplace = True)
tfl_mse = pd.DataFrame({'rf_tfl_mse':rf_tfl_mse[::-1], 'svr_tfl_mse':svr_tfl_mse[::-1]})
tfl_mse.set_index(pd.Index(p_value), 'p_value', inplace = True)
tfl_r2 = pd.DataFrame({'rf_tfl_r2':rf_tfl_r2[::-1], 'svr_tfl_r2':svr_tfl_r2[::-1]})
tfl_r2.set_index(pd.Index(p_value), 'p_value', inplace = True)
tfl_grw_mse = pd.DataFrame({'rf_tfl_mse':rf_tfl_grw_mse_0[::-1], 'rf_grw_mse':rf_tfl_grw_mse_1[::-1], 'svr_tfl_mse':svr_tfl_grw_mse_0[::-1], 'svr_grw_mse':svr_tfl_grw_mse_1[::-1]})
tfl_grw_mse.set_index(pd.Index(p_value_2), 'p_value', inplace = True)
tfl_grw_r2 = pd.DataFrame({'rf_tfl_r2':rf_tfl_grw_r2_0[::-1], 'rf_grw_r2':rf_tfl_grw_r2_1[::-1], 'svr_tfl_r2':svr_tfl_grw_r2_0[::-1], 'svr_grw_r2':svr_tfl_grw_r2_1[::-1]})
tfl_grw_r2.set_index(pd.Index(p_value_2), 'p_value', inplace = True)
print("")
print("Find mse and r-squared for lasso and multitasklasso model...")
print("")
print("For grain weight...")
print("")
mse_grw_ls, r2_grw_ls, var_grw_ls, ls_grw_prs = get_lasso_cv(X_grw_train, y_grw_train, X_grw_test, y_grw_test, cols)
print("")
print("For time to flowering...")
print("")
mse_tfl_ls, r2_tfl_ls, var_tfl_ls, ls_tfl_prs = get_lasso_cv(X_tfl_train, y_tfl_train, X_tfl_test, y_tfl_test, cols)
print("")
print("For time to flowering and grain weight...")
print("")
mse_tfl_grw_ls, r2_tfl_grw_ls, var_tfl_grw_ls, ls_tfl_grw_prs = get_multitask_lasso_cv(X_tfl_grw_train, y_tfl_grw_train, X_tfl_grw_test, y_tfl_grw_test, cols)
print("")
print("Find mse and r-squared for elasticnet and multitaskelasticnet model...")
print("")
print("For grain weight...")
print("")
mse_grw_el, r2_grw_el, var_grw_el, el_grw_prs = get_elasticnet_cv(X_grw_train, y_grw_train, X_grw_test, y_grw_test, cols)
print("")
print("For time to flowering...")
print("")
mse_tfl_el, r2_tfl_el, var_tfl_el, el_tfl_prs = get_elasticnet_cv(X_tfl_train, y_tfl_train, X_tfl_test, y_tfl_test, cols)
print("")
print("For time to flowering and grain weight...")
print("")
mse_tfl_grw_el, r2_tfl_grw_el, var_tfl_grw_el, el_tfl_grw_prs = get_multitask_elasticnet_cv(X_tfl_grw_train, y_tfl_grw_train, X_tfl_grw_test, y_tfl_grw_test, cols)
# Mse, r2 of each trait with the multi-task problem
mtls_mse_tfl, mtls_mse_grw, mtls_r2_tfl, mtls_r2_grw = eval_mtls_split_trait(2.41812258083032, X_tfl_grw_train, y_tfl_grw_train, X_tfl_grw_test, y_tfl_grw_test)
mtel_mse_tfl, mtel_mse_grw, mtel_r2_tfl, mtel_r2_grw = eval_mtel_split_trait(4.20631940576943, 0.5, X_tfl_grw_train, y_tfl_grw_train, X_tfl_grw_test, y_tfl_grw_test)
ls_table = pd.DataFrame({'mse_grw_ls':[mse_grw_ls], 'r2_grw_ls':[r2_grw_ls],
'mse_tfl_ls':[mse_tfl_ls], 'r2_tfl_ls':[r2_tfl_ls],
'mse_tfl_grw_ls':[mse_tfl_grw_ls], 'r2_tfl_grw_ls':[r2_tfl_grw_ls],
'ls_grw_prs':[ls_grw_prs], 'ls_tfl_prs':[ls_tfl_prs], 'ls_tfl_grw_prs':[ls_tfl_grw_prs]})
el_table = pd.DataFrame({'mse_grw_el':[mse_grw_el], 'r2_grw_el':[r2_grw_el],
'mse_tfl_el':[mse_tfl_el], 'r2_tfl_el':[r2_tfl_el],
'mse_tfl_grw_el':[mse_tfl_grw_el], 'r2_tfl_grw_el':[r2_tfl_grw_el],
'el_grw_prs':[el_grw_prs], 'el_tfl_prs':[el_tfl_prs], 'el_tfl_grw_prs':[el_tfl_grw_prs]})
ls_split_trait = pd.DataFrame({'mtls_mse_tfl':[mtls_mse_tfl],'mtls_mse_grw':[mtls_mse_grw], 'mtls_r2_tfl':[mtls_r2_tfl], 'mtls_r2_grw':[mtls_r2_grw]})
el_split_trait = pd.DataFrame({'mtel_mse_tfl':[mtel_mse_tfl],'mtel_mse_grw':[mtel_mse_grw], 'mtel_r2_tfl':[mtel_r2_tfl], 'mtel_r2_grw':[mtel_r2_grw]})
var_tfl_ls = pd.DataFrame({'var_tfl_ls':var_tfl_ls})
var_grw_ls = pd.DataFrame({'var_grw_ls':var_grw_ls})
var_tfl_grw_ls = pd.DataFrame({'var_tfl_grw_ls':var_tfl_grw_ls})
var_tfl_el = pd.DataFrame({'var_tfl_el':var_tfl_el})  # completion; api: pandas.DataFrame
from __future__ import print_function
import string
import sys
import os
from collections import deque
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
plt.switch_backend('Agg')
import tensorflow as tf
import keras
keras.backend.image_data_format()
from keras import backend as K
from keras import regularizers
from keras.layers import Input, Dense, Reshape, Lambda, Conv1D, Flatten, MaxPooling1D, UpSampling1D, GlobalMaxPooling1D
from keras.layers import LSTM, Bidirectional, BatchNormalization, Dropout, Concatenate, Embedding, Activation, Dot, dot
from keras.models import Model, clone_model, Sequential
from keras.optimizers import Adam
from keras.callbacks import EarlyStopping,ModelCheckpoint
from keras.constraints import unitnorm
from keras_layer_normalization import LayerNormalization
tf.keras.backend.set_floatx('float32')
import sklearn as sk
from sklearn.base import BaseEstimator, _pprint
from sklearn.utils import check_array, check_random_state
from sklearn.utils.validation import check_is_fitted
from sklearn.preprocessing import StandardScaler
from sklearn.manifold import LocallyLinearEmbedding, MDS, Isomap, TSNE
from sklearn.decomposition import PCA, IncrementalPCA, KernelPCA, SparsePCA, TruncatedSVD, FastICA, NMF, MiniBatchDictionaryLearning
from sklearn.random_projection import GaussianRandomProjection, SparseRandomProjection
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import KFold, GroupKFold, train_test_split
from sklearn.metrics import mean_squared_error, explained_variance_score, mean_absolute_error, median_absolute_error, r2_score
from sklearn.metrics import average_precision_score, precision_score, recall_score, f1_score, roc_auc_score, matthews_corrcoef
from sklearn.metrics import roc_curve, precision_recall_curve, RocCurveDisplay, PrecisionRecallDisplay
from sklearn.metrics import roc_auc_score,accuracy_score,matthews_corrcoef
from scipy import stats
from scipy.stats import multivariate_normal, kurtosis, skew, pearsonr, spearmanr
import processSeq
from processSeq import load_seq_1, kmer_dict, load_signal_1, load_seq_2, load_seq_2_kmer, load_seq_altfeature
import xgboost
import pickle
import os.path
from optparse import OptionParser
import time
from timeit import default_timer as timer
import utility_1
from utility_1 import mapping_Idx
import h5py
import json
# generate sequences
# idx_sel_list: chrom, serial
# seq_list: relative positions
def generate_sequences(idx_sel_list, gap_tol=5, region_list=[]):
chrom = idx_sel_list[:,0]
chrom_vec = np.unique(chrom)
chrom_vec = np.sort(chrom_vec)
seq_list = []
print(len(chrom),chrom_vec)
for chrom_id in chrom_vec:
b1 = np.where(chrom==chrom_id)[0]
t_serial = idx_sel_list[b1,1]
prev_serial = t_serial[0:-1]
next_serial = t_serial[1:]
distance = next_serial-prev_serial
b2 = np.where(distance>gap_tol)[0]
if len(b2)>0:
if len(region_list)>0:
# print('region_list',region_list,len(b2))
b_1 = np.where(region_list[:,0]==chrom_id)[0]
# print(b2)
t_serial = idx_sel_list[b2,1]
if len(b_1)>0:
# b2 = np.setdiff1d(b2,region_list[b_1,1])
# print(region_list,region_list[b_1,1],len(b2))
t_id1 = utility_1.mapping_Idx(t_serial,region_list[b_1,1])
t_id1 = t_id1[t_id1>=0]
t_id2 = b2[t_id1]
b2 = np.setdiff1d(b2,t_id2)
# print(len(b2))
# print(idx_sel_list[b2])
# return
# print('gap',len(b2))
if len(b2)>0:
t_seq = list(np.vstack((b2[0:-1]+1,b2[1:])).T)
t_seq.insert(0,np.asarray([0,b2[0]]))
t_seq.append(np.asarray([b2[-1]+1,len(b1)-1]))
else:
t_seq = [np.asarray([0,len(b1)-1])]
# print(t_seq)
# print(chrom_id,len(t_seq),max(distance))
seq_list.extend(b1[np.asarray(t_seq)])
return np.asarray(seq_list)
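# Illustrative sketch of generate_sequences on a toy index list (traced from the code
# above, not part of the original pipeline): serials 0,1,2 are contiguous, while 10,11
# start a new segment because the gap of 8 exceeds gap_tol=5.
def _demo_generate_sequences():
    toy_idx = np.asarray([[1, 0], [1, 1], [1, 2], [1, 10], [1, 11]])
    seq_list = generate_sequences(toy_idx, gap_tol=5)
    # expected: [[0, 2], [3, 4]] -- start/stop positions of the two contiguous runs
    return seq_list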
# select sample
def sample_select2a1(x_mtx, y, idx_sel_list, seq_list, tol=5, L=5):
num_sample = len(idx_sel_list)
num1 = len(seq_list)
size1 = 2*L+1
print(num_sample,num1,size1)
feature_dim = x_mtx.shape[1]
vec1_local = np.zeros((num_sample,size1),dtype=int)
vec1_serial = np.zeros((num_sample,size1),dtype=int)
feature_mtx = np.zeros((num_sample,size1,feature_dim),dtype=np.float32)
signal_mtx = np.zeros((num_sample,size1))
ref_serial = idx_sel_list[:,1]
id_vec = np.zeros(num_sample,dtype=np.int8)
for i in range(0,num1):
s1, s2 = seq_list[i][0], seq_list[i][1]+1
serial = ref_serial[s1:s2]
id_vec[s1:s2] = 1
# print('start stop',s1,s2,serial)
num2 = len(serial)
t1 = np.outer(list(range(s1,s2)),np.ones(size1))
t2 = t1 + np.outer(np.ones(num2),list(range(-L,L+1)))
t2[t2<s1] = s1
t2[t2>=s2] = s2-1
idx = np.int64(t2)
# print(idx)
vec1_local[s1:s2] = idx
vec1_serial[s1:s2] = ref_serial[idx]
feature_mtx[s1:s2] = x_mtx[idx]
signal_mtx[s1:s2] = y[idx]
# if i%10000==0:
# print(i,num2,vec1_local[s1],vec1_serial[s1])
id1 = np.where(id_vec>0)[0]
num2 = len(id1)
if num2<num_sample:
feature_mtx, signal_mtx = feature_mtx[id1], signal_mtx[id1]
# vec1_serial, vec1_local = vec1_serial[id1], vec1_local[id1]
vec1_serial = vec1_serial[id1]
id_1 = -np.ones(num_sample,dtype=np.int64)
id_1[id1] = np.arange(num2)
vec1_local = id_1[vec1_local]
b1 = np.where(vec1_local<0)[0]
if len(b1)>0:
print('error!',b1)
return -1
# signal_mtx = signal_mtx[:,np.newaxis]
signal_mtx = np.expand_dims(signal_mtx, axis=-1)
# signal_mtx = np.expand_dims(signal_ntx, axis=-1)
return feature_mtx, signal_mtx, vec1_serial, vec1_local
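# Illustrative sketch of sample_select2a1 on assumed toy inputs (not part of the
# pipeline): with L=1 every locus is paired with a window of 2*L+1 = 3 neighbouring
# loci, clipped at the boundaries of its contiguous segment.
def _demo_sample_select2a1():
    toy_idx = np.asarray([[1, 0], [1, 1], [1, 2], [1, 10], [1, 11]])
    seq_list = generate_sequences(toy_idx, gap_tol=5)
    x_mtx = np.arange(10, dtype=np.float32).reshape(5, 2)   # 5 loci, 2 features each
    y = np.arange(5, dtype=float)
    feat, sig, v_serial, v_local = sample_select2a1(x_mtx, y, toy_idx, seq_list, L=1)
    return feat.shape, sig.shape   # (5, 3, 2) and (5, 3, 1)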
def score_2a(y, y_predicted):
score1 = mean_squared_error(y, y_predicted)
score2 = pearsonr(y, y_predicted)
score3 = explained_variance_score(y, y_predicted)
score4 = mean_absolute_error(y, y_predicted)
score5 = median_absolute_error(y, y_predicted)
score6 = r2_score(y, y_predicted)
score7, pvalue = spearmanr(y,y_predicted)
# vec1 = [score1, score2[0], score2[1], score3, score4, score5, score6]
vec1 = [score1, score2[0], score2[1], score3, score4, score5, score6, score7, pvalue]
return vec1
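# Small usage sketch documenting the order of the metrics returned by score_2a:
# [mse, pearson_r, pearson_pvalue, explained_variance, mae, median_ae, r2,
#  spearman_r, spearman_pvalue]
def _demo_score_2a():
    y_true = np.array([1.0, 2.0, 3.0, 4.0])
    y_pred = np.array([1.1, 1.9, 3.2, 3.8])
    return score_2a(y_true, y_pred)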
def read_phyloP(species_name):
path1 = './'
filename1 = '%s/estimate_rt/estimate_rt_%s.txt'%(path1,species_name)
# filename2a = 'test_seq_%s.1.txt'%(species_name)
file1 = pd.read_csv(filename1,sep='\t')
col1, col2, col3 = '%s.chrom'%(species_name), '%s.start'%(species_name), '%s.stop'%(species_name)
chrom_ori, start_ori, stop_ori, serial_ori = np.asarray(file1[col1]), np.asarray(file1[col2]), np.asarray(file1[col3]), np.asarray(file1['serial'])
num_sample = len(chrom_ori)
chrom_vec = np.unique(chrom_ori)
chrom_vec = ['chr22']
for chrom_id in chrom_vec:
filename1 = '%s/phyloP/hg19.phyloP100way.%s.bedGraph'%(path1,chrom_id)
data1 = pd.read_csv(filename1,header=None,sep='\t')
chrom, start, stop, score = data1[0], data1[1], data1[2], data1[3]
len1 = stop-start
b = np.where(chrom_ori==chrom_id)[0]
num_sample1 = len(b)
vec1 = np.zeros((num_sample1,16))
print(chrom_id,len(chrom),len(b))
cnt = 0
b1 = [-1]
for i in b:
t1 = b1[-1]+1
b1 = np.where((start[t1:]>=start_ori[i])&(stop[t1:]<stop_ori[i]))[0]+t1
if len(b1)==0:
b1 = [-1]
continue
t_len1, t_score = np.asarray(len1[b1]), np.asarray(score[b1])
s1 = 0
s2 = np.sum(t_len1)
i1 = cnt
for j in range(0,12):
temp1 = (j-8)*2.5
b2 = np.where((t_score<temp1+2.5)&(t_score>=temp1))[0]
print(b2)
vec1[i1,j] = np.sum(t_len1[b2])*1.0/s2
s1 = s1+temp1*vec1[i1,j]
vec1[i1,12] = s1 # average
vec1[i1,13] = np.median(t_score)
vec1[i1,14] = np.max(t_score)
vec1[i1,15] = np.min(t_score)
cnt += 1
if cnt%1000==0:
print(cnt,len(b1),s2,vec1[i1,12:16])
break
# dict1 = dict()
# dict1['vec'], dict1['index'] = vec1,b
# np.save('phyloP_%s'%(chrom_id),dict1,allow_pickle=True)
fields = ['index']
for j in range(0,12):
temp1 = (j-8)*2.5
fields.append('%s-%s'%(temp1,temp1+2.5))
fields.extend(range(0,4))
data1 = pd.DataFrame(data = np.hstack((b[:,np.newaxis],vec1)),columns=fields)
data1.to_csv('phyloP_%s.txt'%(chrom_id),sep='\t',index=False)
return vec1
def read_phyloP_1(ref_filename,header,file_path,chrom_vec,n_level=15,offset=10,magnitude=2):
file1 = pd.read_csv(ref_filename,header=header,sep='\t')
# col1, col2, col3 = '%s.chrom'%(species_name), '%s.start'%(species_name), '%s.stop'%(species_name)
colnames = list(file1)
col1, col2, col3, col4 = colnames[0], colnames[1], colnames[2], colnames[3]
chrom_ori, start_ori, stop_ori, serial_ori = np.asarray(file1[col1]), np.asarray(file1[col2]), np.asarray(file1[col3]), np.asarray(file1[col4])
num_sample = len(chrom_ori)
# chrom_vec = np.unique(chrom_ori)
# chrom_vec = [chrom_id]
# n_level, offset, magnitude = 15, 10, 2
score_max = (n_level-offset)*magnitude
for chrom_id in chrom_vec:
# filename1 = '%s/hg19.phyloP100way.%s.bedGraph'%(file_path,chrom_id)
filename1 = '%s/chr%s.phyloP100way.bedGraph'%(file_path,chrom_id)
data1 = pd.read_csv(filename1,header=None,sep='\t')
chrom, start, stop, score = data1[0], data1[1], data1[2], data1[3]
len1 = stop-start
chrom_id1 = 'chr%s'%(chrom_id)
b = np.where(chrom_ori==chrom_id1)[0]
num_sample1 = len(b)
vec1 = np.zeros((num_sample1,n_level+4))
print(chrom_id,len(chrom),len(b))
cnt = 0
m_idx = len(start)-1
start_idx = 0
print("number of regions", len(b))
for i in b:
t_start, t_stop = start_ori[i], stop_ori[i] # position of zero region
position = [t_start,t_stop]
if start_idx<=m_idx:
b1, start_idx = utility_1.search_region_include(position, start, stop, m_idx, start_idx)
# print(count,t_start,t_stop,t_stop-t_start,start_idx,len(id3))
if len(b1)==0:
continue
t_len1, t_score = np.asarray(len1[b1]), np.asarray(score[b1])
t_score[t_score>score_max] = score_max-1e-04
s1 = 0
s2 = np.sum(t_len1)
for j in range(0,n_level):
temp1 = (j-offset)*magnitude
b2 = np.where((t_score<temp1+magnitude)&(t_score>=temp1))[0]
# print(b2)
vec1[cnt,j] = np.sum(t_len1[b2])*1.0/s2
s1 = s1+temp1*vec1[cnt,j]
vec1[cnt,n_level:n_level+4] = [s1,np.median(t_score),np.max(t_score),np.min(t_score)]
cnt += 1
pre_b1 = b1
if cnt%1000==0:
print(chrom_id,cnt,len(b1),s2,vec1[cnt,-4:])
# break
# dict1 = dict()
# dict1['vec'], dict1['index'] = vec1,b
# np.save('phyloP_%s'%(chrom_id),dict1,allow_pickle=True)
fields = ['index']
for j in range(0,n_level):
temp1 = (j-offset)*magnitude
fields.append('%s-%s'%(temp1,temp1+magnitude))
fields.extend(range(0,4))
idx = serial_ori[b]
data1 = pd.DataFrame(data = np.hstack((idx[:,np.newaxis],vec1)),columns=fields)
data1.to_csv('phyloP_%s.txt'%(chrom_id),sep='\t',index=False)
return vec1
def read_motif_1(filename,output_filename=-1):
data1 = pd.read_csv(filename,sep='\t')
colnames = list(data1)
col1, col2, col3 = colnames[0], colnames[1], colnames[2]
chrom, start, stop = np.asarray(data1[col1]), np.asarray(data1[col2]), np.asarray(data1[col3])
region_len = stop-start
m1, m2, median_len = np.max(region_len), np.min(region_len), np.median(region_len)
b1 = np.where(region_len!=median_len)[0]
print(m1,m2,median_len,len(b1))
bin_size = median_len
motif_name = colnames[3:]
mtx1 = np.asarray(data1.loc[:,motif_name])
mtx1 = mtx1*1000.0/np.outer(region_len,np.ones(mtx1.shape[1]))
print('motif',len(motif_name))
print(mtx1.shape)
print(np.max(mtx1),np.min(mtx1),np.median(mtx1))
if output_filename!=-1:
fields = colnames
data1 = pd.DataFrame(columns=fields)
data1[colnames[0]], data1[colnames[1]], data1[colnames[2]] = chrom, start, stop
num1 = len(fields)-3
for i in range(0,num1):
data1[colnames[i+3]] = mtx1[:,i]
data1.to_csv(output_filename,header=True,index=False,sep='\t')
print(output_filename, data1.shape)
return mtx1, chrom, start, stop, colnames
def read_gc_1(ref_filename,header,filename,output_filename):
sel_idx = []
file1 = pd.read_csv(ref_filename,header=header,sep='\t')
f_list = load_seq_altfeature(filename,sel_idx)
# col1, col2, col3 = '%s.chrom'%(species_name), '%s.start'%(species_name), '%s.stop'%(species_name)
colnames = list(file1)
col1, col2, col3, col4 = colnames[0], colnames[1], colnames[2], colnames[3]
chrom_ori, start_ori, stop_ori, serial_ori = np.asarray(file1[col1]), np.asarray(file1[col2]), np.asarray(file1[col3]), np.asarray(file1[col4])
num_sample = len(chrom_ori)
if num_sample!=f_list.shape[0]:
print('error!',num_sample,f_list.shape[0])
fields = ['chrom','start','stop','serial','GC','GC_N','GC_skew']
file2 = pd.DataFrame(columns=fields)
file2['chrom'], file2['start'], file2['stop'], file2['serial'] = chrom_ori, start_ori, stop_ori, serial_ori
for i in range(0,3):
file2[fields[i+4]] = f_list[:,i]
file2.to_csv(output_filename,index=False,sep='\t')
return f_list
def generate_serial(filename1,chrom,start,stop):
# chrom_vec = np.sort(np.unique(chrom))
# print(chrom_vec)
chrom_vec = []
for i in range(1,23):
chrom_vec.append('chr%d'%(i))
chrom_vec += ['chrX']
chrom_vec += ['chrY']
print(chrom_vec)
# print(chrom)
print(len(chrom))
data1 = pd.read_csv(filename1,header=None,sep='\t')
ref_chrom, chrom_size = np.asarray(data1[0]), np.asarray(data1[1])
serial_start = 0
serial_vec = np.zeros(len(chrom))
bin_size = stop[1]-start[1]
print(bin_size)
for chrom_id in chrom_vec:
b1 = np.where(ref_chrom==chrom_id)[0]
t_size = chrom_size[b1[0]]
b2 = np.where(chrom==chrom_id)[0]
if len(b1)>0:
size1 = int(np.ceil(t_size*1.0/bin_size))
serial = np.int64(start[b2]/bin_size)+serial_start
serial_vec[b2] = serial
print(chrom_id,b2,len(serial),serial_start,size1)
serial_start = serial_start+size1
else:
print("error!")
return
return np.int64(serial_vec)
def generate_serial_local(filename1,chrom,start,stop,chrom_num):
# chrom_vec = np.sort(np.unique(chrom))
# print(chrom_vec)
chrom_vec = []
for i in range(1,chrom_num+1):
chrom_vec.append('chr%d'%(i))
chrom_vec += ['chrX']
chrom_vec += ['chrY']
chrom_vec += ['chrM']
print(chrom_vec)
print(chrom)
print(len(chrom))
t_chrom = np.unique(chrom)
data1 = pd.read_csv(filename1,header=None,sep='\t')
ref_chrom, chrom_size = np.asarray(data1[0]), np.asarray(data1[1])
# serial_start = np.zeros(len(chrom))
serial_start = 0
serial_start_1 = dict()
serial_vec = np.zeros(len(chrom))
bin_size = stop[1]-start[1]
print(bin_size)
for chrom_id in chrom_vec:
b1 = np.where(ref_chrom==chrom_id)[0]
t_size = chrom_size[b1[0]]
serial_start_1[chrom_id] = serial_start
size1 = int(np.ceil(t_size*1.0/bin_size))
serial_start = serial_start+size1
for chrom_id in t_chrom:
b2 = np.where(chrom==chrom_id)
serial = np.int64(start[b2]/bin_size)+serial_start_1[chrom_id]
serial_vec[b2] = serial
return np.int64(serial_vec)
def generate_serial_start(filename1,chrom,start,stop,chrom_num=19):
# chrom_vec = np.sort(np.unique(chrom))
# print(chrom_vec)
chrom_vec = []
for i in range(1,chrom_num+1):
chrom_vec.append('chr%d'%(i))
chrom_vec += ['chrX']
chrom_vec += ['chrY']
print(chrom_vec)
print(chrom)
print(len(chrom))
data1 = pd.read_csv(filename1,header=None,sep='\t')  # completion; api: pandas.read_csv
import pandas as pd
from pathlib import Path
import numpy as np
path = Path
stats = pd.read_csv(path+r'\Remerged\_mlb_game_stats.csv', index_col=False)
people = pd.read_csv(path+r'\Remerged\_mlb_remerged_People.csv', index_col=False)
teams = pd.read_csv(path+r'\Remerged\_mlb_remerged_Teams.csv', index_col=False)  # completion; api: pandas.read_csv
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 15 17:14:55 2021
@author: sergiomarconi
"""
import numpy as np
import pandas as pd
import pickle
from sklearn.ensemble import RandomForestClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline
from sklearn.ensemble import StackingClassifier
from sklearn.ensemble import BaggingClassifier
from mlxtend.classifier import StackingCVClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.experimental import enable_hist_gradient_boosting
from sklearn.ensemble import HistGradientBoostingClassifier
from mlxtend.classifier import SoftmaxRegression
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import normalize
from imblearn.ensemble import BalancedRandomForestClassifier
from imblearn.ensemble import RUSBoostClassifier
from sklearn.calibration import CalibratedClassifierCV
from sklearn.svm import SVC
import category_encoders as ce
species_to_genus = {
'2PLANT':"NA",
'ABBA':"AB",
'ABLAL':"AB",
"ABLO":'AB',
"ABMA":"AB",
"ABAM":"AB",
'ACNE2': "AC",
'ACNEN': "AC",
'ACPE':"AC",
'ACRU': "AC",
'ACSA3' : "AC",
'ACSA2' :"AC",
'ACFA':"AC2",
'ACKO': "AC2",
'ACGR':"AC2",
'AIAL' : "AI",
'ALRU2': "AL",
'ALVI5':'AL',
'AMLA' : "AM",
'AMEL':'AM',
'ARVIM':"AR",
'BEAL2': "BE",
'BEGL/BENA':"BE",
'BEPA': "BE",
'BELE': "BE",
'BEPO': "BE",
'BETUL':'BE',
'BENE4' : "BE",
'BUBU':"BU",
'BUSI':"BU",
'BOSU2':"BO",
'CACA18':"CA1",
'CADE27':"CA2",
'CAGL8':"CA3",
'CAOV2':"CA3",
'CAOV3':"CA3",
'CAAQ2':'CA',
'CACO15': "CA3",
'CATO6':"CA3",
'CAIL2':"CA3",
'CECA4':"CE1",
'CELA':"CE2",
'CEOC':"CE2",
'CODR':"CO",
'CODI8':"CO",
'COFL2':"CO2",
'DIVI5':"DI",
'ELAN':"EL",
'FAGR':"FA",
'FRAM2':"FR",
'FRAXI':'FR',
'FRNI':'FR',
'LARIX':'LA',
'ILAN':'IL',
'FRPE':"FR",
'GYDI':"GY",
'GUOF':"GU",
'GUSA':"GU",
'GLTR':"GL",
'HALES':"HA",
'JUNI':"JU1",
'JUNIP':"JU2",
'JUVI':"JU2",
'JUOS':"JU2",
'LIST2':"LI1",
'LITU':"LI2",
'MAPO':"MA",
'MAFR':'MA',
'MAGNO':'MA',
'MORU2':"MO",
'NYBI':"NY",
'NYSY':"NY",
'NYAQ2':'NY',
'OXYDE':"OX",
'OXAR':"OX",
'OSVI':'OS',
'PICEA':"PI1",
'PIAL3':"PI2",
'PIAC':"PI3",
'PICO':"PI2",
'PIEL':"PI2",
'PIEN':"PI2",
'PIEC2':"PI2",
'PIFL2':"PI2",
'PIGL':"PI2",
'PIMA':"PI2",
'PINUS':'PI2',
'PIPA2':"PI2",
'PIPO':"PI2",
'PIRU':"PI2",
'PIPOS':"PI2",
'PIPU5':"PI2",
'PIST':"PI2",
'PITA':"PI2",
'PIGL2':"PI2",
'PIED':"PI",
'PIJE':"PI",
'PIRI':'PI',
'PIVI2':'PI',
'PINUS':"PI2",
'PLOC':"PL",
'POTR5':"PO",
'POGR4':"PO",
'PODE3':"PO",
'PRVE':"PR",
'PRVI':"PR",
'PRAV':'PR',
'PRSE2': "PR",
'PRAN3':"PR",
'PSME':"PS",
'QUAL':"QU",
'QUCO2':"QU",
'QUCH':"QU",
'QUCH2':"QU",
'QUHE2':'QU',
'QUERC':"QU",
'QUGE2':"QU",
'QUSH':"QU",
'QULA2':'QU',
"QUPH":"QU",
'QULA3':"QU",
'QUERCUS':"QU",
'QULY':"QU",
'QUMA3':"QU",
'QUMA13':"QU",
'THUJA':"TU",
'PISA2':"PI2",
'TABR2':"TA",
'QUDO':"QU",
'MEPO5':'ME',
'QUMI':"QU",
'QUFA':"QU",
'QUMO4':"QU",
'QUMU':"QU",
'QUNI':"QU",
'QUKE':"QU",
'QUVE':'QU',
'QUWI2':"QU",
'QUPA5':"QU",
'QURU':"QU",
'QUST':"QU",
'RHGL':"RH",
"ROPS":"RO",
'SASSA':'SA',
'SALIX':'SA',
'SYOC':"SY",
'SILA20':"SI",
'SWMA2':"SW",
'TRSE6':"TR",
'TSCA':"TS",
'TSHE':"TS",
'TIAM':"TI",
'TAHE':"TA",
'ULAL':"UL",
'ULAM':"UL",
'ULMUS':"UL",
'ULCR':"UL",
'ULRU':"UL",
}
import os
os.chdir("/blue/ewhite/s.marconi/NeonSpeciesClassification/")
from sklearn.preprocessing import LabelEncoder
import numpy as np
import pandas as pd
from sklearn.decomposition import PCA
from imblearn.over_sampling import SMOTENC
from imblearn.over_sampling import ADASYN
from imblearn.under_sampling import TomekLinks
from collections import Counter
from src.hdr import *
from sklearn.preprocessing import normalize
import numpy as np
import pandas as pd
import pickle
from sklearn.ensemble import RandomForestClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline
from sklearn.ensemble import StackingClassifier
from sklearn.ensemble import BaggingClassifier
from mlxtend.classifier import StackingClassifier
from sklearn.linear_model import LogisticRegressionCV
from sklearn.experimental import enable_hist_gradient_boosting
from sklearn.ensemble import HistGradientBoostingClassifier
from mlxtend.classifier import SoftmaxRegression
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.neighbors import KNeighborsClassifier
from imblearn.ensemble import BalancedRandomForestClassifier
from imblearn.ensemble import RUSBoostClassifier
from sklearn.calibration import CalibratedClassifierCV
from sklearn.svm import SVC
from sklearn.neural_network import MLPClassifier
domainid = {
"GUAN": "D04",
"BART": "D01",
"HARV": "D01",
"STEI": "D05",
"TREE": "D05",
"UNDE": "D05",
"SERC": "D02",
"SCBI": "D02",
"OSBS": "D03",
"MLBS": "D07",
"DELA": "D08",
"TALL": "D08",
"BLAN": "D02",
"UKFS": "D06",
"RMNP": "D10",
"BONA": "D19",
"MOAB": "D13",
"DEJU": "D19",
"LENO": "D08",
"DSNY": "D03",
"JERC": "D03",
"KONZ": "D06",
"CLBJ": "D11",
"YELL": "D12",
"NIWO": "D13",
"ABBY": "D16",
"WREF": "D16",
"SJER": "D17",
"PUUM": "D20"
}
def categorical_encoder(cats,y):
import category_encoders as ce
le = LabelEncoder()
le.fit(y)
y_codes = le.transform(y)
enc = ce.LeaveOneOutEncoder(cols=['siteID'])
# enc = enc.fit(cats).transform(cats)
train_enc = enc.fit_transform(cats,y_codes)
return(train_enc)
# prepare input data
def prepare_inputs(X_train, X_test, cats = ['domainID', 'siteID']):
X_train_enc, X_test_enc = list(), list()
# label encode each column
for i in cats:
le = LabelEncoder()
le.fit(X_train[i])
# encode
train_enc = le.transform(X_train[i])
test_enc = le.transform(X_test[i])
# store
X_train_enc.append(train_enc)
X_test_enc.append(test_enc)
return X_train_enc, X_test_enc
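# Illustrative sketch of prepare_inputs on a tiny frame (hypothetical site/domain codes):
# each categorical column is label-encoded with the training-set vocabulary.
def _demo_prepare_inputs():
    X_tr = pd.DataFrame({'domainID': ['D01', 'D03'], 'siteID': ['HARV', 'OSBS']})
    X_te = pd.DataFrame({'domainID': ['D03'], 'siteID': ['HARV']})
    tr_enc, te_enc = prepare_inputs(X_tr, X_te)
    return tr_enc, te_enc   # two lists, one integer array per encoded column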
min_class = Counter(y_train.taxonID)
unsuited = pd.DataFrame(min_class.items())
only_one = unsuited.iloc[:,1]<2
unsuited = unsuited[only_one][0]
#unsuited = pd.DataFrame(min_class.items())
doble_unsuited = X_train[y_train.taxonID.isin(unsuited)]
X_train=X_train.append(doble_unsuited)
doble_unsuited = y_train[y_train.taxonID.isin(unsuited)]
y_train=y_train.append(doble_unsuited)
min_class = Counter(y_train.taxonID)
min_class = min_class[min(min_class, key=min_class.get)]
min_class
# dimensionality reduction
def kld_reduction(brick, kld_out):
from sklearn import preprocessing
refl = brick.drop(['individualID'], axis=1)
scaler = preprocessing.StandardScaler().fit(refl)
refl = scaler.transform(refl)
kld_groups = getClusters(refl, numBands = 15)
np.savetxt(kld_out, kld_groups, delimiter=",")
individualID=brick["individualID"]
#
brick = brick.drop(columns=['individualID'])
brick = brick.values
all_data = np.zeros([brick.shape[0],1])
for jj in np.unique(kld_groups):
which_bands = kld_groups == jj
#min
new_col = np.apply_along_axis(min, 1, brick[:,which_bands])[...,None]
all_data = np.append(all_data, new_col, 1)
#mean
new_col = np.apply_along_axis(np.mean, 1, brick[:,which_bands])[...,None]
all_data = np.append(all_data, new_col, 1)
#max
new_col = np.apply_along_axis(max, 1, brick[:,which_bands])[...,None]
all_data = np.append(all_data, new_col, 1)
#reappend individualID on the all data dataframe
all_data = pd.DataFrame(all_data)
all_data["individualID"] = individualID
all_data = all_data.drop([0], axis=1)
#
#shift back individualID to first row
cols = list(all_data.columns)
cols = [cols[-1]] + cols[:-1]
all_data = all_data[cols]
return all_data
brick = pd.read_csv("./data/features_0411.csv") #"./data/brdf_spectra_2104b.csv")
metadata = pd.read_csv("./data/metadata_0411.csv") #"./data/metadata_2104b.csv")
metadata = metadata[["individualID", "groupID", "plotID","siteID","elevation","latitude", "longitude",
"taxonID"]]
kld_out="./data/tmp_grps.csv"#"./data/kld_grps_2104b.csv"
nbands = brick.shape[0]
brick.iloc[:,1:nbands] = normalize(brick.iloc[:,1:nbands])
brick = kld_reduction(brick, kld_out)
foo = brick.drop(columns=[ 'individualID'])
ele1 = pd.read_csv("/blue/ewhite/s.marconi/Chapter3/neonVegWrangleR/elevation_all_df_vst.csv")  # completion; api: pandas.read_csv
from typing import Dict, Union
from keras import Input, Model
from keras.layers import Flatten, Dense, Dropout
from numpy import ndarray
from pandas import DataFrame
from sklearn.base import RegressorMixin
from src.encoding.encoding_parser import EncodingParser
from src.predictive_model.models import PredictiveModels
class NNRegressor(RegressorMixin):
"""
Neural Network regressor; implements the same methods as the sklearn models so it can be used interchangeably with them
"""
# noinspection PyTypeChecker
def __init__(self, **kwargs: Dict[str, Union[int, str, float]]):
"""initializes the Neural Network regressor
:param kwargs: configuration containing the predictive_model parameters, encoding and training parameters
"""
self._n_hidden_layers = int(kwargs['n_hidden_layers'])
self._n_hidden_units = int(kwargs['n_hidden_units'])
self._activation = str(kwargs['activation'])
self._n_epochs = int(kwargs['n_epochs'])
self._encoding = str(kwargs['encoding'])
self._dropout_rate = float(kwargs['dropout_rate'])
self._encoding_parser = EncodingParser(self._encoding, None, task=PredictiveModels.REGRESSION.value)
self._model = None
def fit(self, train_data: DataFrame, targets: ndarray) -> None:
"""creates and fits the predictive_model
first the encoded data is parsed, then the predictive_model created and then trained
:param train_data: encoded training dataset
:param targets: encoded target dataset
"""
targets = DataFrame(targets, columns=['label'])  # completion; api: pandas.DataFrame
#!/usr/bin/env python
# coding: utf-8
# In[1]:
get_ipython().run_line_magic('matplotlib', 'notebook')
import matplotlib
import seaborn as sb
from matplotlib import pyplot as plt
from matplotlib import colors as mpcolors
import numpy as np
from scipy.optimize import linear_sum_assignment
import pandas as pd
# Jupyter Specifics
get_ipython().run_line_magic('matplotlib', 'inline')
from IPython.display import display, HTML
from ipywidgets.widgets import interact, interactive, IntSlider, FloatSlider, Layout, ToggleButton, ToggleButtons, fixed, Output
display(HTML("<style>.container { width:100% !important; }</style>"))
style = {'description_width': '100px'}
slider_layout = Layout(width='99%')
import skfda
from skfda.datasets import fetch_growth
from skfda.exploratory.visualization import plot_fpca_perturbation_graphs
from skfda.preprocessing.dim_reduction.projection import FPCA
from skfda.representation.basis import BSpline, Fourier, Monomial
import hdbscan
import warnings
import math
from tqdm.notebook import tqdm # progress bars
## map stuff
import ipyleaflet
import json
import geopandas as gpd
import pickle as pk
import os
import requests
from ipywidgets import link, FloatSlider, HTML
from branca.colormap import linear
from matplotlib import colors as mpcolors
def corcl(a,b):
if len(set(a)) > 0 or len(set(b)) > 0:
return len(set(a).intersection(set(b)))/float(len(set(a).union(set(b))))
else:
return 1
def match1(a,x):
rtn = [1 if a[i] == x else 0 for i in range(len(a)) ]
return rtn
def rescale(v,d):
""" functional form of correction factor using simple inversion formula
for with v2'=1/(1-v2) the dimensionality correction v = v2 * v2'/(v2'+d/2-1)
projecting equivalent validity at dim = 2"""
if d > 12.:
d = 12.
logd = np.log(0.5*d)
return v*(1.+logd)/(1.+v*logd)
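# Added sanity check (illustration only, not part of the original notebook): at d = 2 the
# factor log(0.5*d) is 0, so the validity is returned unchanged, and d is capped at 12 so
# all higher dimensionalities share the same correction.
assert abs(rescale(0.6, 2) - 0.6) < 1e-12
assert rescale(0.6, 12.) == rescale(0.6, 50.)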
def score_int(a,b):
if len(set(a)) > 0 or len(set(b)) > 0:
return len(set(a).intersection(set(b)))
else:
return 0
def score_int_union(a,b):
if len(set(a)) > 0 or len(set(b)) > 0:
return len(set(a)&set(b))/len(set(a)|set(b)) # length intersection divided by length union
else:
return 0
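# Added example of the Jaccard-style overlap used throughout: {1,2,3} and {2,3,4}
# share 2 of their 4 distinct elements, giving 0.5.
assert score_int_union([1, 2, 3], [2, 3, 4]) == 0.5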
def matchset(a,x):
rtn = [i for i in range(len(a)) if a[i] == x]
return rtn
def closest_hue(hue,huelist):
mindist = 2.
imin = -1
for i,h in enumerate(huelist):
if h > hue:
dist = min(h-hue,hue+1-h)
else:
dist = min(hue-h,h+1-hue)
if dist < mindist:
mindist = dist
imin = i
return imin
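# Added example of the circular hue distance: hue 0.95 is only 0.05 away from red (hue 0.0)
# across the wrap-around, so it is matched to index 0 rather than to hue 0.5.
assert closest_hue(0.95, [0.0, 0.5]) == 0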
def color_mean_rgb_to_hsv(rgb_colours,weights=None,modal=False):
""" the hue is a circular quantity, so mean needs care
see https://en.wikipedia.org/wiki/Mean_of_circular_quantities
inputs: rgb_colors 1D array of rgb colours with entries [r,g,b]
weights: None,'all' or same length array of weights in 0. to 1. for biasing entries
modal: if True then choose hue as mode of hues, otherwise circular mean
"""
pi = np.pi
eps = 0.0001
hsum = 0.
ssum = 0.
vsum = 0.
asum = 0.
bsum = 0.
wsum = 0.
hwsum = 0.
if len(rgb_colours) == 0:
print('Error in color_mean_rgb_to_hsv: empty list of rgb_colours')
return [0.,0.,0.]
if weights is None:
weights = [1. if mpcolors.rgb_to_hsv(c)[1] > 0 else 0. for c in rgb_colours] # designed to exclude -1 unclustered colours
if np.sum(np.array(weights)) < eps:
weights = [1. for c in rgb_colours]
elif weights == 'all':
weights = [1. for c in rgb_colours]
hdic = {}
for i,c in enumerate(rgb_colours):
hsvcol = mpcolors.rgb_to_hsv(c)
h = hsvcol[0]
s = hsvcol[1]
v = hsvcol[2]
if s > eps and v > eps:
asum = asum + np.sin(h*2.*pi)*weights[i]
bsum = bsum + np.cos(h*2.*pi)*weights[i]
hwsum = hwsum + weights[i]
if h in hdic:
hdic.update({h:hdic[h]+1})
else:
hdic.update({h:1})
ssum = ssum + hsvcol[1]*weights[i]
vsum = vsum + hsvcol[2]*weights[i]
wsum = wsum + weights[i]
if modal:
hvals = list(hdic.keys())
hcnts = [hdic[h1] for h1 in hvals]
if len(hcnts) > 0:
hmaxcnt = np.argmax(np.array(hcnts)) # problem if hcnts is empty sequence
else:
hmaxcnt = None
if modal and len(hcnts)>0 and hcnts[hmaxcnt] >= len(rgb_colours)/4:
h = hvals[hmaxcnt]
# print('using modal hue %f with cnts %d',h,hcnts[hmaxcnt])
elif hwsum > eps:
asum = asum/hwsum
bsum = bsum/hwsum
h = np.arctan2(asum,bsum)/(2.*pi)
if h < 0.:
h = 1.+h
else:
h = 0.
if wsum > eps:
s = ssum/wsum
v = vsum/wsum
else:
print('Error in color_mean_rgb_to_hsv: 0 wsum')
s = 0.
v = 0.
# print(rgb_colours,'mean',mpcolors.hsv_to_rgb([h,s,v]))
if h < 0.:
print('error in color_mean, hue out of range',h)
h = 0.
if h > 1.:
print('error in color_mean, hue out of range',h)
h = 1.
return [h,s,v]
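# Added illustration of the circular hue mean (toy colours, not notebook data): two fully
# saturated colours with hues 0.95 and 0.05 straddle red, so the mean hue is (circularly)
# 0 rather than the naive arithmetic mean 0.5.
_demo_cols = [mpcolors.hsv_to_rgb([0.95, 1., 1.]), mpcolors.hsv_to_rgb([0.05, 1., 1.])]
_h, _s, _v = color_mean_rgb_to_hsv(_demo_cols)
assert min(_h, 1. - _h) < 1e-6 and abs(_s - 1.) < 1e-6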
def size_order(clusterings):
""" relabel clusters in each clustering in order of increasing size"""
clusterings_o = np.zeros(clusterings.shape,dtype = int)
for i,clustering in enumerate(clusterings):
labels = list(set(clustering)-set([-1]))
sizes = np.zeros(len(labels),dtype = int)
for j,lab in enumerate(labels):
sizes[j] = len(matchset(clustering,lab))
order = np.flip(np.argsort(sizes))
clusterings_o[i,:] = [order[c] if c != -1 else c for c in clustering]
return clusterings_o
def clust_assign(clustering_a,clustering_b,colors_a,colors_b):
""" relables clustering b to match clustering a optimally
according tot he Hungarian algorithm, implemented in scipy
"""
labels_a = list(set(clustering_a))
labels_b = list(set(clustering_b))
scores = np.zeros((len(labels_a),len(labels_b)),dtype=float)
for i,a in enumerate(labels_a):
for j,b in enumerate(labels_b):
scores[i,j] = score_int_union(matchset(clustering_a,a),matchset(clustering_b,b)) # length intersection divided by length union (result 0. to 1. for identity)
row_ind,col_ind = linear_sum_assignment(1.-scores) # lsa minimises cost, so use 1-score; only linear_sum_assignment is imported, not scipy itself
dic_a_2_b = {labels_a[r]:labels_b[c] for r,c in zip(row_ind,col_ind)}
return dic_a_2_b
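# Added worked example of the Hungarian assignment (toy matrix): linear_sum_assignment
# minimises total cost, so similarity scores are converted to costs as 1 - score, exactly
# as in clust_lsa below.
_sim = np.array([[0.9, 0.1],
[0.2, 0.8]])
_r, _c = linear_sum_assignment(1. - _sim)
assert list(_r) == [0, 1] and list(_c) == [0, 1] # 0->0 and 1->1 maximise total similarity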
def clust(clustering_a,clustering_b,colors_a,colors_b,relabel=True,merge=True):
""" relables clustering b to match clustering a
if more than one cluster in a optimally matches a particular cluster in b, then color of b is merger of colors in a
if more than one cluster in b optimally matches a particular cluster in a, then colors in a merged and split for b
inputs: clustering_a,b are lists of cluster labels by country, colors_a,b are lists of rgb colors by country in same order
returns: newcolors_b in rgb format
NB. colors_b are only used to preserve s,v values relating to probs of cluster membership for b in final colors
NB. the hues of b_cols are determined by the matching of clustering b with clustering a
NB. all elts of same cluster have the same hue
"""
labels_a = list(set(clustering_a))
labels_b = list(set(clustering_b))
newcolors_b = np.zeros((len(colors_b),3),dtype=float)
newcolors_b[:,:] = colors_b[:,:] # initialized as copy of colors_b, colors for each country in clustering b
a_to_b = {}
b_to_a = {}
a_cols = {}
b_cols = {}
# a_to_b mapping of labels a to the label b (+ its match score in a tuple) with largest matching score: ratio of intersecting countries to union
# maxvals_a_to_b are list of max scores for each label in labels_a
# reorder_a is the largest to smallest order of max scores
# labels_a_sort is labels_a reordered by reorder_a :i.e. the labels in a with the best matches to a label in b first
for a in labels_a:
maxscore = 0
maxlab = -2
for b in labels_b:
score = score_int_union(matchset(clustering_a,a),matchset(clustering_b,b)) # length intersection divided by length union (result 0. to 1. for identity)
if score > maxscore:
maxscore = score
maxlab = b
a_to_b.update({a:(maxlab,maxscore)})
maxvals_a_to_b = [a_to_b[a][1] for a in labels_a]
reorder_a = np.flip(np.argsort(maxvals_a_to_b))
labels_a_sort = [labels_a[r] for r in list(reorder_a)]
# same as above for b_to_a
for b in labels_b:
maxscore = 0
maxlab = -2
for a in labels_a:
score = score_int_union(matchset(clustering_a,a),matchset(clustering_b,b))
if score > maxscore:
maxscore = score
maxlab = a
b_to_a.update({b:(maxlab,maxscore)})
maxvals_b_to_a = [b_to_a[b][1] for b in labels_b]
reorder_b = np.flip(np.argsort(maxvals_b_to_a))
labels_b_sort = [labels_b[r] for r in list(reorder_b)]
#print('before relabel')
# relabeling uses labels_b_sort, labels_a_sort, a_to_b, as well as colors_a,b and clustering_a,b
if relabel:
for b in labels_b_sort: # first adjust colors_b to match mapped clusters from a (transfer and merge)
amap = [a for a in labels_a_sort if a_to_b[a][0] == b] # the labels a that prefer b as best match
for a in amap:
alist = matchset(clustering_a,a) # the positions in country list with label a (non empty since a is a label of clustering_a)
a_cols.update({(b,a) : mpcolors.hsv_to_rgb(color_mean_rgb_to_hsv([colors_a[al] for al in alist]))}) # average color of alist for b chosen as color
# print('in relabel a,b,a_cols',a,b,a_cols[(b,a)])
blist = matchset(clustering_b,b) # the positions in country list with label b
amap_t = list(set(amap)-set([-1])) # the labels of real clusters (excluding unclustered set with label -1) that prefer b
if len(amap_t) > 0: # some non-unclustered (ie not -1) clusters that prefer to map to b
# h = sum([mpcolors.rgb_to_hsv(a_cols[a])[0] for a in amap])/len(amap) # average hue from amap
h = color_mean_rgb_to_hsv([a_cols[(b,a)] for a in amap_t],[a_to_b[a][1] for a in amap_t])[0]
for j in blist: # indices of countries with label b
s = mpcolors.rgb_to_hsv(colors_b[j])[1] # take s saturation from b
v = mpcolors.rgb_to_hsv(colors_b[j])[2] # take v value from b
newcolors_b[j,:] = mpcolors.hsv_to_rgb([h,s,v]) # back to rgb
# b_cols[b] = newcolors_b[blist[0]] # first matching elt colour (to extract hue)
b_cols[b] = mpcolors.hsv_to_rgb(color_mean_rgb_to_hsv([newcolors_b[bl] for bl in blist])) # average color of blist chosen as color
#print('before merge')
if merge:
for a in labels_a_sort: # now readjust colors in b that both map to same a (split)
bmap = [b for b in labels_b_sort if b_to_a[b][0] == a]
if len(bmap)>1:
for i,b in enumerate(bmap):
blist = matchset(clustering_b,b)
# h = (mpcolors.rgb_to_hsv(b_cols[b])[0] + mpcolors.rgb_to_hsv(a_cols[a])[0])/2
if (b,a) in list(a_cols.keys()):
h,s0,v0 = color_mean_rgb_to_hsv([b_cols[b],a_cols[(b,a)]]) # mean of current color and that of a class that prefers this b
else:
# h = mpcolors.rgb_to_hsv(colors_b[j])[0]
h,s0,v0 = mpcolors.rgb_to_hsv(b_cols[b])
for j in blist:
# s = mpcolors.rgb_to_hsv(b_cols[b])[1] # take s saturation from b # these two lines cause all elts to have same value as first for s and v
# v = mpcolors.rgb_to_hsv(b_cols[b])[2] # take v from b
s = mpcolors.rgb_to_hsv(colors_b[j])[1] # take s saturation from b
v = mpcolors.rgb_to_hsv(colors_b[j])[2] # take v from b
newcolors_b[j,:]= mpcolors.hsv_to_rgb([h,s,v])
b_cols[b]=mpcolors.hsv_to_rgb([h,s0,v0])
return newcolors_b
def clust_lsa(clustering_a,clustering_b,colors_a,colors_b,base_colors=None,relabel=True,merge=True):
""" relables clustering b to match clustering a, optimally using first linear_sum_assignment,
then augmenting with new clusters for poorly aligned clusters
https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.linear_sum_assignment.html
inputs: clustering_a,b are lists of cluster labels by country, colors_a,b are lists of rgb colors by country in same order
returns: newcolors_b in rgb format
NB. colors_b are only used to preserve s,v values relating to probs of cluster membership for b in final colors
NB. the hues of b_cols are determined by the matching of clustering b with clustering a
NB. all elts of same cluster have the same hue
"""
# print('in clust_lsa')
labels_a = list(set(clustering_a))
labels_b = list(set(clustering_b))
labels_a_clus = list(set(clustering_a)-set([-1])) # all except unclustered class
labels_b_clus = list(set(clustering_b)-set([-1]))
# do linear_sum_assignment based on cost matrix: 1-score_int_union
scores = np.zeros((len(labels_a_clus),len(labels_b_clus)),dtype=float)
score_dict = {}
for i,a in enumerate(labels_a_clus):
for j,b in enumerate(labels_b_clus): # length intersection divided by length union (result 0. to 1. for identity)
scores[i,j] = 1-score_int_union(matchset(clustering_a,a),matchset(clustering_b,b))
score_dict[(a,b)] = scores[i,j]
row_ind,col_ind=linear_sum_assignment(scores)
# construct forward and backward dictionary assignments
if len(row_ind) != len(col_ind):
print('Error: row and col indices have different lengths',row_ind,col_ind)
dic_a_2_b = {labels_a_clus[row_ind[i]]:labels_b_clus[col_ind[i]] for i in range(len(row_ind))}
dic_a_2_b.update({-1:-1})
dic_b_2_a = {labels_b_clus[col_ind[i]]:labels_a_clus[row_ind[i]] for i in range(len(row_ind))}
dic_b_2_a.update({-1:-1})
# introduce new labels for poor matching labels in complete assignment
maxlabel = max(labels_a)
relabel_b = {}
for a in labels_a:
if a not in dic_a_2_b.keys():
dic_a_2_b.update({a:None})
else:
relabel_b[dic_a_2_b[a]]=a
# print('dic a_2_b',dic_a_2_b)
# print('dic b_2_a',dic_b_2_a)
# print('relabel_b I',relabel_b)
for b in labels_b: #unmatched labels b are given new cluster labels
if b not in relabel_b.keys():
maxlabel = maxlabel+1
relabel_b[b]=maxlabel
elif b != -1:
if score_dict[(dic_b_2_a[b] ,b)] > 0.8: # insufficient match, new cluster name
# print('new label',dic_b_2_a[b],b,scoredict[(dic_b_2_a[b] ,b)])
maxlabel = maxlabel+1
relabel_b[b]=maxlabel
# print('relabel_b II',relabel_b)
new_labels_b = np.array([relabel_b[b] for b in labels_b])
newcolors_b = np.zeros((len(colors_b),3),dtype=float)
newcolors_b[:,:] = colors_b[:,:] # initialized as copy of colors_b, colors for each country in clustering b
# relabel colours
if relabel:
for b in labels_b: # first adjust colors_b to match mapped clusters from a (transfer and merge)
if relabel_b[b] in labels_a:
a = dic_b_2_a[b]
alist = matchset(clustering_a,a)
newcol = colors_a[alist[0]]
else:
newcol = base_colors[1+relabel_b[b]]
# print('new color for b from',b,'to',relabel_b[b],'entry +1',newcol)
h = mpcolors.rgb_to_hsv(newcol)[0]
for j in matchset(clustering_b,b): # indices of countries with label b
s = mpcolors.rgb_to_hsv(colors_b[j])[1] # take s saturation from b
v = mpcolors.rgb_to_hsv(colors_b[j])[2] # take v value from b
newcolors_b[j,:] = mpcolors.hsv_to_rgb([h,s,v]) # back to rgb
# no merge yet
return newcolors_b
def cluster_map_colors(cons1,cons2,relabel=True,merge=True):
""" recalculate colors of countries in consensus clustering cons2, based on alignment with clustering cons1
input: two consensus clusterings with completed scans
relabel and merge options (default True) as for clust
output: colors2 : the matched coloring of cons2
side_effect : places inverse mapping iidx in cons2 to allow country order alignment
"""
refc1 = cons1.refclustering
refc2 = cons2.refclustering
clusdat1 = np.array([cons1.clusdata[refc1][i] for i in cons1.sidx])
clusdat2 = np.array([cons2.clusdata[refc2][i] for i in cons1.sidx]) # NB not cons2.sidx
if len(clusdat1) != len(clusdat2):
print('Error: country list lengths not equal')
return None
else: ncountries = len(clusdat2)
cons2.iidx = [None]*ncountries
for i, j in zip(range(ncountries), cons2.sidx): cons2.iidx[j] = i # undo cons2.idx reordering
colors1 = np.array([cons1.basecolors[clus+1] for clus in clusdat1])
colors2_c = np.array([cons2.basecolors[clusdat2[cons2.iidx[i]]+1] for i in range(ncountries)] )
colors2_0 = np.array([colors2_c[cons2.sidx[i]] for i in range(ncountries)] )
#colors1 = np.array(cons1.rgblist) # already ordered like scountries
#colors2_c = np.array([cons2.rgblist[cons2.iidx[i]] for i in range(ncountries)] ) # change order back to match countries # DEBUG
#colors2 = np.array([colors2_c[cons2.sidx[i]] for i in range(ncountries)] ) # change order to match scountries of cons1 # DEBUG
# print(np.array(list(zip(clusdat2,mpcolors.rgb_to_hsv(colors2)))))
colors2 = clust_lsa(clusdat1,clusdat2,colors1,colors2_0,base_colors=cons2.basecolors,relabel=relabel,merge=merge)
#for i in range(len(colors2)):
# print(i,clusdat1[i],clusdat2[i],mpcolors.rgb_to_hsv(colors1[i]),mpcolors.rgb_to_hsv(colors2[i]))
return colors1,colors2
def cmap_sankey(clus1,clus2,colors1,colors2,hue_only=True):
cmap12 = {}
for ci in set(clus1):
for i,lab in enumerate(clus1):
if lab == ci:
tmp = colors1[i]
break
cmap12.update({'a'+str(ci):tmp})
for ci in set(clus2):
for i,lab in enumerate(clus2):
if lab == ci:
tmp = colors2[i]
# print(ci,mpcolors.rgb_to_hsv(tmp))
break
cmap12.update({'b'+str(ci):tmp})
if hue_only:
# reset colors to full saturation and value unless sat is 0
cmap12h = {elt:mpcolors.hsv_to_rgb([mpcolors.rgb_to_hsv(cmap12[elt])[0],1,1] if mpcolors.rgb_to_hsv(cmap12[elt])[1]!=0 else [0,0,0]) for elt in cmap12}
else:
cmap12h = cmap12
return cmap12h
def sankey(cons1,cons2,cons1_name='cons1',cons2_name='cons2',relabel=True,merge=True,hue_only=True):
# extract refclustering data and order it according to the scountries list of cons1=cons
# set up dictionary lists of countries for each label
if len(cons1.countries) != len(cons2.countries):
print('Error: lengths of countries not equal',len(cons1.countries),len(cons2.countries))
return
clus1 = [cons1.clusdata[cons1.refclustering][i] for i in cons1.sidx] # ordered like scountries
clus2 = [cons2.clusdata[cons2.refclustering][i] for i in cons1.sidx]
colors1,colors2=cluster_map_colors(cons1,cons2,relabel=relabel,merge=merge)
cmap12=cmap_sankey(clus1,clus2,colors1,colors2,hue_only=hue_only)
dic1 = {lab:[cc for i,cc in enumerate(cons1.scountries) if clus1[i]==lab] for lab in set(clus1)}
dic2 = {lab:[cc for i,cc in enumerate(cons1.scountries) if clus2[i]==lab] for lab in set(clus2)}
df = dic_compare(dic1,dic2)
h1 = hv.Sankey(df,kdims=['c1','c2'],vdims=['val'])
h1.opts(title=cons1_name+' vs '+cons2_name, cmap=cmap12, node_color='index', edge_color='c1', node_alpha=1.0, edge_alpha=0.7)
return h1
# the final cluster alignment
def plot_clusalign(countries,data,report,cols=None):
fig,ax = plt.subplots(1,1,figsize=(10,24))
if cols is not None:
todel = list(set(range(data.shape[1])) - set(cols))
data1 = np.delete(data,todel,1)
else:
data1 = data
img = ax.imshow(data1)
ax.set_yticks(range(len(countries)))
ax.set_yticklabels(countries)
if cols is None:
rep = report
else:
rep = [report[i] for i in cols]
ax.set_xticks(range(len(rep)))
plt.setp(ax.get_xticklabels(), rotation='vertical', family='monospace')
ax.set_xticklabels(rep,rotation='vertical')
#plt.show()
return fig
# Note that the colours are best understood as hue with value v = intensity related to membership prob
# note that unclustered points had probdata values of 0 formerly, now corrected to give outlier_score_
#
# We should be assigning countries to at least 4 categories : probably five. Cluster 0,1,2 other cluster and no cluster (-1)
# Currently the code relies on the color assignments cluster 0 [1,0,0] 1 [0,1,0] 2 [0,0,1] and only works for 3 clusters.
# The unclustered color of [1,1,1] did not disrupt if the probability was always 0 : this will not work with outlier extension
# Other clusters with higher labels were assigned in a rather biased way to one of 0,1,2 : this needs fixing
#
# count +1 for any RGB component
def cscore(crow,cols):
rgbsc = [0.0]*3
for j in cols:
if crow[j][0] >0:
rgbsc[0] = rgbsc[0]+1
if crow[j][1] >0:
rgbsc[1] = rgbsc[1]+1
if crow[j][2] >0:
rgbsc[2] = rgbsc[2]+1
return rgbsc
# sum RGB components
def cscore_org(crow,cols):
rgbsc = [0.0]*3
for j in cols:
rgbsc[0] = rgbsc[0]+crow[j][0]
rgbsc[1] = rgbsc[1]+crow[j][1]
rgbsc[2] = rgbsc[2]+crow[j][2]
return rgbsc
#sum weighted hues
def hscore_org(crow,cols):
hsvmean = color_mean_rgb_to_hsv([crow[j] for j in cols],'all')
return hsvmean
def hscore_mode_org(crow,cols):
hsvmean = color_mean_rgb_to_hsv([crow[j] for j in cols],'all',modal=True)
return hsvmean
def swizzle_old(countries,data,cols):
rgb = [None]*len(countries)
for i in range(len(countries)):
for j in range(data.shape[1]):
rgbsc = cscore(data[i,:,:],cols)
rgb[i] = np.argmax(rgbsc)
rtn = [None]*len(countries)
cnt = 0
print('-------blue---------')
for i in range(len(rgb)):
if rgb[i] == 2: #blue
rtn[cnt] = i
print(cnt,i,countries[i])
cnt = cnt+1
print('-------green---------')
for i in range(len(rgb)):
if rgb[i] == 1: # green
rtn[cnt] = i
print(cnt,i,countries[i])
cnt = cnt+1
print('-------red---------')
for i in range(len(rgb)):
if rgb[i] == 0: # red
rtn[cnt] = i
print(cnt,i,countries[i])
cnt = cnt+1
print('cnt =',cnt)
return rtn
def swizzleRGB(countries,data,cols):
rgb = [None]*len(countries)
for i in range(len(countries)):
for j in range(data.shape[1]):
rgbsc = cscore(data[i,:,:],cols)
rgb[i] = np.argmax(rgbsc)
rtn = {}
rtn['R']=[]
rtn['G']=[]
rtn['B']=[]
cnt = 0
for i in range(len(rgb)):
if rgb[i] == 2: #blue
rtn['B'].append(countries[i])
cnt = cnt+1
for i in range(len(rgb)):
if rgb[i] == 1: # green
rtn['G'].append(countries[i])
cnt = cnt+1
for i in range(len(rgb)):
if rgb[i] == 0: # red
rtn['R'].append(countries[i])
cnt = cnt+1
print('cnt =',cnt)
return rtn
def swizzle2(countries,data,cols,refcol):
eps = 0.0001
clus = [None]*len(countries)
rgblist = [None]*len(countries)
hsvdic = {}
hsvrefs = [mpcolors.rgb_to_hsv(c) for c in data[:,refcol]]
huesref = np.sort(list(set([hsv[0] for hsv in hsvrefs if hsv[1] > eps])))
# print('huesref',huesref)
for i in range(len(countries)):
hsvsc = hscore_org(data[i,:,:],cols)
hue = hsvsc[0]
sat = hsvsc[1]
if sat <= 0.5: # mean is classed as unclustered
clus[i] = -1
else:
clus[i] = closest_hue(hue,huesref)
hsvdic.update({countries[i]:hsvsc})
rgblist[i] = mpcolors.hsv_to_rgb(hsvsc)
# print('clus',clus,'len',len(clus))
rtn = [None]*len(countries)
cnt = 0
for j in set(clus):
print('-------class',j,'---------')
for i in range(len(countries)):
if clus[i] == j:
rtn[cnt] = i
# print(cnt,i,countries[i],rgblist[i],hsvlist[i])
print(cnt,i,countries[i])
cnt = cnt+1
print('cnt =',cnt)
return rtn,rgblist,hsvdic
def swizzle3(countries,data,cols,refcol,basecolors,refdata,satthresh = 0.7):
eps = 0.0001
clus = [None]*len(countries)
rgblist = [None]*len(countries)
hsvdic = {}
#hsvrefs = [mpcolors.rgb_to_hsv(c) for c in data[:,refcol]]
refclus = np.sort(list(set(refdata))) # cluster classes in reference column
#print('refclus',refclus)
#huesref = np.sort(list(set([hsv[0] for hsv in hsvrefs if hsv[1] > eps])))
huesref = [mpcolors.rgb_to_hsv(basecolors[1+i])[0] for i in refclus if i != -1]
#print('data shape',np.shape(data))
#print('huesref',huesref)
for i in range(len(countries)):
# hsvsc = hscore_org(data[i,:,:],cols) # weighted circular mean hue
hsvsc = hscore_mode_org(data[i,:,:],cols) # modal hue, falling back to the circular mean when no hue dominates
hue = hsvsc[0]
sat = hsvsc[1]
if sat <= satthresh: # mean is classed as unclustered
clus[i] = -1
else:
clus[i] = closest_hue(hue,huesref)
#print(i,countries[i],hue,clus[i])
hsvdic.update({countries[i]:hsvsc})
rgblist[i] = mpcolors.hsv_to_rgb(hsvsc)
# print('clus',clus,'len',len(clus))
rtn = [None]*len(countries)
classes = [None]*len(countries)
cnt = 0
dic={}
for j in set(clus):
dic[j]=[]
#print('-------class',j,'---------')
for i in range(len(countries)):
if clus[i] == j:
classes[cnt] = j
rtn[cnt] = i
dic[j].append(countries[i])
hsvdic[countries[i]] = [j]+hsvdic[countries[i]] # add class to hsvdic
# print(cnt,i,countries[i],rgblist[i],hsvlist[i])
#print(cnt,i,countries[i])
cnt = cnt+1
#print('cnt =',cnt)
clus_argsort = np.lexsort((countries,clus)) # lexicographical sort of countries by reference clustering and name
# swcountries = [countries[clus_argsort[i]] for i in range(len(countries))] # sorted country names as above
swclasses = [clus[clus_argsort[i]] for i in range(len(countries))] # classes in the same sorted country order
swrgblist = [rgblist[clus_argsort[i]] for i in range(len(countries))] # colors in the same sorted country order
# return dic,swclasses,rtn,rgblist,hsvdic
return dic,swclasses,clus_argsort,rgblist,hsvdic
def swizzle_class(countries,data,cols,refcol):
clus = [None]*len(countries)
huesref = np.sort(list(set([mpcolors.rgb_to_hsv(c)[0] for c in data[:,refcol]])))
# print('huesref',huesref)
for i in range(len(countries)):
hsvsc = hscore_org(data[i,:,:],cols)
hue = hsvsc[0]
sat = hsvsc[1]
if sat <= 0.5: # mean is classed as unclustered
clus[i] = -1
else:
clus[i] = closest_hue(hue,huesref)
rtn = {}
for cl in set(clus):
rtn[cl]=[]
cnt = 0
for j in set(clus):
# print('-------class',j,'---------')
for i in range(len(countries)):
if clus[i] == j:
rtn[j].append(countries[i])
# print(cnt,i,countries[i])
cnt = cnt+1
print('cnt =',cnt)
return rtn
def swizzleHSV(countries,data,cols,refcol):
rtn = {}
clus = [None]*len(countries)
huesref = np.sort(list(set([mpcolors.rgb_to_hsv(c)[0] for c in data[:,refcol]])))
# print('huesref',huesref)
for i in range(len(countries)):
hsvsc = hscore_org(data[i,:,:],cols)
hue = hsvsc[0]
sat = hsvsc[1]
if sat <= 0.5: # mean is classed as unclustered
clus[i] = -1
else:
clus[i] = closest_hue(hue,huesref)
rtn[countries[i]]=(clus[i],hsvsc[0],hsvsc[1],hsvsc[2])
return rtn
def dic_compare(dic1,dic2):
df = pd.DataFrame(columns=['c1','c2','val'])
cnt=0
for k in dic1:
Nk = len(dic1[k])
s1 = set(dic1[k])
for kk in dic2:
s2 = set(dic2[kk])
#olap = len(s1.intersection(s2))/float(Nk)
olap = len(s1.intersection(s2))
if olap > 0:
df.loc[cnt] = ['a'+str(k),'b'+str(kk),olap]
cnt = cnt+1
return df
def dic2df(dic):
rtn = {k:dic[k].copy() for k in dic}
keys = [x for x in dic]
lenmx = 0
for k in keys:
if len(dic[k])>lenmx:
lenmx = len(dic[k])
kmx = k
for k in keys:
if len(dic[k])<lenmx:
for _ in range(lenmx-len(dic[k])):
rtn[k].append('')
return pd.DataFrame.from_dict(rtn)
#return rtn
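# Added example of dic2df (toy dict, not notebook data): shorter country lists are padded
# with empty strings so the dict becomes a rectangular DataFrame.
_demo_df = dic2df({0: ['Germany', 'France'], 1: ['Italy']})
assert list(_demo_df[1]) == ['Italy', '']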
def dic_invert(d):
inv = {}
for k, v in d.items():
if isinstance(v,list):
for vv in v:
keys = inv.setdefault(vv, [])
keys.append(k)
else:
keys = inv.setdefault(v, [])
keys.append(k)
for k in inv:
if len(inv[k]) == 1:
inv[k] = inv[k][0]
return inv
def sprint(*args, **kwargs):
output = io.StringIO()
print(*args, file=output, **kwargs)
contents = output.getvalue()
output.close()
return contents
def sprintdic(dic,chosen_country):
global chosen_class
chosen_class = None
for label in dic:
if chosen_country in dic[label]:
chosen_class = label
break
rtn = ''
if chosen_class is None:
#print('Error: chosen_country not classified')
rtn = rtn + sprint('Unclassified selection')
elif chosen_class == -1:
rtn = rtn + sprint('unclustered:')
else:
rtn = rtn + sprint('class '+str(chosen_class)+':')
if chosen_class is not None:
countries = np.sort(np.array(dic[chosen_class]))
else:
print("Error sprintdic: no countries in class",chosen_class)
return('')
colwid = max([len(cc) for cc in countries[::2]]) + 5 # padding
for i in range(0,len(countries),2):
if i < len(countries)-1:
rtn = rtn + sprint(countries[i].ljust(colwid)+countries[i+1])
# rtn = rtn + sprint("".join(country.ljust(colwid) for country in [countries[i],countries[i+1]]))
else:
rtn = rtn + sprint(countries[i])
return rtn
class Consensus:
def __init__(self,
cldata,
cases = ['deaths', 'cases', 'cases_lin2020', 'cases_pwlfit', 'cases_nonlin', 'cases_nonlinr'],
ncomp = range(2,16),
minc = range(3,10),
min_samples = range(2,3), # 1 element [2] by default
satthresh = 0.7 # metaparam for swizzle, toward 1 => more unclustered
):
for cc in cases:
if cc not in ['deaths', 'cases', 'cases_lin2020', 'cases_pwlfit', 'cases_nonlin', 'cases_nonlinr']:
print('cases can only be one of:')
print(['deaths', 'cases', 'cases_lin2020', 'cases_pwlfit', 'cases_nonlin', 'cases_nonlinr'])
self.cases = cases
self.ncomp = ncomp
self.minc = minc
self.min_samples = min_samples
self.countries = list(cldata.clusdata_all[cases[0]].keys()) # save countries in first data set as list
self.satthresh = satthresh
self.clusdata = None
self.swcountries=None
self.cldata=cldata
def scan(self,diag=False,progress=True,name=''):
countries = self.countries
lc = len(self.cases)
maxvalid = [None]*lc
maxvalidval= 0.0
maxvalidsc = [None]*lc
maxvalidscval= 0.0
minscore1 = [None]*lc
minscore1val = 999.
minscore2 = [None]*lc
minscore2val = 999.
self.report = [' ']*4*lc
self.reportdata = [None]*4*lc
# runlen = len(self.cldata.clusdata_all[self.cases[0]])
runlen = len(countries)
self.probdata=np.zeros((4*lc,runlen),dtype=float)
self.outlierdata=np.zeros((4*lc,runlen),dtype=float)
self.clusdata = np.zeros((4*lc,runlen),dtype=np.int64)
self.info = pd.DataFrame(columns=['type','minc','mins','ncomp','clustered','unclustered','validity','validitysc','score1','score2'])
infomax = pd.DataFrame(columns=['type','minc','mins','ncomp','clustered','unclustered','validity','validitysc','score1','score2'])
cnt=0
for ic,case in tqdm(list(enumerate(self.cases)), desc=name+'loop over cases' ,disable= not progress): # loop with progress bar instead of just looping over enumerate(cases)
# for ic,case in enumerate(self.cases):
data = self.cldata.clusdata_all[case]
#dat = np.array([data[cc] for cc in data]).astype(float)
dat = np.array([data[cc] for cc in countries]).astype(float)
for i in range(len(dat)): # normalize data
mx = max(dat[i])
dat[i] = [dd/mx for dd in dat[i]]
dat_disc = skfda.representation.grid.FDataGrid(dat,list(range(len(dat[0]))))
if diag:
print('--------------------------',case,'-------------------------------')
maxvalidval= 0.0
maxvalidscval= 0.0
minscore1val = 999.
minscore2val = 999.
for ncomp in self.ncomp: # code will only work if reference value 2 included in range
fpca_disc = FPCA(n_components=ncomp)
fpca_disc.fit(dat_disc)
foo = fpca_disc.transform(dat_disc)
for min_samples in self.min_samples:
for minc in self.minc:
clusterer = hdbscan.HDBSCAN(min_cluster_size=minc,min_samples=min_samples)
labels = clusterer.fit_predict(foo)
# nclus = len(set([x for x in labels if x>-1]))
labelset = np.unique(labels)
nclus = len(np.unique(labels))
if -1 in labelset:
nclus = nclus-1
# nunclustered = sum([1 for x in labels if x==-1])
nunclustered = np.count_nonzero(labels == -1)
# nclustered = sum([1 for x in labels if x>-1])
nclustered = len(labels) - nunclustered
try:
validity = hdbscan.validity.validity_index(foo, labels)
validity = max(validity,0.001)
validitysc = rescale(validity,ncomp)
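# Added note: both heuristic scores below are to be minimised; they combine the scaled
# validity, the number of unclustered countries, and the deviation of the cluster count from 4.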
score1 = 1.0/validitysc + float(nunclustered)/5. + np.abs(float(nclus)-4.)/2.
score2 = float(nunclustered)*(4.+np.abs(nclus-4.))/(validitysc*20.)
if validity > maxvalidval:
maxvalidval = validity
maxvalid[ic] = [(minc,min_samples,ncomp,nclus,nclustered,nunclustered,validity,validitysc,score1,score2)]
self.probdata[ic*4,:] = clusterer.probabilities_[:]
self.outlierdata[ic*4,:] = clusterer.outlier_scores_[:]
self.clusdata[ic*4,:] = labels[:]
self.report[ic*4] = 'max normal validity: %13s,%2d,%3d,%3d,%3d,%5.2f' % (case,minc,ncomp,nclus,nunclustered,validitysc)
self.reportdata[ic*4] = (case,minc,ncomp,nclus,nunclustered,validity,validitysc,score1,score2)
if validitysc > maxvalidscval:
maxvalidscval = validitysc
maxvalidsc[ic] = [(minc,min_samples,ncomp,nclus,nclustered,nunclustered,validity,validitysc,score1,score2)]
self.probdata[ic*4+1,:] = clusterer.probabilities_[:]
self.outlierdata[ic*4+1,:] = clusterer.outlier_scores_[:]
self.clusdata[ic*4+1,:] = labels[:]
self.report[ic*4+1] = 'max scaled validity: %13s,%2d,%3d,%3d,%3d,%5.2f' % (case,minc,ncomp,nclus,nunclustered,validitysc)
self.reportdata[ic*4+1] = (case,minc,ncomp,nclus,nunclustered,validity,validitysc,score1,score2)
if score1 < minscore1val:
minscore1val = score1
minscore1[ic] = [(minc,min_samples,ncomp,nclus,nclustered,nunclustered,validity,validitysc,score1,score2)]
self.probdata[ic*4+2,:] = clusterer.probabilities_[:]
self.outlierdata[ic*4+2,:] = clusterer.outlier_scores_[:]
self.clusdata[ic*4+2,:] = labels[:]
self.report[ic*4+2] = 'min combined score1: %13s,%2d,%3d,%3d,%3d,%5.2f' % (case,minc,ncomp,nclus,nunclustered,validitysc)
self.reportdata[ic*4+2] = (case,minc,ncomp,nclus,nunclustered,validity,validitysc,score1,score2)
if score2 < minscore2val:
minscore2val = score2
minscore2[ic] = [(minc,min_samples,ncomp,nclus,nclustered,nunclustered,validity,validitysc,score1,score2)]
self.probdata[ic*4+3,:] = clusterer.probabilities_[:]
self.outlierdata[ic*4+3,:] = clusterer.outlier_scores_[:]
self.clusdata[ic*4+3,:] = labels[:]
self.report[ic*4+3] = 'min combined score2: %13s,%2d,%3d,%3d,%3d,%5.2f' % (case,minc,ncomp,nclus,nunclustered,validitysc)
self.reportdata[ic*4+3] = (case,minc,ncomp,nclus,nunclustered,validity,validitysc,score1,score2)
if diag:
print('hdbscan: ',minc,'minc: ',min_samples,'mins: ',ncomp ,'FPCAcomponents: ',
nclus,'clusters; ',
nclustered,'clustered; ',
nunclustered,'unclustered; ','validity =',np.round(validity,5),'validitysc =',np.round(validitysc,5),
'score1:',np.round(score1,3),'score2:',np.round(score2,3))
except:
validity=None
if diag:
print('hdbscan: ',minc,'minc: ',min_samples,'mins: ',ncomp ,'FPCAcomponents: ',
nclus,'clusters; ',
nclustered,'clustered; ',nunclustered,'unclustered; ','validity =',validity)
self.info.loc[cnt] = [case,minc,min_samples,ncomp,nclustered,nunclustered,validity,validitysc,score1,score2]
cnt = cnt+1
if diag:
print('--------------------------')
self.probdata2 = np.where(self.probdata==0.,self.outlierdata,self.probdata)
if diag:
print('---------------------------------------------------------')
print('minc,min_samples,ncomp,nclus,nclustered,nunclustered,validity,validitysc,score1,score2')
print('maxvalid ',maxvalid[ic])
print('maxvalidsc ',maxvalidsc[ic])
print('minscore1',minscore1[ic])
print('minscore2',minscore2[ic])
#print('making clusters...')
#self.make_clusters()
#print('swizzling')
#self.swizzle()
def plot_outliers(self):
Nvars = len(self.cases)*4
lc = len(self.cases) # lc was not defined in this method; it is the number of cases
max_cols = lc
max_rows = Nvars // max_cols
if Nvars % max_cols:
max_rows = max_rows+1
fig,axes = plt.subplots(lc,4,figsize=(4*lc,36))
for n in range(Nvars):
i = n % max_rows
j = int (n/max_rows)
ax = axes[j,i]
ax.scatter(range(len(self.outlierdata[0])),self.probdata[n],color='blue',alpha=0.3,s=40) # blue
ax.scatter(range(len(self.outlierdata[0])),1-self.outlierdata[n],color='red',alpha=0.3,s=20) # red
ax.set_xlabel('country')
ax.set_title(self.report[n])
def make_clusters(self,
refclustering='auto', # # fiducial column; change here.
diag=True):
if len(self.clusdata) == 0:
print('must run a scan to define and fill clusdata first: starting scan')
self.scan()
countries = self.countries
if diag:
print(len(countries),'countries')
if refclustering == 'auto' or refclustering >= 4*len(self.cases):
nrep = len(self.reportdata)
scores = [rep[7] for rep in self.reportdata[3:nrep:4]] # optimal score 2 subset of data reports
refclustering = np.argmin(np.array(scores))*4+3 # ref clustering is clustering with minimal score 2
if diag:
print('reference clustering (numbered from 0) is',refclustering)
self.refclustering = refclustering
else:
self.refclustering = refclustering
clus_argsort = np.lexsort((countries,self.clusdata[self.refclustering])) # lexicographical sort of countries by reference clustering and name
scountries = [countries[clus_argsort[i]] for i in range(len(countries))] # sorted country names as above
self.scountries = scountries
self.sidx = clus_argsort
# self.sinvidx =
self.probdata_s = self.probdata2.copy() # sort the cluster ids and probs to match scountries
self.clusdata_s = self.clusdata.copy()
self.dics = []
for i in range(len(self.probdata2)):
tmp = {}
for j in range(len(scountries)):
self.probdata_s[i,j] = self.probdata2[i,clus_argsort[j]]
self.clusdata_s[i,j] = self.clusdata[i,clus_argsort[j]]
tmp[scountries[j]] = self.clusdata_s[i,j]
self.dics.append(tmp.copy())
self.clusdata_rs = self.clusdata_s[self.refclustering,:]
"""
This is the basic cluster comparison. It suffers from the independent ordering of clusters, which makes the colourings different in each column.
* In general, given the different number of clusters this is a nontrivial problem in graph matching. We adopt a two phase approach in what follows:
* first choose a reference column (here column `refclustering=1` (defined in a cell above), not zero) with a good differentiated clustering.
* relabel the clusters in each other column with the colours of the best matching cluster in the reference column (`coldata_adj`)
* then relabel the colours again in case of split clusters, with the hybrid colour of the source cluster colour in reference column and the destination colour (`coldata_adj2`)
`coldata`, `coldata_adj` and `coldata_adj2` are 3-d matrices: rows labeled by countries, columns labeled by report string (from max scoring), and 3 values for RGB in z-dim.
"""
rawdata = np.transpose(self.probdata_s)
cindex = np.transpose(self.clusdata_s)
ncols = len(set(self.clusdata.flatten()))
if ncols>16:
print('Error: currently only 16 different colours allowed', ncols )
colors = np.array([[1,1,1],[1,0,0],[0,1,0],[0,0,1],[1.,1.,0.],[1.,0.,1.],[0.,1.,1.],[0.5,1.,0.],
[0,1,0.5],[0.5,0,1],[0.5,1,0.5],[0.3,0.7,0.5],[0.5,0.7,0.3],[0.7,0.5,0.3],[0.1,0.7,0.7],[0.7,0.1,0.7]]) # black,red,green,blue,yellow,cyan,magenta,...
# colors = np.concatenate((colors,colors))
self.basecolors = colors
cluscols = np.transpose(colors[cindex[:,:]+1],(2,0,1)) # transpose to allow elementwise multiplication with rawdata with separate r,g,b
self.coldata = np.transpose((cluscols+3*cluscols*rawdata)/4.,(1,2,0)) # transpose back to have colours as elements of 2D array
coldata_c = self.coldata.copy()
coldata_t = np.transpose(coldata_c,(1,0,2))
clusa = self.clusdata_s[self.refclustering]
ca = coldata_t[self.refclustering]
for i in range(0,len(self.clusdata_s)):
if i != self.refclustering:
clusb = self.clusdata_s[i]
cb = coldata_t[i]
# newcolors_b = clust(clusa,clusb,ca,cb,True,False) # only do phase 1 of cluster recolouring
newcolors_b = clust_lsa(clusa,clusb,ca,cb,base_colors=colors,relabel=True,merge=False)
coldata_t[i,:] = newcolors_b[:]
self.coldata_adj = np.transpose(coldata_t,(1,0,2)) # sorted by countries in reference clustering sorted order
coldata_c2 = self.coldata.copy()
coldata_t2 = np.transpose(coldata_c2,(1,0,2))
clusa = self.clusdata_s[self.refclustering]
ca = coldata_t2[self.refclustering]
for i in range(0,len(self.clusdata_s)):
if i != self.refclustering:
clusb = self.clusdata_s[i]
cb = coldata_t2[i]
# newcolors_b = clust(clusa,clusb,ca,cb,True,True) # do phases 1 and 2 or cluster recolouring
newcolors_b = clust_lsa(clusa,clusb,ca,cb,base_colors=colors,relabel=True,merge=True)
coldata_t2[i,:] = newcolors_b[:]
self.coldata_adj2 = np.transpose(coldata_t2,(1,0,2)) # sorted by countries in reference clustering sorted order
def plot_stage(self,stage=1):
scountries = self.scountries
if stage not in [1,2,3]:
print('Currently there are only stages 1, 2, 3')
return
if stage==1:
coldat = self.coldata
elif stage==2:
coldat = self.coldata_adj
elif stage==3:
coldat = self.coldata_adj2
fig,ax = plt.subplots(1,1,figsize=(15,20))
img = ax.imshow(coldat)
ax.set_yticks(range(len(scountries)))
ax.set_yticklabels(scountries)
ax.set_xticks(range(len(self.clusdata_s)))
plt.setp(ax.get_xticklabels(), rotation='vertical', family='monospace')
ax.set_xticklabels(self.report,rotation='vertical')
# fig.colorbar(img)
#plt.show()
return fig
def plot_all_stages(self):
# the three stages of cluster alignment
scountries = self.scountries
fig,axes = plt.subplots(1,3,figsize=(20,20))
ax = axes[0]
img = ax.imshow(self.coldata)
ax.set_yticks(range(len(scountries)))
ax.set_yticklabels(scountries)
ax.set_xticks(range(len(self.clusdata_s)))
plt.setp(ax.get_xticklabels(), rotation='vertical', family='monospace')
ax.set_xticklabels(self.report,rotation='vertical')
ax = axes[1]
img = ax.imshow(self.coldata_adj)
ax.set_yticks(range(len(scountries)))
ax.set_yticklabels(scountries)
ax.set_xticks(range(len(self.clusdata_s)))
plt.setp(ax.get_xticklabels(), rotation='vertical', family='monospace')
ax.set_xticklabels(self.report,rotation='vertical')
ax = axes[2]
img = ax.imshow(self.coldata_adj2)
ax.set_yticks(range(len(scountries)))
ax.set_yticklabels(scountries)
ax.set_xticks(range(len(self.clusdata_s)))
plt.setp(ax.get_xticklabels(), rotation='vertical', family='monospace')
ax.set_xticklabels(self.report,rotation='vertical')
fig.tight_layout(pad=2.0)
#plt.show()
return fig
def swizzle(self,cols=None,satthresh=None):
if satthresh is None:
satthresh = self.satthresh
# scountries = self.scountries # s stands for sorted (by ref clustering and lexicographically)
if cols is None:
self.cols=list(range(4*len(self.cases)))
else:
self.cols = cols
dic,classes,idx,rgblist,hsvdic = swizzle3(self.scountries,self.coldata_adj2,self.cols,self.refclustering,self.basecolors,self.clusdata_rs,satthresh = satthresh)
#print(cols.idx)
self.classes = classes # already swizzle ordered by swizzle3
self.swdat = np.array([self.coldata_adj2[i] for i in idx]) # swdat is swizzle3 sorted coldata_adj2
self.swcountries = [self.scountries[i] for i in idx] # swcountries is swizzle3 sorted scountries
self.swdic = dic
self.swidx = idx
self.rgbdic = {cc:mpcolors.hsv_to_rgb(hsvdic[cc][1:4]) for cc in hsvdic}
self.hsvdic = hsvdic
self.rgblist = rgblist # already swizzle ordered by swizzle3
def plot_swiz(self):
data = self.swdat.copy()
fig,ax = plt.subplots(1,1,figsize=(10,24))
if self.cols is not None:
todel = list(set(range(data.shape[1])) - set(self.cols))
data1 = np.delete(data,todel,1)
else:
data1 = data
img = ax.imshow(data1)
ax.set_yticks(range(len(self.swcountries)))
labels = [cc+' %d' % self.classes[i] for i,cc in enumerate(self.swcountries)]
# ax.set_yticklabels(self.swcountries)
ax.set_yticklabels(labels)
if self.cols is None:
rep = self.report
else:
rep = [self.report[i] for i in self.cols]
ax.set_xticks(range(len(rep)))
plt.setp(ax.get_xticklabels(), rotation='vertical', family='monospace')
ax.set_xticklabels(rep,rotation='vertical')
# plt.show()
return fig
def plot_quantile(self,dtype,cluster='all',chosen_country=None,title=True):
classdic = self.swdic
if cluster == 'all':
classes = list(classdic.keys())
elif isinstance(cluster,list):
classes = cluster
elif cluster == 'own':
for label in classdic:
if chosen_country in classdic[label]:
classes= [label]
break
else:
classes = [cluster]
clusdata_all = self.cldata.clusdata_all
fig, ax = plt.subplots(1,len(classes),figsize=(len(classes)*5,5),squeeze=False)
cnt = 0
# print('classdic',classdic)
for label in classes:
nelts = len(classdic[label])
if chosen_country and chosen_country in classdic[label]:
maxval=np.amax(clusdata_all[dtype][chosen_country])
datchosen = np.maximum(clusdata_all[dtype][chosen_country]/maxval,0.)
#dats = [[max(z/max(clusdata_all[dtype][cc]),0.) for z in clusdata_all[dtype][cc]] for cc in classdic[label] ]
dats=np.zeros((len(classdic[label]),len(clusdata_all[dtype]['Germany'])),dtype=float)
for i,cc in enumerate(classdic[label]):
maxval=np.amax(clusdata_all[dtype][cc])
dats[i,:] = np.maximum(clusdata_all[dtype][cc]/maxval,0.)
dats = np.transpose(np.array(dats))
pdats = [pd.Series(dat) for dat in dats]
qdats = [[pdat.quantile(q) for q in [0.,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.]] for pdat in pdats]
data = np.transpose(np.array(qdats))
#print(np.shape(dats))
#print(np.shape(data))
# data = qdats
x = range(len(data[0]))
clrs = ['#f0f0f0','#c0c0c0','#505050','#303030','#00a0a0','#c0c000','#303030','#505050','#c0c0c0','#f0f0f0'] # one colour per decile band between successive quantiles
for i in range(1,len(data)):
ax[0,cnt].fill_between(x,data[i-1],data[i],alpha=0.8,color=clrs[i-1]);
if label != -1 and title:
ax[0,cnt].set_title('Class '+('%d' % label)+(' with %d ' % nelts)+'elts')
elif title:
ax[0,cnt].set_title('Unclustered'+(' with %d ' % nelts)+'elts')
# ax[cnt].set_xticklabels("") # x labels for cases_nonlinr!
if cnt>0:
ax[0,cnt].set_yticklabels("")
if chosen_country and chosen_country in classdic[label]:
#print(len(x),len(datchosen))
if 'deaths' in dtype:
ax[0,cnt].plot(x,datchosen,alpha=1.0,color='red');
else:
ax[0,cnt].plot(x,datchosen,alpha=1.0,color='green');
cnt = cnt+1
if title:
plt.suptitle(dtype);
return
def make_map(self):
global geog,geog1,clusters,geo_json_data
cldata = self.cldata
def load_data(url, filename, file_type):
r = requests.get(url)
with open(filename, 'w') as f:
f.write(r.content.decode("utf-8"))
with open(filename, 'r') as f:
return file_type(f)
url = 'https://raw.githubusercontent.com/python-visualization/folium/master/examples/data'
country_shapes = f'{url}/world-countries.json'
# Loading a json data structure with geo data using json.load: geo_json_data
geo_json_data = load_data(country_shapes,'json',json.load);
fname = country_shapes
geog = gpd.read_file(fname)
geog1 = gpd.read_file(fname)
# self.clusalign_hsv = swizzleHSV(self.scountries,self.coldata_adj2,self.cols,self.refclustering)
df0list = [[term]+list(self.hsvdic[term]) for term in self.hsvdic]
df0 = pd.DataFrame(df0list, columns = ['name','cluster','hue','sat','val'])
dflist = [[term]+[list(self.hsvdic[term])[0]]+[list(self.hsvdic[term])[1:]] for term in self.hsvdic]
df =
|
pd.DataFrame(dflist, columns = ['name','cluster','hsv'])
|
pandas.DataFrame
|
import pandas as pd
import numpy as np
import os
input_data_folder = "../labeled_logs_csv"
output_data_folder = "../labeled_logs_csv_processed"
filenames = ["BPIC15_%s_f%s.csv"%(municipality, formula) for municipality in range(1,6) for formula in range(2,3)]
case_id_col = "Case ID"
activity_col = "Activity"
resource_col = "org:resource"
timestamp_col = "time:timestamp"
label_col = "label"
pos_label = "deviant"
neg_label = "regular"
category_freq_threshold = 10
# features for classifier
dynamic_cat_cols = ["Activity", "monitoringResource", "question", "org:resource"]
static_cat_cols = ["Responsible_actor"]
dynamic_num_cols = []
static_num_cols = ["SUMleges"]
static_cols_base = static_cat_cols + static_num_cols + [case_id_col, label_col]
dynamic_cols = dynamic_cat_cols + dynamic_num_cols + [timestamp_col]
cat_cols = dynamic_cat_cols + static_cat_cols
def split_parts(group, parts_col="parts"):
return pd.Series(group[parts_col].str.split(',').values[0], name='vals')
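# Added toy example (not BPIC15 data): for a group whose "parts" cell is "A,B,C" the
# function returns a Series ['A', 'B', 'C'] named 'vals', which pd.get_dummies later
# expands into one indicator column per part.
_toy_parts = pd.DataFrame({"parts": ["A,B,C"]})
assert list(split_parts(_toy_parts)) == ["A", "B", "C"]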
def extract_timestamp_features(group):
group = group.sort_values(timestamp_col, ascending=False, kind='mergesort')
tmp = group[timestamp_col] - group[timestamp_col].shift(-1)
tmp = tmp.fillna(0)
group["timesincelastevent"] = tmp.apply(lambda x: float(x / np.timedelta64(1, 'm'))) # m is for minutes
tmp = group[timestamp_col] - group[timestamp_col].iloc[-1]
tmp = tmp.fillna(0)
group["timesincecasestart"] = tmp.apply(lambda x: float(x / np.timedelta64(1, 'm'))) # m is for minutes
group = group.sort_values(timestamp_col, ascending=True, kind='mergesort')
group["event_nr"] = range(1, len(group) + 1)
return group
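# Added usage sketch (toy event log, assuming the column settings above and the pandas
# behaviour this script was written against); timestamps must be parsed to datetimes first.
_toy_log = pd.DataFrame({
case_id_col: ["c1", "c1"],
activity_col: ["register", "decide"],
timestamp_col: pd.to_datetime(["2015-01-01 10:00", "2015-01-01 10:30"]),
})
_toy_feat = _toy_log.groupby(case_id_col).apply(extract_timestamp_features)
# the 10:30 event gets timesincelastevent == 30.0 and event_nr == 2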
def get_open_cases(date):
return sum((dt_first_last_timestamps["start_time"] <= date) & (dt_first_last_timestamps["end_time"] > date))
for filename in filenames:
data = pd.read_csv(os.path.join(input_data_folder, filename), sep=";", encoding='latin-1')
data = data[data["caseStatus"] == "G"] # G is closed, O is open
# switch labels (deviant/regular was set incorrectly before)
data.loc[data[label_col] == pos_label, label_col] = "normal" # set_value is deprecated; use boolean .loc assignment
data.loc[data[label_col] == neg_label, label_col] = pos_label
data.loc[data[label_col] == "normal", label_col] = neg_label
# split the parts attribute to separate columns
ser = data.groupby(level=0).apply(split_parts)
dt_parts =
|
pd.get_dummies(ser)
|
pandas.get_dummies
|
import pyarrow
import pandas
import time
from dask.dataframe.accessor import maybe_wrap_pandas
from dask_accelerated import dask_native
class CustomFilter:
def __init__(self):
self.durations = []
# The default str match operator in Dask, but this version keeps track
# of str match duration
def custom_vanilla(self, obj, accessor, attr, args, kwargs):
start = time.time()
out = getattr(getattr(obj, accessor, obj), attr)(*args, **kwargs)
end = time.time()
self.durations.append(end-start)
return maybe_wrap_pandas(obj, out)
# Performs string matching using Google's native RE2 library
def custom_re2(self, obj, accessor, attr, args, kwargs):
# The number of records in the current batch
number_of_records = obj.size
# The regular expression to be matched
regex = args[0]
# Extract arrow buffers from the input pandas series
arr = pyarrow.Array.from_pandas(obj)
in_buffers = arr.buffers()
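# For a pyarrow string (utf8) array, buffers() returns three buffers: the validity bitmap
# (None when there are no nulls), the int32 offsets, and the concatenated character data;
# only the offsets and values buffers are handed to the native kernel below.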
# Initialize an empty selection vector and extract its arrow buffers
out = pandas.array([False] * number_of_records, dtype=bool, copy=False)
out_buffers = pyarrow.Array.from_pandas(out).buffers()
# Do a native evaluation of the regex matching
start = time.time()
dask_native.re2Eval(
number_of_records,
regex,
in_buffers[1].address,
in_buffers[2].address,
in_buffers[1].size,
in_buffers[2].size,
out_buffers[1].address,
out_buffers[1].size
)
end = time.time()
# Reconstruct output selection vector from the underlying buffers
out = pyarrow.Array.from_buffers(pyarrow.bool_(), number_of_records, out_buffers).to_pandas()
self.durations.append(end-start)
return maybe_wrap_pandas(obj, out)
def custom_re2_unaligned(self, obj, accessor, attr, args, kwargs):
# The number of records in the current batch
number_of_records = obj.size
# The regular expression to be matched
regex = args[0]
# Add some padding to the pandas series, which will be removed from the buffers later
obj_with_padding = pandas.concat([pandas.Series(["a"]), obj])
arr = pyarrow.Array.from_pandas(obj_with_padding)
in_buffers_with_padding = arr.buffers()
# Remove padding from buffers to make them unaligned
unaligned_offset = in_buffers_with_padding[1].slice(offset=4)
unaligned_value = in_buffers_with_padding[2].slice(offset=1)
# Verify that the buffers are indeed not aligned to 64 bytes
offset_alignment = unaligned_offset.address % 64
value_alignment = unaligned_value.address % 64
assert 0 not in {offset_alignment, value_alignment}
# Initialize an empty selection vector and extract its arrow buffers
out = pandas.array([False] * (number_of_records + 1), dtype=bool, copy=False)
out_buffers = pyarrow.Array.from_pandas(out).buffers()
out_buffers[1] = out_buffers[1].slice(offset=1)
# Verify that the buffers are indeed not aligned to 64 bytes
out_alignment = out_buffers[1].address % 64
assert out_alignment != 0
# Do a native evaluation of the regex matching
start = time.time()
dask_native.re2Eval(
number_of_records,
regex,
unaligned_offset.address,
unaligned_value.address,
unaligned_offset.size,
unaligned_value.size,
out_buffers[1].address,
out_buffers[1].size
)
end = time.time()
# Reconstruct output selection vector from the underlying buffers
out = pyarrow.Array.from_buffers(pyarrow.bool_(), number_of_records, out_buffers).to_pandas()
self.durations.append(end-start)
return maybe_wrap_pandas(obj, out)
# Performs string matching using Tidre on FPGA
def custom_tidre(self, obj, accessor, attr, args, kwargs):
# The number of records in the current batch
number_of_records = obj.size
# Extract arrow buffers from the input pandas series
arr = pyarrow.Array.from_pandas(obj)
in_buffers = arr.buffers()
# Initialize an empty selection vector and extract its arrow buffers
out =
|
pandas.array([False] * number_of_records, dtype=bool, copy=False)
|
pandas.array
|
"""
Compares TCA with Bellwether Method (SEER to be added)
"""
from __future__ import print_function, division
import os
import sys
root = os.path.join(os.getcwd().split('src')[0], 'src')
if root not in sys.path:
sys.path.append(root)
from SEER.SEER import seer_jur
from TCA.execute import tca_jur
import multiprocessing
from pdb import set_trace
from stats.effect_size import hedges_g_2
import pandas
from utils import print_pandas
# Note To Self: REFACTOR THE TRANSFER LEARNER
def compute(method):
return method()
if __name__ == "__main__":
methods = [seer_jur, tca_jur]
pool = multiprocessing.Pool(processes=2)
a = pool.map(compute, methods)
projects = a[0].keys()
for p in projects:
print("\\textbf{" + str(p) + "}")
bell = a[0][p].set_index("Name").sort_index() # Rename index and sort alphabetically
tca = a[1][p].set_index("Name").sort_index() # Rename index and sort alphabetically
both =
|
pandas.concat([tca, bell], axis=1, join_axes=[tca.index])
|
pandas.concat
|
import torch
import numpy
import bionetwork
import plotting
import pandas
import saveSimulations
import matplotlib.pyplot as plt
import copy
import seaborn as sns
#Load network
networkList, nodeNames, modeOfAction = bionetwork.loadNetwork('data/KEGGnet-Model.tsv')
annotation =
|
pandas.read_csv('data/KEGGnet-Annotation.tsv', sep='\t')
|
pandas.read_csv
|
"""
References:
[1] stackoverflow.com/questions/59308710/iou-of-multiple-boxes
[2] math.stackexchange.com/questions/99565/simplest-way-to-calculate-the-intersect-area-of-two-rectangles
[3] ronny.rest/tutorials/module/localization_001/iou/
"""
import numpy as np
import pandas as pd
import shapely.geometry
import shapely.ops
from tqdm import tqdm
from utils.paths import path_downloads, path_metadata, path_scale
def scale(df, multi):
df = df[df['multi'] == multi]
filepaths = df['filepath'].unique()
if multi == False:
scale = list((df['x_max'] - df['x_min']) * (df['y_max'] - df['y_min']))
else:
scale = []
for filepath in tqdm(filepaths):
df_boxes = df[df['filepath'] == filepath]
boxes = np.array(df_boxes[['x_min', 'y_min', 'x_max', 'y_max']])
boxes = [shapely.geometry.box(*b) for b in boxes]
union = shapely.ops.unary_union(boxes)
scale.append(union.area)
return pd.DataFrame(data={'filepath':filepaths, 'scale':scale})
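# Added illustration of the union-area computation (toy boxes, not ImageNet annotations):
# two 2x2 boxes overlapping in a 1x1 square cover 4 + 4 - 1 = 7 area units in total.
_demo_boxes = [shapely.geometry.box(0, 0, 2, 2), shapely.geometry.box(1, 1, 3, 3)]
assert abs(shapely.ops.unary_union(_demo_boxes).area - 7.0) < 1e-9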
path_bounding_boxes = path_downloads/'imagenet_2012_bounding_boxes.csv'
column_names = ('filepath', 'x_min', 'y_min', 'x_max', 'y_max')
df = pd.read_csv(path_bounding_boxes, names=column_names, header=None)
df['multi'] = df.duplicated(subset=['filepath'], keep=False)
df_single = scale(df, multi=False)
df_multi = scale(df, multi=True)
df_scale =
|
pd.concat((df_single, df_multi), ignore_index=True)
|
pandas.concat
|
# -*- coding: utf-8 -*-
"""
Created on Sun May 22 10:30:01 2016
SC process signups functions
@author: tkc
"""
#%%
import pandas as pd
import numpy as np
from datetime import datetime, date
import re, glob, math
from openpyxl import load_workbook # writing to Excel
from PIL import Image, ImageDraw, ImageFont
import tkinter as tk
import pkg.SC_config as cnf # _OUTPUT_DIR and _INPUT_DIR
#%%
def combinephrases(mylist):
''' Combine list of phrases using commas & and '''
if len(mylist)==1:
return str(mylist[0])
elif len(mylist)==2:
tempstr=str(mylist[0])+ ' and ' +str(mylist[1])
return tempstr
else:
rest=mylist[:-1]
rest=[str(i) for i in rest]
last=mylist[-1]
tempstr=', '.join(rest) +' and ' + str(last)
return tempstr
#%%
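# Added example for combinephrases: combinephrases(['2 3rd girls', '1 4th boys', '5 5th girls'])
# returns '2 3rd girls, 1 4th boys and 5 5th girls'.
assert combinephrases(['a', 'b', 'c']) == 'a, b and c'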
def writetoxls(df, sheetname, xlsfile):
''' Generic write of given df to specified tab of given xls file '''
book=load_workbook(xlsfile)
writer=pd.ExcelWriter(xlsfile, engine='openpyxl', datetime_format='mm/dd/yy', date_format='mm/dd/yy')
writer.book=book
writer.sheets = dict((ws.title, ws) for ws in book.worksheets)
df.to_excel(writer,sheet_name=sheetname,index=False) # this overwrites existing file
writer.save() # saves xls file with all modified data
return
def loadtransfers(df, signups):
''' Load transferred players and add to signups (then run player ID);
transfers added as normal players but need fake billing entries
'''
df=df.rename(columns={'Fname':'First','Lname':'Last','Street':'Address','Parish':'Parish of Registration'})
df=df.rename(columns={'Phone':'Phone1','Birthdate':'DOB','Sex':'Gender','Open/Closed':'Ocstatus'})
# Replace F, M codes with Girl, Boy
df.loc[:,'Gender']=df.Gender.replace('F','Girl')
df.loc[:,'Gender']=df.Gender.replace('M','Boy')
# Manually enter sport
print('Enter sport for transferred players')
sport=input()
df.loc[:,'Sport']=sport
df=df.dropna(subset=['First']) # remove blank rows if present
mycols=[col for col in df if col in signups]
df=df[mycols]
df=formatnamesnumbers(df)
# place date/transfer in timestamp
mystamp=datetime.strftime(datetime.now(),'%m/%d/%y')+' transfer'
df.loc[:,'Timestamp']=mystamp
mycols=signups.columns
signups=signups.append(df, ignore_index=True)
signups=signups[mycols]
return signups
def packagetransfers(teams, Mastersignups, famcontact, players, season, year, acronyms, messfile):
''' Package roster and contact info by sport- school and save as separate xls files
also generate customized e-mails in single log file (for cut and paste send to appropriate persons)
args:
teams - loaded team list
mastersignups - signups w/ team assignment
players -player DB
famcontact - family contact db
season - Fall, Winter or Spring
year - starting sports year (i.e. 2019 for 2019-20 school year)
acronyms - school/parish specific abbreviations
messfile - e-mail message template w/ blanks
returns:
'''
teams=teams[pd.notnull(teams['Team'])]
transferteams=np.ndarray.tolist(teams[teams['Team'].str.contains('#')].Team.unique())
transSU=Mastersignups[Mastersignups['Team'].isin(transferteams)]
# ensure that these are from correct season/year
sportsdict={'Fall':['VB','Soccer'], 'Winter':['Basketball'],'Spring':['Track','Softball','Baseball','T-ball']}
sportlist=sportsdict.get(season)
transSU=transSU.loc[(transSU['Sport'].isin(sportlist)) & (transSU['Year']==year)] # season is not in mastersignups... only individual sports
# get family contact info from famcontacts
transSU=pd.merge(transSU, famcontact, how='left', on=['Famkey'], suffixes=('','_r'))
# get school from players.csv
transSU=pd.merge(transSU, players, how='left', on=['Plakey'], suffixes=('','_r2'))
# get division from Teams xls (for roster)
transSU=pd.merge(transSU, teams, how='left', on=['Team'], suffixes=('','_r3')) # effectively adds other team info for roster toall players
transSU.loc[:,'Role']='Player' # add column for role
# transSU['Open/Closed']='Closed'
# Sort by grade pre-split
transSU.loc[:,'Grade']=transSU.Grade.replace('K',0)
transSU.loc[:,'Grade']=transSU.Grade.apply(int)
transSU=transSU.sort_values(['Grade'], ascending=True)
transSU.loc[:,'Grade']=transSU.Grade.replace(0,'K') # switch zero back to K after sorting
# Column for sorting by transferred to school
transSU.loc[:,'Transchool']=transSU['Team'].str.split('#').str[0]
grouped=transSU.groupby(['Sport','Transchool'])
for [sport, school], group in grouped:
# prepare roster tab
xlsname=cnf._OUTPUT_DIR+'\\Cabrini_to_'+school+'_'+sport+'_'+str(year)+'.xlsx'
writer=pd.ExcelWriter(xlsname, engine='openpyxl')
Transferroster=organizeroster(group)
Transferroster=Transferroster.sort_values(['Team', 'Sex', 'Grade'], ascending=True)
Transferroster=replaceacro(Transferroster,acronyms)
Transferroster.to_excel(writer,sheet_name='roster',index=False)
# prep contacts tab
mycols=['First', 'Last', 'Grade', 'Gender', 'School', 'Phone1', 'Text1','Email1', 'Phone2', 'Text2',
'Email2', 'Pfirst1', 'Plast1', 'Pfirst2', 'Plast2', 'Team']
Transfercontacts=group[mycols]
Transfercontacts.to_excel(writer, sheet_name='contacts', index=False)
writer.save()
# Now generate list of e-mails for all schools/directors
logfile='transfers_director_emails_log.txt'
with open(logfile,'w+') as emaillog:
# Read generic file to sport director
with open(messfile, 'r') as file:
blankmessage=file.read()
for [sport, school], group in grouped:
plagroup=group.groupby(['Grade', 'Gender'])
platypes=[] # list of # of players by grade, gender
gradedict={'K':'K', 1:'1st', 2:'2nd',3:'3rd',4:'4th',5:'5th',6:'6th', 7:'7th',8:'8th'}
genderdict={'f':'girls', 'F':'girls','m':'boys','M':'boys'}
for [grade, gender], group in plagroup:
numplays=str(int(group['Grade'].count()))
grname=gradedict.get(grade)
genname=genderdict.get(gender)
platypes.append(numplays+' '+grname+' '+genname)
plalist=combinephrases(platypes)
thismess=blankmessage.replace('$SCHOOL', school)
thismess=thismess.replace('$SPORT', sport)
thismess=thismess.replace('$PLALIST', plalist)
emaillog.write(thismess)
emaillog.write('\n\n')
return
def findcards():
'''Search ID cards folder and return player # and file link
cards resized to 450x290 pix jpg in photoshop (scripts-image processor)
keys are either player number as string or coach CYC ID, vals are links to files'''
cardlist=glob.glob('%s\\IDcards\\*.jpg' %cnf._OUTPUT_DIR, recursive=True)
# construct list of [card #, filename]
nums=[i.split('\\')[-1] for i in cardlist]
nums=[i.split('_')[0] if '_' in i else i.split('--')[0] for i in nums ]
cards={} # dict for card numbers/filenames
for i,num in enumerate(nums):
cards.update({num: cardlist[i]})
return cards
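''' EXAMPLE (hypothetical sketch of the returned dict; actual paths depend on cnf._OUTPUT_DIR)
cards=findcards()
# e.g. {'1234': '<path to IDcards/1234_First_Last.jpg>', 'A567': '<path to IDcards/A567--Coach_Name.jpg>'}
cards.get('1234') # path to that player's card image, or None if no card is on file
'''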
def makethiscard(IDlist, team):
''' Build a single card-sheet image for a team: from the team's list of player numbers (alphabetical order), open each linked ID card image, or draw the player name in red if the card is missing, and paste them all into one image'''
# make the master image and determine image array size
margin=10 # pix on all sides
if len(IDlist)<11: # use 2 x 5 array (horiz)
wide=2
high=5
elif len(IDlist)<13: # 4w x 3 h (vert)
wide=4
high=3
elif len(IDlist)<22: # 3x by 5-7 high (horiz); max 21
wide=3
high=math.ceil(len(IDlist)/3)
else: # more than 21 ... yikes
wide=3
high=math.ceil(len(IDlist)/3)
cardimage = Image.new('RGB', (450*wide+2*margin, 300*high+2*margin), "white") # blank image of correct size
draw=ImageDraw.Draw(cardimage) # single draw obj for adding missing card names
ttfont=ImageFont.truetype('arial.ttf', size=36)
for i,fname in enumerate(IDlist):
row=i//high # integer division gives the column position across the sheet (used for xpos)
col=i%high # remainder gives the position down that column (used for ypos)
xpos=margin+row*450
ypos=margin+col*300
try:
thiscard=Image.open(fname)
thiscard=thiscard.resize((450, 300), Image.ANTIALIAS)
cardimage.paste(im=thiscard, box=(xpos, ypos)) # paste w/ xpos,ypos as upper left
except: # occurs when "first last" present instead of file name/path
# blankcard=Image.new('RGB', (450, 300)) # make blank image as placeholder
draw.text((xpos+50,ypos+100),fname,font=ttfont, fill="red")
return cardimage
''' TESTING
i=0 team=teamlist[i]
'''
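''' EXAMPLE (layout arithmetic for makethiscard, illustrative only)
12 IDs -> wide=4, high=3, so the sheet is (4*450+20) x (3*300+20) pixels;
for i=7: row=7//3=2 (third column across), col=7%3=1 (second card down),
so that card is pasted at xpos=10+2*450=910, ypos=10+1*300=310
'''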
def makeCYCcards(df, players, teams, coaches, season, year, **kwargs):
''' From mastersignups and teams, create a CYC ID card sheet (one jpg per team) for all teams/sports in the season
team assignments must be finished
args:
df -- mastersignups dataframe
players - player info dataframe
teams - this year's teams csv
coaches - full coach CYC info list
season - Fall, Winter or Spring
year - starting sports year (i.e. 2019 for 2019-20 school year)
kwargs:
showmissing - True (shows missing player's name); False- skip missing player
otherSchools - default False; if True, also make card sheets for transferred teams/players
kwargs={'showmissing':False}
missing = makeCYCcards(Mastersignups, players, teams, coaches, season, year, **{'showmissing':True} )
missing = makeCYCcards(Mastersignups, players, teams, coaches, season, year, **{'showmissing':False} )
'''
# Slice by sport: Basketball (null for winter?), Soccer, Volleyball, Baseball, T-ball, Softball, Track)
cards=findcards() # dictionary with number: filename combo for existing CYC cards
df=df[(df['Year']==year)]
df=df.reset_index(drop=True)
sportsdict={'Fall':['VB','Soccer'], 'Winter':['Basketball'],'Spring':['Track','Softball','Baseball','T-ball']}
sportlist=sportsdict.get(season)
df=df[df['Sport'].isin(sportlist)] # season is not in mastersignups... only individual sports
# Make list of teams that need cards (all track and others >1st grade)
def processGrade(val):
if val=='K':
return 0
else:
return int(val)
teams.loc[:,'Grade'] = teams['Grade'].apply(lambda x:processGrade(x))
if not kwargs.get('otherSchools', False):
# all transfer teams will contain # (e.g. SMOS#3G) so remove these
# dropped by default
teams = teams[~teams['Team'].str.contains('#')]
# need track teams or any team from grades 2+
cardTeamList= teams[ (teams['Grade']>1) | (teams['Sport']=='Track') ]['Team'].unique()
df=df[ df['Team'].isin(cardTeamList) ]
df=df.sort_values(['Last'])
# plakeys as string will be easiest for below matching
df.loc[:,'Plakey']=df['Plakey'].astype(int)
df.loc[:,'Plakey']=df['Plakey'].astype(str)
def getName(gr, pk):
# get name from plakey as string
match=gr[gr['Plakey']==pk]
name=match.iloc[0]['First'] + ' ' + match.iloc[0]['Last']
return name
teamgrouped = df.groupby(['Team'])
missinglist=[] # list of plakeys with missing card
for team, gr in teamgrouped:
# keys in card dict are strings
IDlist = [str(int(i)) for i in gr.Plakey.unique()]
missinglist.extend([i for i in gr.Plakey.unique() if i not in cards.keys() ])
if not kwargs.get('showmissing', False):
# Shows only valid cards, drops missing names
IDlist = [ cards.get(i) for i in IDlist if i in cards.keys() ]
filename='Cards_'+ team +'.jpg'
else: # show cards and missing name when card image not in IDcards folder
IDlist = [cards.get(i) if i in cards.keys() else getName(gr, i) for i in IDlist ]
filename='Cards_'+ team +'_all.jpg'
# get team's coaches
IDlist.extend(getcoachIDs(team, teams, coaches, cards)) # add coach ID image file or first/last if missing
cardimage =makethiscard(IDlist, team) # directly saved
# save the card file
cardimage.save(cnf._OUTPUT_DIR+'\\'+filename)
missingcards=players[players['Plakey'].isin(missinglist)]
missingcards=missingcards.sort_values(['Grade','Last'])
return missingcards
def getcoachIDs(team, teams, coaches, cards):
''' Returns CYC IDs for all team's coaches '''
thisteam=teams[teams['Team']==team]
IDlist=[]
if len(thisteam)!=1:
print(team, 'not found in current teams list')
return IDlist # blank list
thisteam=thisteam.dropna(subset=['Coach ID'])
if len(thisteam)!=1:
print('Coach ID not found for', team)
return IDlist # blank list
if thisteam.iloc[0]['Coach ID']!='': # possibly blank
thisID=thisteam.iloc[0]['Coach ID'].strip()
if thisID in cards:
IDlist.append(cards.get(thisID,'')) # file path to this coach's ID
else: # get first/last
thiscoach=coaches[coaches['Coach ID']==thisID]
if len(thiscoach)==1:
IDlist.append(thiscoach.iloc[0]['Fname']+' '+thiscoach.iloc[0]['Lname'])
else:
print("Couldn't find coach ", thisID)
thisteam=thisteam.dropna(subset=['AssistantIDs'])
if len(thisteam)==1: # grab asst IDs if they exist
asstIDs=thisteam.iloc[0]['AssistantIDs']
asstIDs=[str(s).strip() for s in asstIDs.split(",")]
for i, asstID in enumerate(asstIDs):
if asstID in cards:
IDlist.append(cards.get(asstID,'')) # found assistant coaches ID card image
else: # can't find ... get assistant first last
thisasst=coaches[coaches['Coach ID']==asstID] # matching asst coach row
if len(thisasst)==1:
IDlist.append(thisasst.iloc[0]['Fname']+' '+thisasst.iloc[0]['Lname'])
else:
print("Couldn't find coach ", asstID)
return IDlist
def autocsvbackup(df, filename, newback=True):
''' Pass df (e.g. players) for backup and a basename (e.g. "family_contact") for the file;
finds the list of existing backups and keeps only those of certain ages based on a targetdates list;
can't recall why newback=False was ever needed (always True here to make a new backup)
'''
# TODO fix this!
pass
return
def parseDate(val):
'''
Conversion of date string to datetime.date (always header line 2 40:60)
Possible date formats: 20180316 (newer style) or 03/15/2018 (older style)
For NGA files the date format changed from 03/15/2018 to 20180316 (on jday 75 in 2018)
time format: 221100 or 22:11:00 (sometimes w/ UTC); not terribly concerned w/ time
handled date formats: 1) 03/01/2018, 3/1/2018, 3/1/18, 03/01/18 or 1/1/19
2) 2019-1-1 or 2019-01-01 (year first)  3) 01-01-2019 (year last)
'''
if not isinstance(val, str):
return val
else:
if ' ' in val: # Remove time substring (but will fail for 3 Oct 2019)
val=val.split(' ')[0] # strip time substring if present
patterns=[r'\d{1,2}/\d{1,2}/\d{2,4}', r'\d{4}-\d{1,2}-\d{1,2}', r'\d{1,2}-\d{1,2}-\d{4}'] # raw strings avoid invalid-escape warnings
for i, patt in enumerate(patterns):
match=re.search(r'%s' %patt, val)
if match:
if i==0: # Extract 03/16/2018 (or rarely 28/10/2019 style)
try:
(mo,dy,yr)=[int(i) for i in val.split('/')]
if yr<100 and len(str(yr))==2: # handle 2 digit year
yr=int('20'+str(yr))
if mo < 13: # normal US version (month first)
return datetime(yr, mo, dy).date()
# handle day month reversal
elif dy<13: # possible month day reverse
print('Month and day reverse for %s' %val)
return datetime(yr, dy, mo).date() # Assume month/day switch
except:
print('Problem extracting date from ', val)
return None
if i==1: # extract 2017-01-01 style (year first)
try:
(yr,mo,dy)=[int(i) for i in val.split('-')]
if mo < 13: # normal US version (month first)
return datetime(yr, mo, dy).date()
# handle day month reversal
elif dy<13: # possible month day reverse
print('Month and day reverse for %s' %val)
return datetime(yr, dy, mo).date() # Assume month/day switch
except:
print('Problem extracting date from ', val)
return val
if i==2: # extract 01-01-2019 style (year last)
try:
(mo,dy,yr)=[int(i) for i in val.split('-')]
if mo < 13: # normal US version (month first)
return datetime(yr, mo, dy).date()
# handle day month reversal
elif dy<13: # possible month day reverse
print('Month and day reverse for %s' %val)
return datetime(yr, dy, mo).date() # Assume month/day switch
except:
print('Problem extracting date from ', val)
return val
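''' EXAMPLE (parseDate conversions for the patterns above, illustrative only)
parseDate('03/16/2018') # -> datetime.date(2018, 3, 16)
parseDate('3/1/18') # -> datetime.date(2018, 3, 1) (2-digit years assumed 20xx)
parseDate('2019-01-01') # -> datetime.date(2019, 1, 1) (year-first dash format)
parseDate('01-01-2019') # -> datetime.date(2019, 1, 1) (year-last dash format)
parseDate(np.nan) # non-string values are returned unchanged
'''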
def loadProcessPlayerInfo():
'''Loads and processes players & family contacts (but not the signup file)
takes no args; returns players, famcontact
'''
players=pd.read_csv(cnf._INPUT_DIR + '\\players.csv', encoding='cp437') # load existing player data (need to find correct DOB format)
players.loc[:,'Grade']=players.Grade.replace('K',0)
players.loc[:,'Grade']=players.Grade.replace('pK',0) # just make them 0s for now
players.loc[:,'Grade']=players.Grade.astype(int)
# TODO use fixdates if players imports as string (various formats possible)
# players['DOB']=players['DOB'].apply(lambda x: fixDates(x))
if not isinstance(players.DOB[0], pd.Timestamp):# sometimes direct import to pd timestamp works, other times not
players.loc[:,'DOB']=players.DOB.apply(lambda x: parseDate(x))
famcontact=pd.read_csv(cnf._INPUT_DIR + '\\family_contact.csv', encoding='cp437') # load family contact info
famcontact=formatnamesnumbers(famcontact)
return players, famcontact
def loadProcessGfiles(gsignups, season, year):
'''Loads and processes players, family contacts and signup file, gets active
season and year
args:
gsignups -- google signups
season - 'Fall', 'Winter' or 'Spring'
year - 4 digit int (uses the fall value for the whole school year, i.e. the 2018-19 school year is always 2018)
'''
players=pd.read_csv(cnf._INPUT_DIR + '\\players.csv', encoding='cp437') # load existing player data (need to find correct DOB format)
players.loc[:,'Grade']=players.Grade.replace('K',0)
players.loc[:,'Grade']=players.Grade.replace('pK',0) # just make them 0s for now
players.loc[:,'Grade']=players.Grade.astype(int)
# TODO use fixdates if players imports as string (various formats possible)
# players['DOB']=players['DOB'].apply(lambda x: fixDates(x))
if not isinstance(players.DOB[0], pd.Timestamp):# sometimes direct import to pd timestamp works, other times not
players.loc[:,'DOB']=players.DOB.apply(lambda x: parseDate(x))
famcontact=pd.read_csv(cnf._INPUT_DIR + '\\family_contact.csv', encoding='cp437') # load family contact info
if season=='Winter':
gsignups['Sport']='Basketball'
# TODO determine where multiple sports converted to separate lines
duplicated=gsignups[gsignups.duplicated(subset=['First', 'Last','Grade','Sport'])]
if len(duplicated)>0:
print('Remove duplicate signups for %s' %", ".join(duplicated.Last.unique().tolist()))
gsignups=gsignups.drop_duplicates(subset=['First', 'Last','Grade','Sport'])
gsignups.loc[:,'Sport']=gsignups['Sport'].str.replace('Volleyball','VB')
#gsignups.loc[:,'Sport']=gsignups.loc[:,'Sport'].str.replace('Volleyball','VB').copy()
#gsignups.loc[:,'Sport']=gsignups['Sport'].replace({'Volleyball':'VB'}, regex=True).copy()
missing=[i for i in ['Famkey','Plakey'] if i not in gsignups.columns]
for col in missing: # add blank vals
gsignups.loc[gsignups.index, col]=np.nan
# convert assorted DOB strings to datetime.date
if not isinstance(gsignups.DOB[0], pd.Timestamp):# sometimes direct import to pd timestamp works, other times not
gsignups.loc[:,'DOB']=gsignups.DOB.apply(lambda x: parseDate(x))
# Get year from signup file name
outputduplicates(gsignups) # quick check of duplicates output in console window (already removed from signups)
gsignups=formatnamesnumbers(gsignups) # format phone numbers, names to title case, standardize schools, etc.
famcontact=formatnamesnumbers(famcontact)
def processGkey(val):
''' Some plakey/famkey copied to drive... must convert nan(float), whitespace or
number as string to either nan or int
'''
if isinstance(val, str):
val=''.join(val.split(' '))
if val=='':
return np.nan
else:
try:
return int(val)
except:
return np.nan
else:
return np.nan
# ensure gsignups has only int or nan (no whitespace)
gsignups.loc[:,'Plakey']=gsignups['Plakey'].apply(lambda x: processGkey(x))
gsignups.loc[:,'Famkey']=gsignups['Famkey'].apply(lambda x: processGkey(x))
return players, famcontact, gsignups
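''' EXAMPLE (typical google-drive load sketch; the signup file name/format is an assumption)
gsignups=pd.read_csv('Winter2019_signups.csv') # however the google sheet export was saved
players, famcontact, gsignups = loadProcessGfiles(gsignups, 'Winter', 2019)
'''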
def loadprocessfiles(signupfile):
'''Loads and processes players, family contacts and signup file, gets active
season and year '''
players=pd.read_csv(cnf._INPUT_DIR + '\\players.csv', encoding='cp437') # load existing player data (need to find correct DOB format)
players.loc[:,'Grade']=players.Grade.replace('K',0)
players.loc[:,'Grade']=players.Grade.replace('pK',0) # just make them 0s for now
players.loc[:,'Grade']=players.Grade.astype(int)
# TODO use fixdates if players imports as string (various formats possible)
# players['DOB']=players['DOB'].apply(lambda x: fixDates(x))
if type(players.DOB[0])!=pd.Timestamp: # sometimes direct import to pd timestamp works, other times not
try:
players.loc[:,'DOB']=players.DOB.apply(lambda x: parseDate(x)) # apply parseDate per value (it handles single values, not a series)
except:
print('Failure converting player DOB to datetime/timestamp')
famcontact=pd.read_csv(cnf._INPUT_DIR + '\\family_contact.csv', encoding='cp437') # load family contact info
# read this season's sports signup file and rename columns
if signupfile.endswith('.csv'):
SUraw=pd.read_csv(signupfile)
elif 'xls' in signupfile:
try:
SUraw=pd.read_excel(signupfile, sheet_name='Raw') # may or may not have plakey/famkey (sheet_name kwarg in current pandas)
except:
SUraw=pd.read_excel(signupfile)
if SUraw.shape[1]==30 and 'Plakey' in SUraw.columns:
SUraw.columns=['Timestamp','First','Last','DOB','Gender','School','Grade',
'Address','Zip','Parish','Sport','AltPlacement','Ocstatus','Pfirst1',
'Plast1','Phone1','Text1','Email','Othercontact','Coach','Pfirst2','Plast2',
'Phone2','Text2','Email2','Coach2','Unisize','Unineed','Plakey','Famkey']
elif SUraw.shape[1]==28 and 'Plakey' in SUraw.columns:
SUraw.columns=['Timestamp','First','Last','DOB','Gender','School','Grade',
'Address','Zip','Parish','Sport','AltPlacement','Ocstatus','Pfirst1',
'Plast1','Phone1','Text1','Email','Othercontact','Coach','Pfirst2','Plast2',
'Phone2','Text2','Email2','Coach2','Plakey','Famkey']
elif SUraw.shape[1]==26 and 'Plakey' not in SUraw.columns: # Raw value without plakey and famkey
SUraw.columns=['Timestamp','First','Last','DOB','Gender','School',
'Grade','Address','Zip','Parish','Sport','AltPlacement','Ocstatus',
'Pfirst1','Plast1','Phone1','Text1','Email','Othercontact','Coach',
'Pfirst2','Plast2','Phone2','Text2','Email2','Coach2']
elif SUraw.shape[1]==28 and 'Plakey' not in SUraw.columns: # Raw value without plakey and famkey
SUraw.columns=['Timestamp','First','Last','DOB','Gender','School',
'Grade','Address','Zip','Parish','Sport','AltPlacement','Ocstatus',
'Pfirst1','Plast1','Phone1','Text1','Email','Othercontact','Coach',
'Pfirst2','Plast2','Phone2','Text2','Email2','Coach2','Unisize','Unineed']
SUraw.loc[SUraw.index,'Plakey']=np.nan # add if absent
SUraw.loc[SUraw.index,'Famkey']=np.nan
signups=SUraw.drop_duplicates(subset=['First', 'Last','Grade','Sport'])
signups.loc[:,'Sport']=signups['Sport'].replace({'Volleyball':'VB'}, regex=True) # avoid chained inplace replace on a slice
# Get year from signup file name
season=re.match(r'(\D+)', signupfile).group(0) # season at string beginning followed by year (non-digit)
if '\\' in season: # remove file path problem
season=season.split('\\')[-1]
year=int(re.search(r'(\d{4})', signupfile).group(0)) # full year should be only number string in signups file
outputduplicates(SUraw) # quick check of duplicates output in console window (already removed from signups)
signups=formatnamesnumbers(signups) # format phone numbers, names to title case, standardize schools, etc.
famcontact=formatnamesnumbers(famcontact)
return players, famcontact, signups, season, year
def findavailablekeys(df, colname, numkeys):
'''Pass df and colname, return a defined number of available keys list
used for players, families, signups, etc.
'''
# list comprehension
allnums=[i for i in range(1,len(df))]
usedkeys=df[colname].unique()
usedkeys=np.ndarray.tolist(usedkeys)
availkeys=[i for i in allnums if i not in usedkeys]
if len(availkeys)<numkeys: # get more keys starting at max+1
needed=numkeys-len(availkeys)
for i in range(0,needed):
nextval=int(max(usedkeys)+1) # if no interior vals are available find next one
availkeys.append(nextval+i)
availkeys=availkeys[:numkeys] # truncate and only return the requested number of needed keys
return availkeys
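''' EXAMPLE (findavailablekeys on a toy frame, illustrative only)
tempdf=pd.DataFrame({'Plakey':[1,2,4,5]})
findavailablekeys(tempdf, 'Plakey', 2) # -> [3, 6]: unused interior key first, then max+1
'''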
def organizeroster(df):
''' Renaming, reorg, delete unnecessary columns for CYC roster output
already split by sport and year'''
df=df.rename(columns={'First':'Fname','Last':'Lname','Address':'Street','Parish_registration':'Parish of Registration'})
df=df.rename(columns={'Parish_residence':'Parish of Residence','Phone1':'Phone','DOB':'Birthdate','Gender':'Sex'})
df=df.rename(columns={'Email1':'Email'})
# replace Girl, Boy with F, M
df.loc[:,'Sex']=df.Sex.replace('Girl','F').replace('Boy','M')
df.loc[:,'Sex']=df.Sex.str.upper() # ensure uppercase
# Convert date format to 8/25/2010 string format
mycols=['Fname', 'Lname', 'Street', 'City', 'State', 'Zip', 'Phone', 'Email', 'Birthdate', 'Sex', 'Role', 'Division', 'Grade', 'Team', 'School', 'Parish of Registration', 'Parish of Residence', 'Coach ID']
df=df[mycols] # put back in desired order
df=df.sort_values(['Team'])
return df
'''TESTING row=tempplay.iloc[7]
signups=signups[signups['Last']=='Elston']
'''
def processdatachanges(signups, players, famcontact, year):
'''Pass SC signups subset from google drive, update address for more up-to-date
contact information, new address, etc.
must start here if troubleshooting
args:
signups -- online signups file (normally google drive)
players - player DOB, grade, etc
famcontact- family contact info
year - sports year (int); e.g. 2019 for 2019-20 school year
'''
# Using all entries from signups (manual and gdrive)
# Updates from paper signups should be done directly to famcontact and players csv files (skip entirely)
'''
signups.Timestamp=pd.to_datetime(signups.Timestamp, errors='coerce') # converts to naT or timestamp
gdsignups=signups.dropna(subset=['Timestamp']) # drops manual entries (no google drive timestamp)
'''
# merge w/ players and update grade, recalc grade adjustment, and school
# must use left merge to keep correct indices from players df (inner causes reindexing)
players=players.reset_index(drop=True)
tempplay=pd.merge(players, signups, how='inner', on=['Plakey'], suffixes=('','_n'))
tempplay=tempplay.dropna(subset=['Gender_n']) # this drops all without a google drive entry
for index, row in tempplay.iterrows():
upkwargs={}
# Skip approval for grade updates
if row.Grade!=row.Grade_n: # grade discrepancy between players.csv and current signup
match=players[players['Plakey']==row.Plakey]
if len(match)==1:
thisind=match.index[0]
# update player grade (no approval)
players.loc[thisind,'Grade']=row.Grade_n # set to new value from current signup file
print (row.First," ",row.Last," grade changed to ", row.Grade_n)
if row.School!=row.School_n and str(row.School_n)!='nan':
upkwargs.update({'school':True})
# Check for DOB inconsistency between google drive and players.csv
if row.DOB!=row.DOB_n: # don't change grade adjustment if DOB discrepancy
if row.DOB_n.year!=year: # flag for approval unless the new DOB falls in the current sports year (likely a data-entry slip)
upkwargs.update({'DOB':True})
else: # recalculate grade adjustment
# Direct adjustment to gradeadj in players (if indicated)
players=updategradeadjust(row, players, year)
if 'school' in upkwargs or 'DOB' in upkwargs:
# Interactively approve school or DOB changes
players=updateplayer_tk(row, players, **upkwargs)
autocsvbackup(players,'players', newback=True) # run autobackup script
outname=cnf._OUTPUT_DIR+'\\players.csv'
players.to_csv(outname,index=False) # direct save of changes from google drive info
# now update new info into family contacts
# faminfo=gdsignups.drop_duplicates(subset=['Famkey']) # only process first kid from family
faminfo=signups.drop_duplicates(subset=['Famkey'])
famcontact=prepcontacts(famcontact)
faminfo=prepcontacts(faminfo)
tempfam=pd.merge(famcontact, faminfo, how='inner', on=['Famkey'], suffixes=('','_n')) # same indices as famcontact
tempfam=tempfam.dropna(subset=['Zip_n']) # drops those without timestamped google drive entry
for index,row in tempfam.iterrows():
# Update/reshuffle phone, email, parent list, parish of registration (direct to famcontact)
famcontact=update_contact(row, famcontact) # update/reshuffle phone,text (list of lists)
autocsvbackup(famcontact,'family_contact', newback=True) # run autobackup script
outname=cnf._INPUT_DIR+'\\family_contact.csv'
famcontact.to_csv(outname, index=False)
return players, famcontact
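''' EXAMPLE (typical update sequence sketch; assumes gsignups already loaded)
players, famcontact, gsignups = loadProcessGfiles(gsignups, season, year)
players, famcontact = processdatachanges(gsignups, players, famcontact, year)
# grade changes apply silently; school/DOB and family contact changes pop tk approval dialogs,
# and updated players.csv / family_contact.csv are written automatically
'''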
def updatefamcon_tk(row, famcontact, **upkwargs):
''' Interactive approval of family contact changes
changes directly made to famcontacts (but not yet autosaved)
upkwargs: phone, email, address
'''
root = tk.Tk()
root.title('Update family contact info')
choice=tk.StringVar() # must be define outside of event called functions
rownum=0
mytxt='Family: '+row.Family+' # '+str(row.Plakey)
tk.Label(root, text=mytxt).grid(row=rownum, column=0)
tk.Label(root, text='Deselect to remove').grid(row=rownum, column=1)
rownum+=1
# Use listbox of common schools?
if 'parlist' in upkwargs: # indicates new parent found
colnum=0
parlist=upkwargs.get('parlist',[])
# Checkboxes to add new parent
if 'newpar1' in upkwargs:
addpar1=tk.BooleanVar()
addpar1.set(True)
try:
mytext='Add parent: '+ (' '.join(upkwargs.get('newpar1',[]))+'?')
except:
print('Error adding parent 1', )
mytext=''
tk.Checkbutton(root, variable=addpar1, text=mytext).grid(row=rownum, column=colnum)
colnum+=1
if 'newpar2' in upkwargs:
addpar2=tk.BooleanVar()
addpar2.set(True)
try:
mytext='Add parent: '+ (' '.join(upkwargs.get('newpar2',[]))+'?')
except:
mytext=''
tk.Checkbutton(root, variable=addpar2, text=mytext).grid(row=rownum, column=colnum)
colnum+=1
# Checkbutton and boolvar for each parent (default true)
pbools=[] # List of bools for parent inclusion
for i in range(0,len(parlist)):
pbools.append(tk.BooleanVar())
pbools[i].set(True)
tempstr=parlist[i]
tk.Checkbutton(root, variable=pbools[i], text=tempstr).grid(row=rownum, column=colnum)
rownum+=1
rownum+=1
if 'emails' in upkwargs: # indicates new parent found
emaillist=upkwargs.get('emails',[])
# Checkboxes to add new parent
colnum=0
if 'email1' in upkwargs:
addemail1=tk.BooleanVar()
addemail1.set(True)
email1=tk.StringVar()
email1.set(upkwargs.get('email1',''))
tk.Checkbutton(root, variable=addemail1, text='Add new email1').grid(row=rownum, column=colnum)
rownum+=1
tk.Entry(root, textvariable=email1).grid(row=rownum, column=colnum)
rownum+=1
if 'email2' in upkwargs:
addemail2=tk.BooleanVar()
addemail2.set(True)
email2=tk.StringVar()
email2.set(upkwargs.get('email2',''))
tk.Checkbutton(root, variable=addemail2, text='Add new email2').grid(row=rownum, column=colnum)
rownum+=1
tk.Entry(root, textvariable=email2).grid(row=rownum, column=colnum)
colnum+=1
# Checkbutton and boolvar for each email (default true)
ebools=[] # List of bools for parent inclusion
for i in range(0,len(emaillist)):
ebools.append(tk.BooleanVar())
tempstr=emaillist[i]
ebools[i].set(True)
tk.Checkbutton(root, variable=ebools[i], text=tempstr).grid(row=rownum, column=colnum)
rownum+=1
rownum+=1
if 'phones' in upkwargs: # indicates new parent found
phlist=upkwargs.get('phones',[])
# Checkboxes to add new parent
colnum=0
if 'phone1' in upkwargs:
addphone1=tk.BooleanVar()
addphone1.set(True)
try:
mytext='Add phone/text: '+ upkwargs.get('phone1','')
except:
mytext=''
tk.Checkbutton(root, variable=addphone1, text=mytext).grid(row=rownum, column=colnum)
colnum+=1
if 'phone2' in upkwargs:
addphone2=tk.BooleanVar()
addphone2.set(True)
try:
mytext='Add phone/text: '+ ', '.join(upkwargs.get('phone2',[]))
except:
mytext=''
tk.Checkbutton(root, variable=addphone2, text=mytext).grid(row=rownum, column=colnum)
colnum+=1
# Checkbutton and boolvar for each email (default true)
phbools=[] # List of bools for parent inclusion
for i in range(0,len(phlist)):
phbools.append(tk.BooleanVar())
tempstr=phlist[i]
phbools[i].set(True)
tk.Checkbutton(root, variable=phbools[i], text=tempstr).grid(row=rownum, column=colnum)
rownum+=1
if 'address' in upkwargs:
colnum=0
tk.Label(root, text='Possible change of address').grid(row=rownum, column=colnum)
rownum+=1
newaddrbool=tk.BooleanVar()
newaddr=tk.StringVar()
newaddrbool.set(False)
newaddr.set(row.Address_n)
newzip=tk.StringVar()
try:
newzip.set(int(row.Zip_n))
except:
print('Non-standard zip value',str(row.Zip_n))
tk.Checkbutton(root, variable=newaddrbool, text='Change address?').grid(row=rownum, column=colnum)
colnum+=1
tk.Label(root, text='Current address').grid(row=rownum, column=colnum)
colnum=0
rownum+=1
tk.Entry(root, textvariable=newaddr).grid(row=rownum, column=colnum)
rownum+=1
tk.Entry(root, textvariable=newzip).grid(row=rownum, column=colnum)
colnum+=1
tempstr=str(row.Address)+' '+str(row.Zip)
tk.Label(root, text=tempstr).grid(row=rownum, column=colnum)
rownum+=1
# Now set up select/close buttons
def skip(event):
choice.set('skip')
root.destroy()
def change(event):
choice.set('change')
root.destroy()
f=tk.Button(root, text='Skip')
f.bind('<Button-1>', skip)
f.grid(row=rownum, column=0)
g=tk.Button(root, text='Change')
g.bind('<Button-1>', change)
g.grid(row=rownum, column=1)
root.mainloop()
mychoice=choice.get()
if mychoice=='change':
# Find matching row for family (needed for all changes below)
famkey=row.Famkey
match=famcontact[famcontact['Famkey']==famkey]
if len(match)==1:
thisind=match.index[0]
else:
print('Problem finding unique entry for famkey', str(famkey))
return famcontact # return unaltered
# Reconstruct parent list
if 'parlist' in upkwargs:
newparlist=[] # constructing entirely new parent list from checkbox choices
if 'newpar1' in upkwargs:
if addpar1.get():
newparlist.append(upkwargs.get('newpar1',[np.nan,np.nan]))
#TODO fix nan error
print('Added parent',' '.join(upkwargs.get('newpar1')),' to ',str(row.Family))
for i, val in enumerate(pbools):
if pbools[i].get():
newparlist.append(parlist[i]) # [first, last] format
if 'newpar2' in upkwargs:
if addpar2.get():
newparlist.append(upkwargs.get('newpar2',[np.nan,np.nan]))
print('Added parent 2',' '.join(upkwargs.get('newpar2')),' to ',str(row.Family))
# Now direct update of parents in this family's famcontact entry
newparlist=newparlist[0:3] # limit to 3 entries
while len(newparlist)<3:
newparlist.append([np.nan,np.nan]) # pad with nan entries if necessary
# now reset parent name entries
for i in range(1,4): # reset 3 existing parents entries
fname='Pfirst'+str(i)
lname='Plast'+str(i)
famcontact.loc[thisind, fname] = newparlist[i-1][0]
famcontact.loc[thisind, lname] = newparlist[i-1][1]
# Reconstruct email list
if 'emails' in upkwargs:
newemaillist=[]
if 'email1' in upkwargs:
if addemail1.get():
newemaillist.append(email1.get())
print('Added email1', email1.get(), ' to ', str(row.Family))
for i, val in enumerate(ebools):
if ebools[i].get():
newemaillist.append(emaillist[i])
if 'email2' in upkwargs:
if addemail2.get():
# insert in 2nd position
newemaillist.insert(1, email2.get())
print('Added email2', email2.get(), ' to ', str(row.Family))
# Now update emails in famcontact entry
# Direct update of parent list
newemaillist=newemaillist[0:3] # limit to 3 entries
while len(newemaillist)<3:
newemaillist.append(np.nan) # pad with nan entries if necessary
# now reset parent name entries
for i in range(1,4): # reset 3 existing parents entries
colname='Email'+str(i)
famcontact.loc[thisind, colname]= newemaillist[i-1]
# Reconstruct phone list
if 'phones' in upkwargs:
newphlist=[]
if 'phone1' in upkwargs:
if addphone1.get():
newphlist.append(upkwargs.get('phone1', [np.nan,np.nan]))
print('Added phone1', ','.join(upkwargs.get('phone1',[])), ' to ', str(row.Family))
for i, val in enumerate(phbools):
if phbools[i].get():
newphlist.append(phlist[i])
# added at end... probably should go
if 'phone2' in upkwargs:
if addphone2.get():
# insert in 2nd position
newphlist.insert(1, upkwargs.get('phone2',[np.nan,np.nan]))
print('Added phone2', ','.join(upkwargs.get('phone2',[])), ' to ', str(row.Family))
# Now update phone, text in famcontact entry
newphlist=newphlist[0:4] # limit to 4 entries
while len(newphlist)<4:
newphlist.append([np.nan, np.nan]) # pad with nan entries if necessary
# now reset parent name entries
for i in range(1,5): # reset max 4 phone entries
phname='Phone'+str(i)
textname='Text'+str(i)
famcontact.loc[thisind, phname] = newphlist[i-1][0]
famcontact.loc[thisind, textname] = newphlist[i-1][1]
# Handle change of address (direct change if approved)
# Also change associated zip code and reset parish of residence
if 'address' in upkwargs:
if newaddrbool.get(): # BooleanVar must be read with .get()
print('Address changed for ', str(row.Family))
famcontact.loc[thisind, 'Address'] = newaddr.get()
# Reset parish of residence to nan (manually find and replace)
famcontact.loc[thisind, 'Parish_residence'] = np.nan
try:
famcontact.loc[thisind,'Zip']=int(newzip.get())
except:
print('Problem converting zip code ', newzip.get())
# TODO ... handle parish of registration
return famcontact
def update_contact(row, famcontact):
'''Update phone and textable list from google drive entries;
google drive raw entries first processed in process_data_changes (then update
contacts is called)
row is a merge of existing famcontact info and new signup info
existing entries from fam_contact listed first;
pass/modify/return series for family; reorder/replace numbers
has fairly long list of changes made w/o interactive approval:
1) changing order of email or phone numbers (e.g. swap phone1 and phone2)
2) add phone2 (or email2) if current phone2(email2) is nan
3) change order of parents (new parent1)
All other changes done w/ interactive approval using update_famcon_tk
'''
# [phone, text, order]
thisfam=row.Family
match=famcontact[famcontact['Famkey']==row.Famkey]
if len(match)==1:
thisind=match.index[0] # correct index for updating this family in famcontacts
else:
print(str(row.Family), " not found in famcontacts.. shouldn't happen")
return famcontact
upkwargs={} # empty dict for monitoring all changes
# check for possible change in address (housenum as trigger)
match1=re.search(r'\d+', row.Address)
match2=re.search(r'\d+', row.Address_n)
if match1 and match2:
num1=match1.group(0)
num2=match2.group(0)
if num1!=num2: # change in address number strongly suggestive of actual change
upkwargs.update({'address':True})
else:
print('No address # found for', str(thisfam))
phonelist=[] # list of lists with number and textable Y/N
for i in range(1,5): # get 4 existing phone entries (phone1, phone2, etc.)
phname='Phone'+str(i)
txtname='Text'+str(i)
if str(row[phname])!='nan':
phonelist.append([row[phname],row[txtname]]) # as phone and text y/N
# New google drive entries will be Phone1_n.. look for phone/text pair in existing list
if str(row.Phone1_n)!='nan' and [row.Phone1_n,row.Text1_n] in phonelist: # phone1 is a required field on new signups
# default move of phone1, text1 to top of list - no confirmation
if [row.Phone1_n,row.Text1_n]!=phonelist[0]: # move if not in first position
phonelist.insert(0,phonelist.pop(phonelist.index([row.Phone1_n,row.Text1_n])))
print('Phone 1 changed for ', str(thisfam))
upkwargs.update({'phchange':True})
if str(row.Phone1_n)!='nan' and [row.Phone1_n,row.Text1_n] not in phonelist: # phone1 is a required field on new signups
if [row.Phone1_n, np.nan] in phonelist: # remove if # present but w/o text indication (no confirm)
phonelist.remove([row.Phone1_n,np.nan])
phonelist.insert(0,[row.Phone1_n,row.Text1_n]) # insert in first position
print('Updated phone 1 to', row.Phone1_n,' for ',str(thisfam))
upkwargs.update({'phchange':True})
else:
# phone1 change to be confirmed
upkwargs.update({'phone1':[row.Phone1_n,row.Text1_n]})
upkwargs.update({'phones': phonelist})
if str(row.Phone2_n)!='nan': # check for phone2 entry (with _n suffix)
if [row.Phone2_n,row.Text2_n] not in phonelist: # add second phone to 2nd position if not present
if [row.Phone2_n,np.nan] in phonelist: # remove if # present but w/o text indication
phonelist.remove([row.Phone2_n,np.nan])
phonelist.insert(1,[row.Phone2_n,row.Text2_n])
print ('Updated phone 2 to ', str(row.Phone2_n), 'for ', str(thisfam))
upkwargs.update({'phchange':True})
else: # get approval for phone 2 addition
upkwargs.update({'phone2':[row.Phone2_n,row.Text2_n]})
upkwargs.update({'phones': phonelist})
# Construct existing list of known email addresses
emaillist=[]
for i in range(1,4): # get 3 existing email entries
emailname='Email'+str(i)
if str(row[emailname])!='nan':
emaillist.append(row[emailname].lower())
# Find new email1 entry in google drive data
if str(row.Email)!='nan' and '@' in row.Email: # real primary gd named email
if row.Email.lower() in emaillist: # already known; move to first position if needed (no confirmation)
if row.Email.lower()!=emaillist[0]: # check if in first position already
emaillist.insert(0,emaillist.pop(emaillist.index(row.Email.lower()))) # match against the lowercased list
upkwargs.update({'emchange':True})
print ('Updated email 1 ', str(row.Email.lower()), 'for family', str(thisfam))
else: # confirm email1 if not present
upkwargs.update({'email1':row.Email})
upkwargs.update({'emails':emaillist})
# look for new email in email2 position and add
if str(row.Email2_n)!='nan' and '@' in row.Email2_n:
if row.Email2_n.lower() not in emaillist: # add second email to 2nd position if not present
upkwargs.update({'email2':row.Email2_n})
upkwargs.update({'emails':emaillist})
# Update list of parent names (max 3 entries)
parlist=[] # construct existing list from family contacts
# skip if all nan for entered parents (non-gd entry)
for i in range(1,4): # construct existing parents list
fname='Pfirst'+str(i)
lname='Plast'+str(i)
if str(row[fname])!='nan':
parlist.append([row[fname],row[lname]]) # list of lists [first, last]
if str(row.Pfirst1_n)!='nan': # skip if parent name is nan
if [row.Pfirst1_n,row.Plast1_n] in parlist: # reorder in list
if [row.Pfirst1_n,row.Plast1_n]!=parlist[0]: # check if already in first
# move to first position (everything else requires approval)
parlist.insert(0,parlist.pop(parlist.index([row.Pfirst1_n,row.Plast1_n])))
parlist.insert(0,[row.Pfirst1_n, row.Plast1_n]) # insert in first position
upkwargs.update({'parchange':True})
else: # parent not in list (confirm)
upkwargs.update({'newpar1':[row.Pfirst1_n,row.Plast1_n]})
upkwargs.update({'parlist':parlist})
# inserts in first position while simultaneously removing other entry
if str(row.Pfirst2_n)!='nan': # Check for parent 2 entry
if [row.Pfirst2_n,row.Plast2_n] not in parlist: # add second phone to 2nd position if not present
upkwargs.update({'newpar2':[row.Pfirst2_n,row.Plast2_n]})
upkwargs.update({'parlist':parlist})
# Save auto-changes in phone to family contacts
if 'phchange' in upkwargs: # Record altered phonelist in famcontacts
if 'phones' in upkwargs: # if present in upkwargs, update list
upkwargs.update({'phones': phonelist}) # ensure most current copy
phonelist=phonelist[0:4] # limit to 4 entries (don't drop a valid 4th number)
while len(phonelist)<4:
phonelist.append([np.nan,np.nan]) # pad with nan entries if necessary
for i in range(1,5): # reset 4 existing phone entries
phname='Phone'+str(i)
txtname='Text'+str(i)
famcontact.loc[thisind, phname] = phonelist[i-1][0] # first of tuple is phone
famcontact.loc[thisind, txtname] = phonelist[i-1][1] # 2nd of tuple is text y/n
del upkwargs['phchange']
print('automatic phone changes for', thisfam)
# Save auto-changes in emails to family contacts
if 'emchange' in upkwargs: # Record altered phonelist in famcontacts
if 'emails' in upkwargs: # if present in upkwargs, update list
upkwargs.update({'emails': emaillist}) # ensure most current copy
emaillist=emaillist[0:3] # limit to 3 entries (don't drop a valid 3rd address)
while len(emaillist)<3:
emaillist.append(np.nan) # pad with nan entries if necessary
for i in range(1,4): # reset 3 existing email entries
emname='Email'+str(i)
famcontact.loc[thisind, emname] =emaillist[i-1]
del upkwargs['emchange']
print('automatic email changes for', thisfam)
if 'parchange' in upkwargs: # Record altered parents list in famcontacts
if 'parlist' in upkwargs: # if present in upkwargs, update list
upkwargs.update({'parlist': parlist}) # ensure most current copy
parlist=parlist[0:3] # limit to 3 entries (don't drop a valid 3rd parent)
while len(parlist)<3:
parlist.append([np.nan,np.nan]) # pad with [nan,nan] pairs so the first/last indexing below works
for i in range(1,4): # reset 3 existing parent entries
fname='Pfirst'+str(i)
lname='Plast'+str(i)
try:
famcontact.loc[thisind, fname] =parlist[i-1][0]
famcontact.loc[thisind, lname] =parlist[i-1][1]
except:
print('Error updating parents for', thisfam)
del upkwargs['parchange']
print('automatic parent changes for', thisfam)
# now check for any changes needing interactive approval
if len(upkwargs)>0: # something needs interactive approval
famcontact=updatefamcon_tk(row, famcontact, **upkwargs)
return famcontact
def updateplayer_tk(row, players, **upkwargs):
''' Interactive approval of player info updates (except date)
changes directly made to players (but not yet autosaved)
called by processdatachanges
'''
commonschools=['Cabrini','Soulard','SLPS','Charter','Private']
root = tk.Tk()
root.title('Update player info')
choice=tk.StringVar() # must be define outside of event called functions
rownum=0
mytxt='Player:'+row.First+' '+row.Last+' # '+str(row.Plakey)
tk.Label(root, text=mytxt).grid(row=rownum, column=0)
rownum+=1
# Use listbox of common schools?
if 'DOB' in upkwargs: # indicates discrepancy
DOB1=date(row.DOB)
DOB2=date(row.DOB_n)
# create and display DOB variables
def ChooseDOB1(event):
DOB.set(datetime.strftime(DOB1,'%m/%d/%y'))
def ChooseDOB2(event):
DOB.set(datetime.strftime(DOB2,'%m/%d/%y'))
DOB=tk.StringVar()
DOB.set(datetime.strftime(DOB1,'%m/%d/%y')) # defaults to original
tk.Label(root, text='Update date of birth?').grid(row=rownum, column=0)
mytxt='current DOB:'+datetime.strftime(DOB1,'%m/%d/%y')
b=tk.Button(master=root, text=mytxt)
b.bind('<Button-1>', ChooseDOB1)
b.grid(row=rownum, column=1)
mytxt='New DOB:'+datetime.strftime(DOB2,'%m/%d/%y')
b=tk.Button(master=root, text=mytxt)
b.bind('<Button-1>', ChooseDOB2)
b.grid(row=rownum, column=2)
tk.Entry(master=root, textvariable=DOB).grid(row=rownum, column=3)
rownum+=1
if 'school' in upkwargs:
school=tk.StringVar()
school.set(row.School) # default to existing value
tk.Label(root, text='Update school?').grid(row=rownum, column=0)
rownum+=1
def newschool(event):
school.set(row.School_n)
def oldschool(event):
school.set(row.School)
def pickschool(event):
# double-click to pick standard school choice
items=lb.curselection()[0] # gets selected position in list
school.set(commonschools[items])
tk.Entry(root, textvariable=school).grid(row=rownum, column=2)
mytxt='new school:'+str(row.School_n)
b=tk.Button(master=root, text=mytxt)
b.bind('<Button-1>', newschool)
b.grid(row=rownum, column=1)
mytxt='existing school:'+str(row.School)
b=tk.Button(master=root, text=mytxt)
b.bind('<Button-1>', oldschool)
b.grid(row=rownum, column=0)
# also include selectable listbox of common school choices
lb=tk.Listbox(master=root, selectmode=tk.SINGLE)
lb.bind("<Double-Button-1>", pickschool)
lb.grid(row=rownum, column=3)
for i,sch in enumerate(commonschools):
lb.insert(tk.END, sch)
rownum+=1
# Now set up select/close buttons
def skip(event):
choice.set('skip')
root.destroy()
def change(event):
choice.set('change')
root.destroy()
f=tk.Button(root, text='Skip')
f.bind('<Button-1>', skip)
f.grid(row=rownum, column=0)
g=tk.Button(root, text='Change')
g.bind('<Button-1>', change)
g.grid(row=rownum, column=1)
root.mainloop()
mychoice=choice.get()
if mychoice=='change':
try:
# make changes directly to players after finding correct index using plakey
plakey=row.Plakey
match=players[players['Plakey']==plakey]
thisind=match.index[0]
if 'school' in upkwargs:
players.loc[thisind,'School']= school.get()
if 'DOB' in upkwargs:
newDOB=datetime.strptime(DOB.get(),'%m/%d/%y')
players.loc[thisind,'DOB']= newDOB
except:
print('Error updating info for', row.Plakey, row.First, row.Last)
return players
def prepcontacts(df):
''' Prepare for update contacts/ matching with google drive info
avoids possible problems/spaces in manually entered info '''
mycols=['Pfirst1', 'Plast1','Pfirst2', 'Plast2', 'Pfirst3', 'Plast3',
'Phone1', 'Text1','Phone2', 'Text2', 'Phone3', 'Text3', 'Phone4',
'Text4', 'Email1','Email2', 'Email3']
for i, col in enumerate(mycols):
try:
df.loc[:,col]=df[col].str.strip()
except: # maybe only nan or not present (i.e. in signups)
pass
mycols=['Text1','Text2','Text3']
for i, col in enumerate(mycols):
try:
df.loc[:,col]=df[col].str.replace('No','N', case=False)
df.loc[:,col]=df[col].str.replace('Yes','Y', case=False)
except:
pass
return df
def findyearseason(df):
''' Pass raw signups and determine year and sports season '''
# get year from system clock and from google drive timestamp
now=datetime.now()
val=df.Timestamp[0] # grab first timestamp
if not isinstance(val, datetime): # if not a timestamp (i.e. manual string entry), find the first real one
for index, row in df.iterrows():
if isinstance(row.Timestamp, datetime):
val=row.Timestamp
break
year=val.year # use year value from signup timestamps
if now.year!=val.year:
print ('Possible year discrepancy: Signups are from ',str(val.year))
# now find sports season
mask = np.column_stack([df['Sport'].str.contains("occer", na=False)])
if len(df.loc[mask.any(axis=1)])>0:
season='Fall'
mask = np.column_stack([df['Sport'].str.contains("rack", na=False)])
if len(df.loc[mask.any(axis=1)])>0:
season='Spring'
mask = np.column_stack([df['Sport'].str.contains("asket", na=False)])
if len(df.loc[mask.any(axis=1)])>0:
season='Winter'
return season, year
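''' EXAMPLE (findyearseason sketch)
season, year = findyearseason(gsignups)
# sport names drive the season (e.g. 'Basketball' -> 'Winter'); year comes from the signup timestamps
'''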
def outputduplicates(df):
'''Prints out names of players with duplicated entries into console... can then delete from google drive signups '''
tempdf=df[df.duplicated(['First','Last','Sport'])] # keeps the 2nd (and later) copy of each duplicated signup
firsts=tempdf.First.tolist()
lasts=tempdf.Last.tolist()
for f,l in zip(firsts, lasts):
print('Duplicated signup for player: {} {}'.format(f,l))
return
def formatphone(df):
''' Convert all entered phone numbers in dfs phone columns to 314-xxx-xxxx string and standardize text field '''
def phoneFormat(val):
# lambda function phone number reformatting
if not isinstance(val, str):
return val
# replace/remove any white space
val="".join(val.split(' '))
if val=='': # blank phone causes problems
return np.nan
if not re.search(r'(\d+-\d+-\d+)', val):
val=re.sub("[^0-9]", "", val) # substitute blank for non-number
if len(val)==7:
return '314'+val
elif len(val)==11 and val.startswith('1'): # remove starting 1 if present
return val[1:11]
elif len(val)!=10: # sometimes has ---
# print('Bad number: ',val)
return val
else:
return val[0:3]+'-'+val[3:6]+'-'+val[6:10]
else:
return val # already good
# find phone columns (named phone, phone2, etc.)
phlist=[str(s) for s in df.columns if 'Phone' in s]
for col in phlist:
df.loc[:,col]=df[col].apply(lambda x: phoneFormat(x))
# now change yes in any text field to Y
txtlist=[str(s) for s in df.columns if 'Text' in s]
for col in txtlist:
df.loc[:,col]=df[col].replace('yes','Y')
df.loc[:,col]=df[col].replace('Yes','Y')
return df
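''' EXAMPLE (phoneFormat normalization applied to the Phone columns, illustrative only)
'(314) 123-4567' -> '314-123-4567' (non-digits stripped, then re-hyphenated)
'3141234567' -> '314-123-4567'
'123-456-7890' -> returned as-is (already in ddd-ddd-dddd form)
'' -> np.nan (blank numbers cause downstream problems)
'''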
def standardizeschool(df):
''' can pass any frame with school column and standardize name as Cabrini and Soulard'''
schstr='frances' + '|' + 'cabrini' + '|' + 'sfca' # multiple school matching string
tempdf=df[df['School'].str.contains(schstr, na=False, case=False)]
df.loc[tempdf.index,'School']='Cabrini'
tempdf = df[df['School'].str.contains('soulard', na=False, case=False)]
df.loc[tempdf.index,'School']='Soulard'
tempdf = df[df['School'].str.contains('public', na=False, case=False)]
df.loc[tempdf.index,'School']='Public'
schstr='city garden' + '|' + 'citygarden' # multiple school matching string
tempdf = df[df['School'].str.contains(schstr, na=False, case=False)]
df.loc[tempdf.index,'School']='City Garden'
return df
def formatnamesnumbers(df):
'''Switch names to title case, standardize gender, call phone/text reformat and standardize school name'''
def titleStrip(val):
try:
return val.title().strip()
except:
return val
processCols=['First','Last','Family','Pfirst1','Plast1','Pfirst2','Plast2','Email','Email2']
processCols=[i for i in processCols if i in df.columns]
for col in processCols:
df.loc[:, col]=df[col].apply(lambda x: titleStrip(x))
if 'Gender' in df:
df.loc[:,'Gender']=df.Gender.replace('Girl','f')
df.loc[:,'Gender']=df.Gender.replace('Boy','m')
if 'Grade' in df:
df.loc[:,'Grade']=df.Grade.replace('K',0)
df.loc[:,'Grade']=df.Grade.replace('pK',0)
try:
df.loc[:,'Grade']=df.Grade.astype(int)
except:
print('Player grade likely missing from raw signup file... enter manually')
df=formatphone(df) # call phone reformatting string
if 'School' in df:
df=standardizeschool(df) # use "Cabrini" and "Soulard" as school names
return df
def graduate_players(players, year):
''' Recalc grade based on grade adjustment, school year (run once per year in fall) and age.
some player grades will already have been updated (generally google drive entries)... however recalc shouldn't
change grade '''
players.loc[:,'Grade']=players.Grade.replace('K',0)
for index,row in players.iterrows():
grade=int(players.iloc[index]['Grade']) # get currently listed grade (K already replaced by 0 above)
gradeadj=players.iloc[index]['Gradeadj']
dob=players.iloc[index]['DOB']
if str(gradeadj)=='nan' or str(dob)=='NaT': # skip grade update if info is missing
continue
dob=date(dob)
# calculate current age at beginning of this school on 8/1
age=date(year,8,1)-dob
age = (age.days + age.seconds/86400)/365.2425
# assign grade based on age (and grade adjustment)
newgrade=int(age)+int(gradeadj)-5
if grade!=newgrade:
first=players.iloc[index]['First']
last=players.iloc[index]['Last']
print('Grade changed from',grade,'to',newgrade,'for', first, last)
players.loc[index, 'Grade'] = newgrade
players.loc[:,'Grade']=players.Grade.replace(0,'K')
return players
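''' EXAMPLE (the grade formula used above, illustrative only)
newgrade = int(age on 8/1 of the school year) + gradeadj - 5
e.g. DOB 5/20/2011 with year=2019 -> age ~8.2 on 8/1/2019 -> 8 + 0 - 5 = grade 3
'''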
def removeEmptyFams(players, famcontact):
'''
Remove empty families (with no remaining players)
'''
# Remove families with no active players
plaset=[int(i) for i in list(players.Famkey.unique())]
famset=[int(i) for i in list(famcontact.Famkey.unique())]
# Empty families
emptykey=[i for i in famset if i not in plaset]
empty=famcontact[famcontact['Famkey'].isin(emptykey)]
print('Remove empty families:')
for ind, row in empty.iterrows():
print(row.Family, ':',row.Pfirst1, row.Plast1)
choice=input("Remove empty families (Y,N)?\n")
if choice.upper()=='Y':
famcontact=famcontact[~famcontact['Famkey'].isin(emptykey)]
outname=cnf._INPUT_DIR+'\\family_contact.csv'
famcontact.to_csv(outname, index=False)
return famcontact
def removeHSkids(players):
''' Drop graduated players (9th graders) from list '''
grlist=[i for i in range(0,9)]
grlist.append('K')
Hs=players.loc[~(players.Grade.isin(grlist))]
for ind, row in Hs.iterrows():
print(row.First, row.Last)
choice=input('Remove above HS players (Y/N)?\n')
if choice.upper()=='Y':
players=players.loc[(players.Grade.isin(grlist))]
print('HS Players removed but not autosaved')
return players
def estimategrade(df, year):
'''Estimate grade for this sports season based on DOB.. not commonly used '''
for index, row in df.iterrows():
grade=df.loc[index]['Grade']
if str(grade)=='nan': # skips any players who already have assigned grade
dob=df.loc[index]['DOB']
dob=date(dob) # convert to datetime date from timestamp
first=df.loc[index]['First']
last=df.loc[index]['Last']
if str(dob)=='nan':
print ('DOB missing for ', first,' ', last)
continue # skip to next if dob entry is missing
currage=date(year,8,1) - dob
currage = (currage.days + currage.seconds/86400)/365.2425 # age on first day of school/ sports season
gradeest=int(currage-5)
if gradeest==0:
gradeest='K'
print(first, last, 'probably in grade', gradeest)
df.loc[index,'Grade']=gradeest
return df
def updateoldteams(teams, year):
''' Load old teams after copy to teams tab in teams_coaches, then auto-update year-grade
must be manually saved with saveteams... then any adjustments made manually in Excel'''
# check to ensure teams are not already updated
if teams.iloc[0]['Year']==year:
print('Teams already updated for ', year,' school year')
return teams # pass back unaltered
# temporarily make the K to 0 replacements
teams.Grade=teams.Grade.replace('K',0)
teams.loc[:,'Graderange']=teams['Graderange'].astype(str) # convert all to string
teams.loc[:,'Year']=year
teams.loc[:,'Grade']+=1
for index, row in teams.iterrows():
grade=teams.loc[index]['Grade']
div=teams.loc[index]['Division'] # division must match grade
div=div.replace('K','0') # replace any Ks in string
newdiv=''.join([s if not s.isdigit() else str(grade) for s in div]) # find replace for unknown # w/ new grade
teams.loc[index,'Division'] = newdiv
cycname=teams.loc[index]['Team'] # update grade portion of team name
if cycname.startswith('K'):
newcycname='1'+ cycname[1:]
teams.loc[index,'Team'] = newcycname
elif cycname[0].isdigit(): # now update teams beginning w/ numbers
newcycname=str(grade)+ cycname[1:]
teams.loc[index,'Team']= newcycname
# update grade ranges
grrange=teams.loc[index]['Graderange'] # should be all numbers
grrange=grrange.replace('K','0')
newrange=''.join([str(int(i)+1) for i in grrange])
teams.loc[index,'Graderange'] = newrange # grade range stored as string, right?
# no auto-save... save with saveteams after checking for proper changes
return teams
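''' EXAMPLE (year-over-year renaming sketch; team names are hypothetical)
with the grade bumped from 2 to 3, team '2G-Smith' becomes '3G-Smith', division '2G' becomes '3G',
and grade range '12' becomes '23'; a kindergarten team such as 'KB-Jones' becomes '1B-Jones'
'''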
def splitcoaches(df):
''' Pass CYC teams list, split and duplicate rows with comma separated vals in colname for extra coaches'''
df.loc[:,'Role']='Coach' # add col for head or asst (first entry for head coach)
# df['Open/Closed']='Closed'
assistants=df.dropna(subset=['AssistantIDs']) # drop teams w/ no asst coaches
for index, rows in assistants.iterrows():
val=assistants.loc[index,'AssistantIDs']
asstcoaches=[str(s) for s in val.split(',')] #list of assistants for single team
for i,asst in enumerate(asstcoaches):
newrow=assistants.loc[index] # duplicate entry as series
asst=asst.strip() # strip leading, trailing blanks
newrow.loc['Coach ID'] = asst # set this asst coaches ID
newrow.loc['Role'] = 'Assistant Coach'
df=df.append(newrow)
df=df.sort_values(['Team'],ascending=True)
return df
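''' EXAMPLE (splitcoaches row duplication sketch; IDs are hypothetical)
a team row with Coach ID 'A123' and AssistantIDs 'B456, C789' yields three roster rows:
the original with Role='Coach' (A123) plus two Role='Assistant Coach' rows (B456 and C789)
'''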
def addcoachestoroster(teams, coaches):
'''Creates roster entries for coaches for each CYC team
pass teams and coaches (with coach roster info)
needed roster cols are all below (except sport used in output parsing)
args: teams -- team table w/ head and asst coach CYC ids
coaches - coaches table with CYC Id (key) and associated info
returns: coachroster --separate df to be appended to main player roster
'''
# Add team coaches (match by CYC-IDs)
thismask = teams['Team'].str.contains('-', case=False, na=False) # finds this season's CYC level teams
CYCcoach=teams.loc[thismask] # also has associated sport
CYCcoach=splitcoaches(CYCcoach) # makes new row for all assistant coaches on CYC teams
CYCcoach=pd.merge(CYCcoach, coaches, how='left', on=['Coach ID'], suffixes=('','_r'))
mycols=['Sport','Fname', 'Lname', 'Street', 'City', 'State', 'Zip', 'Phone', 'Email', 'Birthdate', 'Sex', 'Role', 'Division', 'Grade', 'Team', 'School', 'Parish of Registration', 'Parish of Residence', 'Coach ID']
for col in [col for col in mycols if col not in CYCcoach.columns]:
CYCcoach[col]='' # birthdate generally missing
CYCcoach=CYCcoach[mycols] # put back in desired order
# drop duplicates on CYC ID, team (sometimes occurs during merge)
CYCcoach=CYCcoach.drop_duplicates(['Coach ID','Team'])
return CYCcoach
def countteamplayers(df, teams, season, year):
''' For each team, summarize number of players (subset those that are younger or older) and list of names
passing mastersignups'''
df=df[df['Year']==year] # removes possible naming ambiguity
sportsdict={'Fall':['VB','Soccer'], 'Winter':['Basketball'],'Spring':['Track','Softball','Baseball','T-ball']}
sportlist=sportsdict.get(season,[])
df=df[df['Sport'].isin(sportlist)] # only this sports season
df.Grade=df.Grade.replace('K',0)
df.Grade=df.Grade.astype('int')
teams.loc[:,'Grade']=teams.Grade.replace('K',0)
teams.loc[:,'Grade']=teams.Grade.astype('int')
teams.loc[:,'Playerlist']=teams.Playerlist.astype('str')
for index, row in teams.iterrows():
teamname=teams.loc[index]['Team']
match=df[df['Team']==teamname] # all players on this team from master_signups
teams.loc[index,'Number'] = len(match) # total number of players
# compose player list (First L.) and add to teams
playerlist=[]
for ind, ro in match.iterrows():
first=match.loc[ind]['First']
last=match.loc[ind]['Last']
strname=first+' ' +last[0]
playerlist.append(strname)
players=", ".join(playerlist)
teams.loc[index,'Playerlist'] = players
# count players above or below grade level
thisgrade=int(teams.loc[index]['Grade'])
teams.loc[index,'Upper'] = (match.Grade > thisgrade).sum()
teams.loc[index,'Lower'] = (match.Grade < thisgrade).sum()
writetoxls(teams, 'Teams', 'Teams_coaches.xlsx')
return teams
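''' EXAMPLE (team summary sketch; writes back to the Teams tab of Teams_coaches.xlsx)
teams = countteamplayers(Mastersignups, teams, 'Fall', 2019)
# adds Number, Playerlist ("First L." strings) and Upper/Lower counts of players above/below the team grade
'''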
def writecontacts(df, famcontact, players, season, year):
''' From mastersignups and teams, output contact lists for all teams/all sports separately '''
# Slice by sport: Basketball (null for winter?), Soccer, Volleyball, Baseball, T-ball, Softball, Track)
sportsdict={'Fall':['VB','Soccer'], 'Winter':['Basketball'],'Spring':['Track','Softball','Baseball','T-ball']}
sportlist=sportsdict.get(season)
df=df.loc[(df['Sport'].isin(sportlist)) & (df['Year']==year)] # season is not in mastersignups... only individual sports
'''# for transfers to same school (but different grades), combine all into single list for given school
for index,row in df.iterrows():
if str(df.loc[index]['Team'])!='nan': # avoids nan team screwups
if '#' in df.loc[index]['Team']: # this combines Ambrose#2B, Ambrose#3G to single tab
df=df.set_value(index,'Team',df.loc[index]['Team'].split('#')[0])
'''
# get family contact info from famcontacts
df=pd.merge(df, famcontact, how='left', on=['Famkey'], suffixes=('','_r'))
# get school from players.csv
df=pd.merge(df, players, how='left', on=['Plakey'], suffixes=('','_r3'))
# Sort by grade pre-split
df.loc[:,'Grade']=df.Grade.replace('K',0)
df.loc[:,'Grade']=df.Grade.apply(int)
df=df.sort_values(['Grade'], ascending=True)
df.loc[:,'Grade']=df.Grade.replace(0,'K') # put K back after the numeric sort
df.loc[:,'Team']=df.Team.replace(np.nan,'None') # still give contacts if team not yet assigned
df.loc[:,'Team']=df.Team.replace('','None')
# Standard sport contacts output for soccer, VB, basketball
if season!='Spring':
for i, sport in enumerate(sportlist):
fname=cnf._OUTPUT_DIR+'\\'+sport+'_'+str(year)+'_contacts.xlsx'
writer=pd.ExcelWriter(fname, engine='openpyxl')
Thissport=df[df['Sport']==sport]
teamlist= Thissport.Team.unique()
teamlist=np.ndarray.tolist(teamlist)
# Combine transfers to same school
transchools=[s.split('#')[0] for s in teamlist if '#' in s]
teamlist=[s for s in teamlist if '#' not in s]
teamlist.extend(transchools) # all to same school as single "team"
# now can organize contacts (and drop sport)
mycols=['First', 'Last', 'Grade', 'Gender', 'School', 'Phone1', 'Text1','Email1', 'Phone2', 'Text2',
'Email2', 'Team', 'Pfirst1', 'Plast1', 'Pfirst2', 'Plast2', 'Plakey', 'Famkey', 'Family']
Thissport=Thissport[mycols] # drop columns and rearrange
for i, team in enumerate(teamlist):
thisteam=Thissport[Thissport['Team'].str.contains(team)]
thisteam.to_excel(writer,sheet_name=team,index=False) # this overwrites existing file
writer.save()
else: # handle spring special case
Balls=df[df['Sport']!='Track'] # all ball-bat sports together
mycols=['First', 'Last', 'Grade', 'Gender', 'School', 'Phone1', 'Text1','Email1', 'Phone2', 'Text2',
'Email2', 'Team', 'Pfirst1', 'Plast1', 'Pfirst2', 'Plast2', 'Plakey', 'Famkey', 'Family']
Balls=Balls[mycols]
teamlist= Balls.Team.unique()
teamlist=np.ndarray.tolist(teamlist)
# Combine transfers
transchools=[s.split('#')[0] for s in teamlist if '#' in s]
teamlist=[s for s in teamlist if '#' not in s]
teamlist.extend(transchools) # all to same school as single "team"
fname=cnf._OUTPUT_DIR+'\\'+'Batball'+'_'+str(year)+'_contacts.xlsx'
writer=pd.ExcelWriter(fname, engine='openpyxl')
# create a separate tab for each team and write the contacts
for i, team in enumerate(teamlist):
thisteam=Balls[Balls['Team'].str.contains(team)]
thisteam.to_excel(writer,sheet_name=team,index=False) # this overwrites existing file
writer.save() # overwrites existing
# Entire track team as single file
Track=df[df['Sport']=='Track']
Track=Track[mycols] # drop columns and rearrange
fname=cnf._OUTPUT_DIR+'\\'+'Track'+'_'+str(year)+'_contacts.xlsx'
writer=pd.ExcelWriter(fname, engine='openpyxl')
Track.to_excel(writer,sheet_name='Track',index=False)
writer.save()
return
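# Example (hedged sketch): a typical call, assuming the master signups, family contact
# and player tables were loaded elsewhere; only master_signups.csv and players.csv are
# named in this module, the other file name below is illustrative.
# Mastersignups = pd.read_csv(cnf._INPUT_DIR + '\\master_signups.csv')
# famcontact = pd.read_csv(cnf._INPUT_DIR + '\\family_contact.csv')  # illustrative name
# players = pd.read_csv(cnf._INPUT_DIR + '\\players.csv')
# writecontacts(Mastersignups, famcontact, players, 'Fall', 2019)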
def makegoogcont(df, famcontact, players, season, year):
'''Create and save a google contacts file for all Cabrini teams
save to csv '''
sportsdict={'Fall':['VB','Soccer'], 'Winter':['Basketball'],'Spring':['Track','Softball','Baseball','T-ball']}
sportlist=sportsdict.get(season)
df=df.loc[(df['Sport'].isin(sportlist)) & (df['Year']==year)] # season is not in mastersignups... only individual sports
'''# for transfers to same school (but different grades), combine all into single list for given school
for index,row in df.iterrows():
if str(df.loc[index]['Team'])!='nan': # avoids nan team screwups
if '#' in df.loc[index]['Team']: # this combines Ambrose#2B, Ambrose#3G to single tab
df=df.set_value(index,'Team',df.loc[index]['Team'].split('#')[0])
'''
# get family contact info from famcontacts
df=pd.merge(df, famcontact, how='left', on=['Famkey'], suffixes=('','_r'))
# get school from players.csv
df=pd.merge(df, players, how='left', on=['Plakey'], suffixes=('','_r3'))
# Drop any players not yet assigned
df=df.dropna(subset=['Team'])
# Full contacts list format for android/google
for i, sport in enumerate(sportlist):
Thissport=df[df['Sport']==sport]
teamlist= Thissport.Team.unique()
teamlist=np.ndarray.tolist(teamlist)
# drop if team is not yet assigned
teamlist=[s for s in teamlist if str(s) != 'nan']
# drop if team is 'drop'
teamlist=[s for s in teamlist if str(s) != 'drop']
# Drop all non-Cabrini transferred teams (which must contain #)
teamlist=[s for s in teamlist if '#' not in s]
# Combine track subteams to single team
teamlist=[s[0:5] if 'Track' in s else s for s in teamlist]
teamlist=set(teamlist)
teamlist=list(teamlist)
# now create google contacts list for each Cabrini team and save
for j, team in enumerate(teamlist):
thisteam=Thissport[Thissport['Team'].str.contains(team)]
# Drop duplicate from same family
thisteam=thisteam.drop_duplicates('Phone1')
thisteam.loc[:,'Name']=thisteam['First']+' '+thisteam['Last']
thisteam.loc[:,'Group']=sport+str(year)
mycols=['Name','Pfirst1','Last','Phone1','Phone2','Email1','Email2','Group']
newcols=['Name','Additional Name','Family Name','Phone 1 - Value','Phone 2 - Value',
'E-mail 1 - Value','E-mail 2 - Value','Group Membership']
thisteam=thisteam[mycols]
thisteam.columns=newcols
thisteam=thisteam.replace(np.nan,'')
fname=cnf._OUTPUT_DIR+'\\google'+team+'.csv'
thisteam.to_csv(fname, index=False)
return
def createsignups(df, Mastersignups, season, year):
''' pass signups and add signups to master list, also returns list of current
player keys by sport; typically use writesignupstoExcel instead
args:
df - signup (dataframe)
Mastersignups - existing all signups db-like file
season - ['Fall','Winter','Spring']
year- 4 digit year as int
returns:
Mastersignups - same with new unique entries
'''
sportsdict={'Fall':['VB','Soccer'], 'Winter':['Basketball'],'Spring':['Track','Softball','Baseball','T-ball']}
sportlist=sportsdict.get(season)
# Use comma sep on multiple sport entries??
now=datetime.now()
thisdate=date.strftime(now,'%m/%d/%Y') # for signup date
df.loc[:,'SUdate']=thisdate # can do this globally although might also add to signups
startlen=len(Mastersignups) # starting number of signups
intcols=['SUkey','Year']
for i, col in enumerate(intcols):
if col not in df:
df.loc[df.index, col]=np.nan
mycols=Mastersignups.columns.tolist() # desired column order
for i, col in enumerate(mycols):
if col not in df:
df.loc[df.index,col]=np.nan
# TODO one option here would be to clone comma-separated sport entries (i.e. track and softball)
for i, sport in enumerate(sportlist):
# Use caution here due to Tball in Softball string problem (currently set to T-ball)
thissport=df.loc[df['Sport'].str.contains(sport, na=False, case=False)] # also handles multi-sports
# Prepare necessary columns
for index, row in thissport.iterrows():
thissport.loc[index,'Sport'] = sport # set individually to formal sport name
thissport.loc[index,'Year'] = int(year)
thissport.loc[index,'SUkey'] = 0 # assigned actual key below
# Now organize signups and add year
Mastersignups=pd.concat([thissport,Mastersignups], ignore_index=True)
Mastersignups=Mastersignups[mycols] # put back in original order
# drop duplicates and save master signups file (keep older signup if present... already assigned SUkey)
Mastersignups=Mastersignups.sort_values(['Plakey', 'Sport','Year','SUkey'], ascending=False) # keeps oldest signup
Mastersignups=Mastersignups.drop_duplicates(subset=['Plakey', 'Sport','Year']) # drop duplicates (for rerun with updated signups)
newsignups=len(Mastersignups)-startlen # number of new signups added this pass
print('Added ', str(newsignups),' new ', season, ' signups to master list.')
# add unique SUkey (if not already assigned)
neededkeys = Mastersignups[(Mastersignups['SUkey']==0)] # rows still needing a unique SUkey
availSUkeys=findavailablekeys(Mastersignups, 'SUkey', len(neededkeys)) # get necessary # of unique SU keys
keycounter=0
for index, row in neededkeys.iterrows():
Mastersignups.loc[index,'SUkey'] = availSUkeys[keycounter] # reassign SU key in source master list
keycounter+=1 # move to next available key
Mastersignups.loc[:,'Grade']=Mastersignups.Grade.replace('K',0)
Mastersignups=Mastersignups.sort_values(['Year', 'Sport', 'Gender','Grade'], ascending=False)
Mastersignups.loc[:,'Grade']=Mastersignups.Grade.replace(0,'K')
# autocsvbackup(Mastersignups,'master_signups', newback=True)
Mastersignups.to_csv(cnf._INPUT_DIR + '\\master_signups.csv', index=False, date_format='%m/%d/%y') # automatically saved
return Mastersignups
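# Example (hedged sketch): merging one season's raw signup sheet into the master list;
# the signup file name below is illustrative, not taken from this module.
# signups = pd.read_excel(cnf._INPUT_DIR + '\\fall_signups.xlsx')
# Mastersignups = createsignups(signups, Mastersignups, 'Fall', 2019)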
def replaceacro(df, acronyms):
''' Pass df column and return with acronyms replaced with full translations (parishes and schools
currently used only for CYC rosters '''
for index, row in acronyms.iterrows():
acro=acronyms.loc[index]['acronym']
transl=acronyms.loc[index]['translation']
# TODO only for parish columns
df.loc[:,'Parish of Registration']=df['Parish of Registration'].replace(acro, transl)
df.loc[:,'Parish of Residence']=df['Parish of Residence'].replace(acro, transl)
df.loc[:,'School']=df['School'].replace(acro, transl)
return df
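# Example (hedged sketch) of the acronym table replaceacro expects: two columns named
# 'acronym' and 'translation'; the sample values are made up for illustration.
# acronyms = pd.DataFrame({'acronym': ['SFC'], 'translation': ['St. Frances Cabrini']})
# roster = replaceacro(roster, acronyms)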
def createrosters(df, season, year, players, teams, coaches, famcontact, acronyms):
''' From Mastersignups of this season creates Cabrini CYC roster and transfers (for separate sports)
and all junior sports (calculates ages for Judge Dowd); pulls info merged from famcontact, players, teams, and coaches
teams should already be assigned using teams xls and assigntoteams function
returns: None ... direct save to OUTPUT_DIR
'''
sportsdict={'Fall':['VB','Soccer'], 'Winter':['Basketball'],'Spring':['Track','Softball','Baseball','T-ball']}
specials=['Chess','Track']
sports=sportsdict.get(season)
sportlist=[sport for sport in sports if sport not in specials]
speciallist=[sport for sport in sports if sport in specials] # for track, chess, other oddballs
Specials=df[(df['Year']==year) & (df['Sport'].isin(speciallist))] # deal with these at bottom
# Proceed with all normal South Central sports
df = df[(df['Year']==year) & (df['Sport'].isin(sportlist))] # filter by year and standard CYC sports
# make duplicate entry row for double-rostered players (multiple team assignments)
thismask = df['Team'].str.contains(',', na=False) # multiple teams are comma separated
doubles=df.loc[thismask]
for index, rows in doubles.iterrows():
team=doubles.loc[index,'Team']
team=team.split(',')[1] # grab 2nd of duplicate teams
doubles.loc[index, 'Team'] = team
df=pd.concat([df,doubles], ignore_index=True) # adds duplicate entry for double-rostered players with 2nd team
thismask = df['Team'].str.contains(',', na=False) # multiple teams are comma separated
for index, val in thismask.iteritems():
if val:
team=df.loc[index]['Team']
team=team.split(',')[0] # grab 1st of duplicate teams
df.loc[index, 'Team'] = team # removes 2nd team from first entry
# now grab all extra info needed for CYC rosters
# Street, City, State, Zip, Phone, email, Parishreg, parishres from fam-contact
df=pd.merge(df, famcontact, how='left', on=['Famkey'], suffixes=('','_r'))
# Get division from Teams xls
df=pd.merge(df, teams, how='left', on=['Team'], suffixes=('','_r2')) # effectively adds other team info for roster to all players
# DOB, School from players.csv
df=pd.merge(df, players, how='left', on=['Plakey'], suffixes=('','_r3'))
df.loc[:,'Role']='Player' # add column for role
# df['Open/Closed']=np.nan
df.loc[:,'Coach ID']=''
def formatDOB(val):
# Pat Moore date format is 4/4/19; reformat as string for csv output
try:
return datetime.strftime(val, "%m/%d/%y")
except:
# print('Problem converting %s of type %s to date string format' %(val, type(val)) )
return ''
# Find Cabrini CYC names (containing hyphen)
thismask = df['Team'].str.contains('-', case=False, na=False)
CabriniCYC=df.loc[thismask] # all players on Cabrini CYC teams all sports this season
# Finds info for CYC coaches (all sports) and generate roster entries
coachroster=addcoachestoroster(teams, coaches) # coaches roster already in correct format + sport column
if len(CabriniCYC)>1: # skip if all transfers or junior (i.e. in spring)
# Split by sport
for i, sport in enumerate(sportlist):
Sportroster=CabriniCYC[CabriniCYC['Sport']==sport]
# reformat this mess as single CYC roster
Sportroster=organizeroster(Sportroster)
# Add coaches from this sport to roster
Rostercoaches=coachroster[coachroster['Sport']==sport]
Rostercoaches=organizeroster(Rostercoaches)
Sportroster=pd.concat([Sportroster,Rostercoaches], ignore_index=True) # adds coaches and players together
Sportroster=Sportroster.sort_values(['Team','Role','Grade','Lname'])
fname=cnf._OUTPUT_DIR+'\\Cabrini_'+sport+'roster'+str(year)+'.csv'
Sportroster=replaceacro(Sportroster, acronyms) # replace abbreviations
Sportroster.loc[:,'Birthdate']=Sportroster['Birthdate'].apply(lambda x: formatDOB(x))
Sportroster.to_csv(fname, index=False)
# done with Cabrini CYC rosters
# Break out all other types of teams (transfers, junior teams, Chess/Track)
thismask = df['Team'].str.contains('-', case=False, na=False)
Others=df.loc[~thismask] # no hyphen for all non Cabrini CYC level (Cabrini junior and transfers)
# Cabrini transferred players to CYC teams with # (i.e. Ambrose#8B, OLS#3G)
# Non-CYC cabrini junior teams start with number
thismask = Others['Team'].str.contains('#', na=True) # flag nans and set to true (usually jr teams w/o assignment)
# Transferred teams contain # such as OLS#3G
Transfers=Others.loc[thismask] # transferred teams have # but no hyphen
for i, sport in enumerate(sportlist): # output roster for all transfers (all grades in case of CYC)
Transferroster=Transfers[Transfers['Sport']==sport]
Transferroster=organizeroster(Transferroster)
Transferroster=Transferroster.sort_values(['Team', 'Sex', 'Grade'], ascending=True)
fname=cnf._OUTPUT_DIR+'\\CYC'+sport+'transfers.csv'
Transferroster=replaceacro(Transferroster,acronyms)
Transferroster.loc[:,'Birthdate']=Transferroster['Birthdate'].apply(lambda x: formatDOB(x))
Transferroster.to_csv(fname, index=False)
# Now deal with junior Cabrini teams (the only thing left after Cabrini CYC,
# transfers and special sports are handled)
Juniorteams=Others.loc[~thismask] # remove transfers
Juniorteams=Juniorteams[Juniorteams['Team']!='drop'] # remove dropped players
# now output all junior teams in same format (sometimes needed by <NAME>)
# also calculate current age
if len(Juniorteams)>0:
Juniorteams=organizeroster(Juniorteams) # put in standard South Central roster format
# Calculate current age from DOBs (renamed to Birthdate for roster only)
Juniorteams.loc[:,'Age']=calcage(Juniorteams['Birthdate'])
fname=cnf._OUTPUT_DIR+'\\Cabrini_junior_teams_'+str(year)+'.csv'
Juniorteams=replaceacro(Juniorteams, acronyms)
Juniorteams.loc[:,'Birthdate']=Juniorteams['Birthdate'].apply(lambda x: formatDOB(x))
Juniorteams.to_csv(fname, index=False)
# Deal with special cases -Track and Chess
# Get DOB/school from players.. anything else needed by <NAME>?
Specials=pd.merge(Specials, players, how='left', on='Plakey', suffixes=('','_r'))
# needs address
Specials=pd.merge(Specials, famcontact, how='left', on='Famkey', suffixes=('','_r2'))
for i, sport in enumerate(speciallist): # output roster for each special sport (Track, Chess)
Specroster=Specials[Specials['Sport']==sport] # subset per sport without clobbering Specials for later sports
Specroster=Specroster.rename(columns={'DOB':'Birthdate'})
mycols=['First', 'Last','Gender','Team','Grade','Birthdate','School','Address','Zip']
Specroster=Specroster[mycols]
Specroster=Specroster.sort_values(['Gender', 'Birthdate', 'Grade'], ascending=True)
Specroster.loc[:,'Birthdate']=Specroster['Birthdate'].apply(lambda x: formatDOB(x))
fname= cnf._OUTPUT_DIR+'\\'+ sport+'_'+str(year)+'_rosters.csv'
Specroster.to_csv(fname, index=False)
return
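# Example (hedged sketch) of the season pipeline implied by the docstring above:
# assign teams first, then generate the rosters; the season/year values are illustrative.
# Mastersignups = assigntoteams(Mastersignups, 'Fall', 2019, teams)
# createrosters(Mastersignups, 'Fall', 2019, players, teams, coaches, famcontact, acronyms)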
def makemultiteam(df):
'''Small utility called by assigntoteams to make a temp teams df with a separate entry for each grade
when a team spans multiple grades, so the merge that assigns teams is straightforward;
also returns twoteams, the sport/gender/grade combos that field two teams '''
# TODO annoying problem with combining teams due to K1 (string but not int)
mycols=df.dtypes.index
# Deal with K1, K2 and such teams
kteams=[str(s) for s in np.ndarray.tolist(df.Graderange.unique()) if 'K' in str(s)]
kteams=[s for s in kteams if len(s)>1] # combo teams only
kteams=df[df['Graderange'].isin(kteams)]
xtrateams=pd.DataFrame(index=np.arange(0,0),columns=mycols) # empty df
# clones rows to match lower grades in range
for index, row in kteams.iterrows():
tempstr= kteams.loc[index]['Graderange']
gr1=0 # 0 for grade K
gr2=int(tempstr[1])
for gr in range(gr1,gr2):
newrow=kteams.loc[index] # grabs row as series
newrow.loc['Grade'] = gr # set to correct grade
xtrateams=xtrateams.append(newrow) # add single row to temp df
df.loc[:,'Grade']=df.Grade.replace('K','0', regex=True)
# get rid of K string problem
df.loc[:,'Graderange']=df.Graderange.replace('K','0', regex=True)
df.loc[:,'Graderange']=df.Graderange.astype('int')
# now handle numbered multiteams (e.g. 45 78 two digit ints)
multiteams=df.loc[df['Graderange']>9] # subset of teams comprised of multiple grades
for index, row in multiteams.iterrows(): # check for 3 or more grades
# TODO make sure it's not 3 grades (i.e. K-2)
tempstr= str(multiteams.loc[index]['Graderange'])
gr1=int(tempstr[0])
gr2=int(tempstr[1])
for gr in range(gr1,gr2):
newrow=multiteams.loc[index] # grabs row as series
newrow.loc['Grade'] = gr # set to correct grade
xtrateams=xtrateams.append(newrow) # add single row to temp df
# Detect gender-grade-sport w/ two teams
# now combine with original df
df=pd.concat([df,xtrateams], ignore_index=True) # complete team set
df=df[mycols] # back in original order
df=df.sort_values(['Gender','Grade'], ascending=True)
# After cloning by grade, look for two teams per grade options
twoteams=df[df.duplicated(['Sport','Gender','Grade'])]
return df, twoteams
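# Example (hedged, illustrative data): a row with Graderange '45' is cloned so that both
# grade-4 and grade-5 signups merge onto the same team entry.
# teams = pd.DataFrame({'Team': ['Cabrini-45B'], 'Sport': ['Soccer'], 'Gender': ['m'],
#                       'Grade': [5], 'Graderange': ['45']})
# expanded, twoteams = makemultiteam(teams)  # expanded now has a Grade 4 and a Grade 5 row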
def detectrosterchange(PMroster, myroster):
'''Compare submitted and returned rosters to look for unique rows (altered by <NAME>)
first row is <NAME> version (presumably correct to match CYC database) and second row is my
submitted version... make any corrections to appropriate source data files
datetime format conversions can be problematic '''
# all columns by default, false drops both duplicates leaving unique rows
bothrosters=pd.concat([PMroster,myroster])
mycols=bothrosters.columns
nanrows=bothrosters[pd.isnull(bothrosters['Birthdate'])]
nanrows=nanrows.drop_duplicates(keep=False)
# ensure player rows are both in correct format
myroster=myroster[pd.notnull(myroster['Birthdate'])]
PMroster=PMroster[pd.notnull(PMroster['Birthdate'])]
def removeLeadZero(val):
if val.startswith('0'):
return val[1:]
else:
return val
myroster.loc[:,'Birthdate']=myroster['Birthdate'].apply(lambda x:pd.to_datetime(x).strftime('%m/%d/%Y'))
PMroster.loc[:,'Birthdate']=PMroster['Birthdate'].apply(lambda x:pd.to_datetime(x).strftime('%m/%d/%Y'))
myroster.loc[:,'Birthdate']=myroster['Birthdate'].apply(lambda x:removeLeadZero(x))
PMroster.loc[:,'Birthdate']=PMroster['Birthdate'].apply(lambda x:removeLeadZero(x))
bothrosters=pd.concat([PMroster,myroster])
bothrosters=bothrosters.sort_values(['Fname','Lname'])
# Fix date string differences
alteredrows=bothrosters.drop_duplicates(keep=False)
alteredrows=alteredrows.append(nanrows)
alteredrows=alteredrows[mycols]
alteredrows=alteredrows.sort_values(['Lname','Fname'])
return alteredrows
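# Example (hedged sketch): comparing a returned roster against the submitted version;
# the Cabrini file name follows the pattern used in createrosters above, the other name is illustrative.
# PMroster = pd.read_csv(cnf._OUTPUT_DIR + '\\returned_VBroster2019.csv')
# myroster = pd.read_csv(cnf._OUTPUT_DIR + '\\Cabrini_VBroster2019.csv')
# changes = detectrosterchange(PMroster, myroster)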
def saveteams(teams):
'''Save teams tab into teams_coaches.xlsx after changes have been made '''
book=load_workbook('Teams_coaches.xlsx')
writer=pd.ExcelWriter('Teams_coaches.xlsx', engine='openpyxl')
writer.book=book
writer.sheets = dict((ws.title, ws) for ws in book.worksheets)
teams.to_excel(writer,sheet_name='Teams',index=False) # this overwrites existing file
writer.save() # saves xls file with all modified data
def assigntoteams(df, season, year, teams, overwrite=False):
'''From mastersignups finds CYC team name based on year, grade, gender and sport from teams tab
(which only contains names from this season/year to avoid screwing up old custom team assignments)'''
# teamsmult has multi grade range teams with duplicates for merge matching
# twoteams is multiple teams for same grade
Teamsmult, Twoteams =makemultiteam(teams) # makes duplicates team entries to match both grades
# compare grades as ints with K=0
df.loc[:,'Grade']=df.Grade.replace('K','0', regex=True) # convert Ks to zeros
df.loc[:,'Grade']=df['Grade'].astype('int')
Teamsmult.loc[:,'Grade']=Teamsmult['Grade'].astype('int') # ensure these are ints
# left merge keeps all master_signups entries
df=pd.merge(df, Teamsmult, how='left', on=['Year','Grade','Gender','Sport'], suffixes=('','_r'))
# need to drop SUkey duplicates (keeping first)... occurs if >1 team per grade
df=df.drop_duplicates(subset=['SUkey']) # drops any duplicates by unique SUkey
# Consider all sports except Track (team assignment done separately by DOB)
sportsdict={'Fall':['VB','Soccer'], 'Winter':['Basketball'],'Spring':['Softball','Baseball','T-ball']}
sportlist=sportsdict.get(season)
# this is post-merge so no chance of getting indices screwed up
# select current sports & year and subset with new team assignment
CurrentSU=df.loc[(df['Sport'].isin(sportlist)) & (df['Year']==year) & (pd.notnull(df['Team_r']))]
if overwrite==False: # if no overwrite, keep only those with nan for team
CurrentSU=CurrentSU.loc[pd.isnull(CurrentSU['Team'])]
# Never overwrite team assignment for known drops
CurrentSU=CurrentSU[CurrentSU['Team']!='drop']
counter=0
for index, row in CurrentSU.iterrows():
# all remaining can be overwritten (those w/ existing team dropped above)
match=df[df['SUkey']==CurrentSU.loc[index]['SUkey']]
if len(match)==1:
thisind=match.index[0]
# add new team assignment to correct index in original master signups
df.loc[thisind, 'Team'] = CurrentSU.loc[index]['Team_r']
counter+=1
print(str(counter),' player(s) newly assigned to teams')
# now drop extra columns and sort
mycols=['SUkey','First', 'Last', 'Grade', 'Gender', 'Sport', 'Year', 'Team', 'Plakey','Famkey', 'Family',
'SUdate', 'Issue date', 'Uniform#','UniReturnDate']
df.loc[:,'Grade']=df.Grade.replace('K',0)
df=df.sort_values(['Year','Sport', 'Gender', 'Grade'], ascending=True)
df.loc[:,'Grade']=df.Grade.replace(0,'K') # make sure any 0 grades (ints at this point) are again replaced with K
df=df[mycols]
autocsvbackup(df,'master_signups', newback=True) # autobackup of master signups
df.to_csv(cnf._INPUT_DIR + '\\master_signups.csv', index=False) # save/overwrite existing csv
return df
def assigntrackgroup(df, year, players):
'''Assign to different track team based on age on May 31 of this year (school year+1)
'''
Track=df[(df['Sport']=='Track') & (df['Year']==year)]
Track=pd.merge(Track,players, how='left', on=['Plakey'], suffixes=('','2'))
numunassigned=len(Track[pd.isnull(Track['Team'])])
for index, row in Track.iterrows():
DOB=Track.loc[index]['DOB'] # merged from players.csv
if isinstance(DOB,str):
DOB=datetime.strptime(DOB,"%m/%d/%Y").date() # convert string to datetime
elif isinstance(DOB, pd.Timestamp):
DOB=DOB.date() # convert timestamp to datetime
trackage=date(year+1,5,31)-DOB # age on May 31 at the end of this school year (calendar year+1 by current convention)
trackage=(trackage.days + trackage.seconds/86400)/365.2425 # as decimal
trackage=math.floor(trackage)
if trackage <=7:
team='Track7'
elif 8 <= trackage <=9:
team='Track89'
elif 10 <= trackage <=11:
team='Track1011'
elif 12 <= trackage <=13:
team='Track1213'
elif 14 <= trackage <=15:
team='Track1415'
else: # probably some entry error
mystr=Track.loc[index]['First']+' '+Track.loc[index]['Last']+' Grade:'+str(Track.loc[index]['Grade'])
print('Suspected DOB error for',mystr, 'DOB:', datetime.strftime(DOB, "%m/%d/%y") )
team=''
# Now write back altered subset to mastersignups (index is lost so use SUkey)
SUkey=int(Track.loc[index]['SUkey'])
match=df[df['SUkey']==SUkey] # This gives correct index
df.loc[match.index[0], 'Team'] = team # alter/assign team for this signup
# Track is a merged copy, so recount unassigned players from the updated master list (df)
stillunassigned=len(df[(df['Sport']=='Track') & (df['Year']==year) & (pd.isnull(df['Team']))])
newlyassigned=numunassigned-stillunassigned
print(newlyassigned,' players assigned to track age group.')
return df
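# Example (hedged sketch): the cutoffs above map ages on May 31 of year+1 to Track7
# (7 and under), Track89, Track1011, Track1213 and Track1415; a typical call is
# Mastersignups = assigntrackgroup(Mastersignups, 2019, players)  # year value illustrative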
def readbackevents(trackevents):
'''
Reads back choices of track events from the summary sheet and preps them for
copying into the Pat Moore spreadsheet;
in the 4x100, 4x200, 4x400 columns enter start order 1,2,3,4,1A,2A
'''
regcols=['Last', 'First', 'Middle', 'Gender',
'DOB', 'Team Code','Event#1', 'Event#2', 'Event#3', 'Event#4']
# Manually enter order of runners and alternates for relays
events=['50M', '100M', '200M', '400M', '800M', '1600M', 'SoftThrow',
'ShotPut','StdLongJump', 'RunLongJump']
regfile=
|
pd.DataFrame(columns=regcols)
|
pandas.DataFrame
|
# https://github.com/tidyverse/tibble/blob/master/tests/testthat/test-tibble.R
from pandas import DataFrame
from pandas.testing import assert_frame_equal
import pytest
from datar import f
from datar.core.exceptions import DataUnrecyclable
from datar.tibble import *
from datar.base import c, nrow, rep, dim, sum, diag, NA, letters, LETTERS, NULL, seq
from datar.dplyr import pull, mutate
from datar.datasets import iris
from .conftest import assert_iterable_equal
def test_mixed_numbering():
df = tibble(a=f[1:5], b=seq(5), c=c(1,2,3,[4,5]), d=c(f[1:3], c(4, 5)))
exp = tibble(a=seq(1,5), b=f.a, c=f.a, d=f.a)
assert_frame_equal(df, exp)
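# Note (derived from the expectation above, hedged): datar's f[1:5] and seq(5) both
# expand to 1..5 inclusive, i.e. slicing on f is end-inclusive rather than half-open.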
def test_correct_rows():
out = tibble(value=range(1,11)) >> nrow()
assert out == 10
out = tibble(value=range(1,11), name="recycle_me") >> nrow()
assert out == 10
out = tibble(name="recycle_me", value=range(1,11)) >> nrow()
assert out == 10
out = tibble(name="recycle_me", value=range(1,11), value2=range(11,21)) >> nrow()
assert out == 10
out = tibble(value=range(1,11), name="recycle_me", value2=range(11,21)) >> nrow()
assert out == 10
def test_null_none_ignored():
out = tibble(a=None)
expect = tibble()
assert_frame_equal(out, expect)
out = tibble(a_=None, a=1)
expect = tibble(a=1)
assert_frame_equal(out, expect)
out = tibble(a=None, b=1, c=[2,3])
expect = tibble(b=1, c=[2,3])
assert_frame_equal(out, expect)
out = tibble(None, b=1, c=[2,3])
expect = tibble(b=1, c=[2,3])
assert_frame_equal(out, expect)
def test_recycle_scalar_or_len1_vec():
out = tibble(value=range(1,11)) >> nrow()
assert out == 10
out = tibble(value=range(1,11), y=1) >> nrow()
assert out == 10
out = tibble(value=range(1,11), y=[1]) >> nrow()
assert out == 10
with pytest.raises(DataUnrecyclable):
tibble(value=range(1,11), y=[1,2,3])
def test_recycle_nrow1_df():
out = tibble(x=range(1,11), y=tibble(z=1))
expect = tibble(x=range(1,11), y=tibble(z=rep(1,10)))
assert_frame_equal(out, expect)
out = tibble(y=tibble(z=1), x=range(1,11))
expect = tibble(y=tibble(z=rep(1,10)), x=range(1,11))
assert_frame_equal(out, expect)
out = tibble(x=1, y=tibble(z=range(1,11)))
expect = tibble(x=rep(1,10), y=tibble(z=range(1,11)))
assert_frame_equal(out, expect)
out = tibble(y=tibble(z=range(1,11)), x=1)
expect = tibble(y=tibble(z=range(1,11)), x=rep(1,10))
assert_frame_equal(out, expect)
def test_missing_names():
x = range(1,11)
df = tibble(x, y=x)
assert df.columns.tolist() == ['x', 'y']
def test_empty():
zero = tibble()
d = zero >> dim()
assert d == (0, 0)
assert zero.columns.tolist() == []
def test_tibble_with_series():
df = tibble(x=1)
df2 = tibble(df.x)
assert_frame_equal(df, df2)
def test_hierarchical_names():
foo = tibble(x=tibble(y=1,z=2))
assert foo.columns.tolist() == ['x$y', 'x$z']
pulled = foo >> pull(f.x)
assert pulled.columns.tolist() == ['y', 'z']
foo = tibble(x=dict(y=1,z=2))
assert foo.columns.tolist() == ['x$y', 'x$z']
pulled = foo >> pull(f.x)
assert pulled.columns.tolist() == ['y', 'z']
def test_meta_attrs_preserved():
foo = tibble(x=1)
foo.attrs['a'] = 1
bar = tibble(foo)
assert bar.attrs['a'] == 1
def test_f_pronoun():
foo = tibble(a=1, b=f.a)
bar = tibble(a=1, b=1)
assert foo.equals(bar)
def test_nest_df():
out = tibble(a=1, b=tibble(x=2))
assert_iterable_equal(out.columns, ['a', 'b$x'])
def test_mutate_semantics():
foo = tibble(a=[1,2], b=1, c=f.b / sum(f.b))
bar = tibble(a=[1,2], b=[1,1], c=[.5,.5])
assert foo.equals(bar)
foo = tibble(b=1, a=[1,2], c=f.b / sum(f.b))
bar = tibble(b=[1,1], a=[1,2], c=[.5,.5])
assert foo.equals(bar)
foo = tibble(b=1.0, c=f.b / sum(f.b), a=[1,2])
bar = tibble(b=[1.0,1.0], c=[1.0,1.0], a=[1,2])
assert foo.equals(bar)
x = 1
df = tibble(x, f.x*2)
assert_frame_equal(df, tibble(x=1, **{'f.x*2': 2}))
# TODO: units preserved when recycled
def test_auto_splicing_anonymous_tibbles():
df = tibble(a=1, b=2)
out = tibble(df)
assert out.equals(df)
out = tibble(df, c=f.b)
expect = tibble(a=1,b=2,c=2)
assert_frame_equal(out, expect)
def test_coerce_dict_of_df():
df = tibble(x=range(1,11))
out = tibble(dict(x=df)) >> nrow()
assert out == 10
out = tibble(dict(x=diag(5))) >> nrow()
assert out == 5
def test_subsetting_correct_nrow():
df = tibble(x=range(1,11))
out = tibble(x=df).loc[:4,:]
expect = tibble(x=df.loc[:4,:])
assert_frame_equal(out, expect)
def test_one_row_retains_column():
out = tibble(y=diag(5)).loc[0, :]
expect = tibble(y=diag(5).loc[0, :].values)
assert (out.values.flatten() == expect.values.flatten()).all()
# tribble
def test_tribble():
out = tribble(
f.colA, f.colB,
"a", 1,
"b", 2
)
expect = tibble(colA=["a", "b"], colB=[1,2])
assert_frame_equal(out, expect)
out = tribble(
f.colA, f.colB, f.colC, f.colD,
1,2,3,4,
5,6,7,8
)
expect = tibble(
colA=[1,5],
colB=[2,6],
colC=[3,7],
colD=[4,8],
)
assert_frame_equal(out, expect)
out = tribble(
f.colA, f.colB,
1,6,
2,7,
3,8,
4,9,
5,10
)
expect = tibble(
colA=[1,2,3,4,5],
colB=[6,7,8,9,10]
)
|
assert_frame_equal(out, expect)
|
pandas.testing.assert_frame_equal
|
"""
Core implementation of :mod:`sklearndf.transformation.wrapper`
"""
import logging
from abc import ABCMeta, abstractmethod
from typing import Any, Generic, List, Optional, TypeVar, Union
import numpy as np
import pandas as pd
from sklearn.base import TransformerMixin
from sklearn.compose import ColumnTransformer
from sklearn.impute import MissingIndicator, SimpleImputer
from sklearn.kernel_approximation import AdditiveChi2Sampler
from sklearn.manifold import Isomap
from sklearn.preprocessing import KBinsDiscretizer, OneHotEncoder, PolynomialFeatures
from pytools.api import AllTracker
from ... import TransformerDF
from ...wrapper import TransformerWrapperDF
log = logging.getLogger(__name__)
__all__ = [
"BaseDimensionalityReductionWrapperDF",
"BaseMultipleInputsPerOutputTransformerWrapperDF",
"ColumnPreservingTransformerWrapperDF",
"ColumnSubsetTransformerWrapperDF",
"ComponentsDimensionalityReductionWrapperDF",
"FeatureSelectionWrapperDF",
"NComponentsDimensionalityReductionWrapperDF",
"NumpyTransformerWrapperDF",
"ColumnTransformerWrapperDF",
"IsomapWrapperDF",
"ImputerWrapperDF",
"MissingIndicatorWrapperDF",
"AdditiveChi2SamplerWrapperDF",
"KBinsDiscretizerWrapperDF",
"PolynomialFeaturesWrapperDF",
"OneHotEncoderWrapperDF",
]
#
# type variables
#
T_Transformer = TypeVar("T_Transformer", bound=TransformerMixin)
# T_Imputer is needed because sklearn's _BaseImputer only exists from v0.22 onwards.
# Once we drop support for sklearn 0.21, _BaseImputer can be used instead.
# The following TypeVar helps to annotate availability of "add_indicator" and
# "missing_values" attributes on an imputer instance for ImputerWrapperDF below
# noinspection PyProtectedMember
from sklearn.impute._iterative import IterativeImputer
T_Imputer = TypeVar("T_Imputer", SimpleImputer, IterativeImputer)
#
# Ensure all symbols introduced below are included in __all__
#
__tracker = AllTracker(globals())
#
# wrapper classes for transformers
#
class NumpyTransformerWrapperDF(
TransformerWrapperDF[T_Transformer], Generic[T_Transformer], metaclass=ABCMeta
):
"""
Abstract base class of DF wrappers for transformers that only accept numpy arrays.
Converts data frames to numpy arrays before handing off to the native transformer.
Implementations must define :meth:`_get_features_original`.
"""
# noinspection PyPep8Naming
def _adjust_X_type_for_delegate(
self, X: pd.DataFrame, *, to_numpy: Optional[bool] = None
) -> np.ndarray:
assert to_numpy is not False, "X must be converted to a numpy array"
return super()._adjust_X_type_for_delegate(X, to_numpy=True)
def _adjust_y_type_for_delegate(
self,
y: Optional[Union[pd.Series, pd.DataFrame]],
*,
to_numpy: Optional[bool] = None,
) -> Optional[np.ndarray]:
assert to_numpy is not False, "y must be converted to a numpy array"
return super()._adjust_y_type_for_delegate(y, to_numpy=True)
class ColumnSubsetTransformerWrapperDF(
TransformerWrapperDF[T_Transformer], Generic[T_Transformer], metaclass=ABCMeta
):
"""
Abstract base class of DF wrappers for transformers that do not change column names,
but that may remove one or more columns.
Implementations must define :meth:`_get_features_out`.
"""
@abstractmethod
def _get_features_out(self) -> pd.Index:
# return column labels for arrays returned by the fitted transformer.
pass
def _get_features_original(self) -> pd.Series:
# map output column names (index) to original column names (values); identical here since columns are not renamed
features_out = self._get_features_out()
return
|
pd.Series(index=features_out, data=features_out.values)
|
pandas.Series
|
import os.path
import numpy as np
import scipy.sparse as sp
import torch
import pandas as pd
import networkx as nx
import matplotlib.pyplot as plt
from sklearn.metrics import *
from sklearn.model_selection import *
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.neural_network import MLPClassifier
def encode_onehot(labels):
classes = set(labels)
classes_dict = {c: np.identity(len(classes))[i, :] for i, c in
enumerate(classes)}
labels_onehot = np.array(list(map(classes_dict.get, labels)),
dtype=np.int32)
return labels_onehot
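# Example (hedged): encode_onehot maps each distinct label to one row of an identity
# matrix, so encode_onehot(['AD', 'NC', 'AD']) returns a (3, 2) int array whose first
# and third rows are identical (column order depends on set() iteration).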
def process_data(graph_type='corr_coef_pval', group='', rand_cut=0.1, pval_cut=0.05, coef_cut=0.7, dataset='adni', feats='dma'):
if(dataset == 'adni'):
# 506 samples
# <id> <74 features> <class label>
clinical = pd.read_excel('/data1/GCN_AB/adni/CTX_Demo_n506.xlsx')
if(group == '.mci'):
clinical = clinical[clinical.Dx == 1]
elif(group == '.nc'):
clinical = clinical[clinical.Dx == 0]
id = clinical.loc[:, "ID"]
demo = clinical.loc[:, ["Age","Sex", "Edu", "APOEe4"]]
mri = clinical.loc[:, "SupraTentorial":"RightMiddleTemporal"]
label = clinical.loc[:, "111CUTOFF"]
# prs = pd.read_csv('{}prs_output_IGAP.profile'.format(path), delimiter=r'\s+')
elif(dataset == 'smc'):
# 521 samples
# <id> <25 features> <class label>
clinical = pd.read_excel('/data1/GCN_AB/smc/CTX_Demo_n521.xlsx')
if(group == '.mci'):
clinical = clinical[clinical.ABPET_Dx == 1]
elif(group == '.nc'):
clinical = clinical[clinical.ABPET_Dx == 0]
id = clinical.loc[:, "IID"]
demo = clinical.loc[:, ["test_age", "sex", "education", "APOE4"]]
mri = clinical.loc[:, "ICV.mm3.":"HV_native_R"]
label = clinical.loc[:, "amyloid_beta"]
feat = pd.concat([demo, mri], axis=1)
if(feats == 'dm'):
feat = feat.drop(['APOEe4'], axis=1)
feat = (feat - feat.min()) / (feat.max() - feat.min())
print(feat.shape)
df = pd.concat([id, feat, label], axis=1)
df.to_csv("/data1/GCN_AB/{}/{}_{}{}.content".format(dataset, dataset, feats, group), sep=' ', index=False, header=False)
if(graph_type == 'random'):
edge_cut = np.around(rand_cut/100*len(id)*len(id)).astype(int)
o = np.ones(edge_cut, dtype=int)
z = np.zeros(np.prod((len(id), len(id))) - edge_cut, dtype=int)
board = np.concatenate([o, z])
np.random.shuffle(board)
board = board.reshape((len(id), len(id)))
graph = nx.from_pandas_adjacency(pd.DataFrame(board, index=id, columns=id))
# graph = nx.binomial_graph(n=506, p=edge_cut/((506*506)/2), seed=22)
# graph = nx.relabel_nodes(graph, {new:old for new,old in enumerate(df.ID)}, copy=False)
graph.remove_edges_from(nx.selfloop_edges(graph))
print(nx.info(graph))
# plot_degree_dist(graph)
nx.write_edgelist(graph, "/data1/GCN_AB/{}/{}_{}_{}_{}{}.edges".format(dataset, dataset, feats, graph_type, rand_cut, group), data=False)
elif(graph_type == 'corr_coef_pval'):
from scipy.stats import pearsonr
corr_df = feat.T
corr_df.columns = id
corr_coef_df = corr_df.corr()
corr_pval_df = corr_df.corr(method=lambda x, y: pearsonr(x, y)[1]) - np.eye(len(corr_df.columns))
df1 = corr_coef_df.stack().reset_index()
df1.columns = ['s', 't', 'w']
print(df1.head())
df2 = corr_pval_df.stack().reset_index()
df2.columns = ['s', 't', 'p']
print(df2.head())
merged_df = pd.concat([df1, df2], axis=1, join='inner')
print(merged_df.head())
print('df1 {}, df2 {}, merged {}'.format(df1.shape[0], df2.shape[0], merged_df.shape[0]))
# graph = nx.from_pandas_adjacency((np.abs(corr_coef_df) > 0.9).astype(int))
# graph = nx.from_pandas_adjacency((corr_pval_df < edge_cut).astype(int))
# graph = nx.from_pandas_adjacency(((np.abs(corr_coef_df) > coef_cut) & (corr_pval_df < pval_cut)).astype(int))
# graph = nx.from_pandas_adjacency(corr_coef_df[corr_pval_df < pval_cut].fillna(0))
# graph.remove_edges_from(nx.selfloop_edges(graph))
# print(nx.info(graph))
# print('sparsity {:.4f}%'.format(graph.number_of_edges() / (graph.number_of_nodes() * graph.number_of_nodes()) * 100))
# plot_degree_dist(graph)
# edge_cut = "{:.1f}_{}".format(coef_cut, pval_cut)
edge_cut = 'weighted'
fname = "/data1/GCN_AB/{}/{}_{}_{}_{}{}.edges".format(dataset, dataset, feats, graph_type, edge_cut, group)
# nx.write_edgelist(graph, fname, data=['weight'])
merged_df.to_csv(fname, sep=' ', header=False, index=False)
sort_edgelist(fname)
# nx.draw_networkx(graph_pval, with_labels=False, node_size=30)
def sort_edgelist(fname):
edgelist =
|
pd.read_table(fname, header=None, sep=' ')
|
pandas.read_table
|
# -*- coding: utf-8 -*-
"""
Small analysis of Estonian kennel shows using Bernese Mountain Dog data from kennelliit.ee and the CatBoost algorithm
"""
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from catboost import CatBoostRegressor, CatBoostClassifier, Pool, cv
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn import metrics
import seaborn as sns
# load and join all data to a single frame
df_2019 = pd.read_csv('dogshows_bernese_est_2019.csv')
df_2018 = pd.read_csv('dogshows_bernese_est_2018.csv')
df_2017 = pd.read_csv('dogshows_bernese_est_2017.csv')
df_2016 = pd.read_csv('dogshows_bernese_est_2016.csv')
df_2015 = pd.read_csv('dogshows_bernese_est_2015.csv')
df_2014 = pd.read_csv('dogshows_bernese_est_2014.csv')
df_2013 = pd.read_csv('dogshows_bernese_est_2013.csv')
frames = [df_2019, df_2018, df_2017, df_2016, df_2015, df_2014, df_2013]
df = pd.concat(frames, join='inner').reset_index(drop=True)
del frames, df_2019, df_2018, df_2017, df_2016, df_2015, df_2014, df_2013
# general preprocessing
df['tulemused'] = df['tulemused'].fillna(value='0') # fill empty values
df["koer"].value_counts() # check dogs' number of records
df = df[df.groupby('koer').koer.transform(len) > 6] # leave out dogs that have low appearance
#df = df[df.groupby('kohtunik').kohtunik.transform(len) > 10] # leave out judges with few appearances
df.insert(loc=6, column='judge_country', value=df['kohtunik'], allow_duplicates=True) # copy column with new name to separate judge and country
df['kohtunik'] = df['kohtunik'].str.replace('Kohtunik: ','').str.split(", ").str[0] # replace strings in column and select first element
df['judge_country'] = df['judge_country'].str.replace('Eesti','Estonia').str.split(", ").str[1] # replace strings in column and select last element
df.insert(loc=3, column='gender', value=df['klass'], allow_duplicates=True) # copy column with new name to divide dog class and gender
df['klass'] = df['klass'].str.replace('E ','').str.replace('I ', '') # remove gender info from class
df['gender'] = df['gender'].str.split(" ").str[0] # replace strings in column and select first words as gender (E or I)
df['koer'] = df['koer'].str.replace('-','').str.replace('/','').str.replace('.','').str.replace("'","").str.replace('LŠVK ', 'LSVK').str.replace('LŠVK', 'LSVK').str.replace('Ž', 'Z') # replace some chars
df['dogcode'] = df['koer'].str.split(' ', 1).str[0] # separate dog code to a new column
df['koer'] = df['koer'].str.title().str.split(' ', 1).str[1] # separate dog name and leave the dog code out
# delete inferior classes (repeating show results Kasv, Paar and Järg) and babies and puppies results
df = df[df.klass != 'Kasv'] # delete rows with value 'Kasv'
df = df[df.klass != 'Paar'] # delete rows with value 'Paar'
df = df[df.klass != 'Järg'] # delete rows with value 'Järg'
df = df[df.klass != 'Beebi'] # delete rows with value 'Beebi'
df = df[df.klass != 'Kuts'] # delete rows with value 'Kuts'
# remove some spaces from results and transform string to a list of results
df['tulemused'] = df['tulemused'].str.replace('SP 1','SP1').str.replace('SP 2','SP2').str.replace('SP 3','SP3').str.replace('SP 4','SP4').str.replace('VL 1','VL1').str.replace('VL 2','VL2').str.replace('VL 3','VL3').str.replace('1 EAH','1EAH').str.replace('2 EAH','2EAH').str.replace('3 EAH','3EAH').str.replace('4 EAH','4EAH').str.replace('Jun SERT','JunSERT')
df["tulemused"] = df["tulemused"].str.split(' ')
# set data types to category
df['naitus'] = df['naitus'].astype('category')
df['koer'] = df['koer'].astype('category')
df['klass'] = df['klass'].astype('category')
df['gender'] = df['gender'].astype('category')
df['kohtunik'] = df['kohtunik'].astype('category')
# count amounts of participants per class and add column with that information to the class data frame
df_dogs_per_show = df.groupby('naitus').size().reset_index(name='dogs_per_show')
df = pd.merge(df, df_dogs_per_show, on=['naitus'], how='inner')
del df_dogs_per_show
"""Hierarcy of titles by dog classes
All dogs (open class - dogs of Ava,Ch, Noo, Jun, Vet classes):
14 - TP
13 - VSP
12 - PI2+SK, PE2+SK
11 - PI3+SK, PE3+SK
10 - PI4+SK, PE4+SK
9 - SK
8 - SP1
7 - SP2
6 - SP3
5 - SP4
4 - VH
3 - H
2 - R
1 - EVH
0 - 0
"""
# formatted show results as numeric and categorical variables - uncomment the one that will be used
gradelist = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15] # numeric grades
#gradelist = ['muhvigi', 'ei_voi_hinnata', 'rahuldav', 'hea', 'vaga_hea', 'suureparane', 'suureparane_4', 'suureparane_3', 'suureparane_2', 'suureparane_1', 'sert_kandidaat', 'parim_4', 'parim_3', 'parim_2', 'vastassugupoole_parim', 'tou_parim'] # categorical grades
# Remap dog result as one grade
def calculate_grade(row):
grade = gradelist[0]
sk = False
try:
if 'SK' in row["tulemused"]: sk = True
for result in row["tulemused"]:
if result == 'TP': grade = gradelist[15]
elif result == 'VSP': grade = gradelist[14]
elif (result == 'PI2' and sk == True) or (result == 'PE2' and sk == True): grade = gradelist[13]
elif (result == 'PI3' and sk == True) or (result == 'PE3' and sk == True): grade = gradelist[12]
elif (result == 'PI4' and sk == True) or (result == 'PE4' and sk == True): grade = gradelist[11]
elif result == 'SK': grade = gradelist[10]
elif (result == 'SP1' and sk == False): grade = gradelist[9]
elif (result == 'SP2' and sk == False): grade = gradelist[8]
elif (result == 'SP3' and sk == False): grade = gradelist[7]
elif (result == 'SP4' and sk == False): grade = gradelist[6]
elif (result == 'SP' and sk == False): grade = gradelist[5]
elif (result == 'VH' and sk == False): grade = gradelist[4]
elif (result == 'H' and sk == False): grade = gradelist[3]
elif (result == 'R' and sk == False): grade = gradelist[2]
elif (result == 'EVH' and sk == False): grade = gradelist[1]
elif (result == '0' and sk == False): grade = gradelist[0]
except:
return grade
return grade
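# Example (hedged): a row whose tulemused list is ['SP1', 'SK', 'PI2'] ends up with grade 13
# (PI2 combined with SK), while ['SP1'] alone gives grade 9 and an empty list gives 0.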
# make new column with zero points to all dogs and then run calculations
df["grade"] = gradelist[0]
df['grade'] = df.apply(calculate_grade, axis=1)
""" Small analysis of results """
# sum dog results and sort values by grade
res_open = df.groupby(['koer', 'gender'])['grade'].sum().reset_index()
res_open = res_open.sort_values('grade', ascending=False).reset_index(drop=True)
# information about judges, where does the judges come from
df_judges = df.drop_duplicates('naitus') # one judge per breed per show, so keep one row per show
df_judges = df_judges.groupby('judge_country')['kohtunik'].count().reset_index(name='nr_of_judges') # get sum of judges per country
#df_judges.to_csv('judge_countries.csv')
""" End of preprocessing, start of CatBoost implementation"""
# Finding the missing values
missingvalues = df.isnull().sum()
df.grade = df.grade.astype(float).fillna(0.0) # convert data type
# Creating a training set for modeling and validation set to check model performance
X = df.drop(['naitus', 'klass', 'gender', 'koeralink', 'tulemused', 'judge_country', 'dogcode', 'dogs_per_show', 'grade'], axis=1)
y = df.grade
X_train, X_validation, y_train, y_validation = train_test_split(X, y, train_size=0.75, random_state=42)
X_test = df.drop(['naitus', 'klass', 'gender', 'koeralink', 'tulemused', 'judge_country', 'dogcode', 'dogs_per_show', 'grade'], axis=1)
# Look at the data type of variables
X.dtypes
categorical_features_indices = np.where(X.dtypes != float)[0]
X_train.info()
# regression model
#model=CatBoostRegressor(iterations=32, random_seed=63, learning_rate=0.5, depth=12, eval_metric='RMSE')
#model.fit(X_train, y_train, cat_features=categorical_features_indices, eval_set=(X_validation, y_validation), use_best_model=True, plot=True)
#print('RMSE:', np.sqrt(metrics.mean_squared_error(y_validation, model.predict(X_validation))))
#reg_pred = model.predict(X_test) # predict test results
# classification model
model = CatBoostClassifier(iterations=9, learning_rate=0.5, loss_function='MultiClass', custom_loss=['Accuracy']) #custom_loss='AUC'
model.fit(X_train, y_train, cat_features=categorical_features_indices, eval_set=(X_validation,y_validation), plot=True)
pool = Pool(X, y, cat_features=categorical_features_indices)
scores = cv(pool, model.get_params(), fold_count=5, plot=True)
best_value = np.min(scores['test-MultiClass-mean'])
best_iter = np.argmin(scores['test-MultiClass-mean'])
# print out best scores
print('Best validation accuracy score: {:.2f}±{:.2f} on step {}'.format(
np.max(scores['test-Accuracy-mean']),
scores['test-Accuracy-std'][np.argmax(scores['test-Accuracy-mean'])],
np.argmax(scores['test-Accuracy-mean'])
))
print('Precise validation accuracy score: {}'.format(np.max(scores['test-Accuracy-mean'])))
# print out feature importance of classification model
feature_importances = model.get_feature_importance(pool)
feature_names = X_train.columns
for score, name in sorted(zip(feature_importances, feature_names), reverse=True):
print('{}: {}'.format(name, score))
""" dataframe with test results"""
results =
|
pd.DataFrame()
|
pandas.DataFrame
|
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import datetime, timedelta
import numpy as np
import pytest
from pandas.errors import (
NullFrequencyError, OutOfBoundsDatetime, PerformanceWarning)
import pandas as pd
from pandas import (
DataFrame, DatetimeIndex, NaT, Series, Timedelta, TimedeltaIndex,
Timestamp, timedelta_range)
import pandas.util.testing as tm
def get_upcast_box(box, vector):
"""
Given two box-types, find the one that takes priority
"""
if box is DataFrame or isinstance(vector, DataFrame):
return DataFrame
if box is Series or isinstance(vector, Series):
return Series
if box is pd.Index or isinstance(vector, pd.Index):
return pd.Index
return box
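# Example (hedged): get_upcast_box(Series, DataFrame({"A": [1]})) returns DataFrame, while
# get_upcast_box(pd.Index, Series([1])) returns Series, matching the priority order above.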
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Comparisons
class TestTimedelta64ArrayLikeComparisons:
# Comparison tests for timedelta64[ns] vectors fully parametrized over
# DataFrame/Series/TimedeltaIndex/TimedeltaArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_timedelta64_zerodim(self, box_with_array):
# GH#26689 should unbox when comparing with zerodim array
box = box_with_array
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
tdi = pd.timedelta_range('2H', periods=4)
other = np.array(tdi.to_numpy()[0])
tdi = tm.box_expected(tdi, box)
res = tdi <= other
expected = np.array([True, False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(res, expected)
with pytest.raises(TypeError):
# zero-dim of wrong dtype should still raise
tdi >= np.array(4)
class TestTimedelta64ArrayComparisons:
# TODO: All of these need to be parametrized over box
def test_compare_timedelta_series(self):
# regression test for GH#5963
s = pd.Series([timedelta(days=1), timedelta(days=2)])
actual = s > timedelta(days=1)
expected = pd.Series([False, True])
tm.assert_series_equal(actual, expected)
def test_tdi_cmp_str_invalid(self, box_with_array):
# GH#13624
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
tdi = TimedeltaIndex(['1 day', '2 days'])
tdarr = tm.box_expected(tdi, box_with_array)
for left, right in [(tdarr, 'a'), ('a', tdarr)]:
with pytest.raises(TypeError):
left > right
with pytest.raises(TypeError):
left >= right
with pytest.raises(TypeError):
left < right
with pytest.raises(TypeError):
left <= right
result = left == right
expected = np.array([False, False], dtype=bool)
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
result = left != right
expected = np.array([True, True], dtype=bool)
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize('dtype', [None, object])
def test_comp_nat(self, dtype):
left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')])
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = rhs != lhs
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(lhs == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == rhs, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(lhs != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != lhs, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(lhs < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > lhs, expected)
def test_comparisons_nat(self):
tdidx1 = pd.TimedeltaIndex(['1 day', pd.NaT, '1 day 00:00:01', pd.NaT,
'1 day 00:00:01', '5 day 00:00:03'])
tdidx2 = pd.TimedeltaIndex(['2 day', '2 day', pd.NaT, pd.NaT,
'1 day 00:00:02', '5 days 00:00:03'])
tdarr = np.array([np.timedelta64(2, 'D'),
np.timedelta64(2, 'D'), np.timedelta64('nat'),
np.timedelta64('nat'),
np.timedelta64(1, 'D') + np.timedelta64(2, 's'),
np.timedelta64(5, 'D') + np.timedelta64(3, 's')])
cases = [(tdidx1, tdidx2), (tdidx1, tdarr)]
# Check pd.NaT is handled the same as np.nan
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
# TODO: better name
def test_comparisons_coverage(self):
rng = timedelta_range('1 days', periods=10)
result = rng < rng[3]
expected = np.array([True, True, True] + [False] * 7)
tm.assert_numpy_array_equal(result, expected)
# raise TypeError for now
with pytest.raises(TypeError):
rng < rng[3].value
result = rng == list(rng)
exp = rng == rng
tm.assert_numpy_array_equal(result, exp)
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Arithmetic Operations
class TestTimedelta64ArithmeticUnsorted:
# Tests moved from type-specific test files but not
# yet sorted/parametrized/de-duplicated
def test_ufunc_coercions(self):
# normal ops are also tested in tseries/test_timedeltas.py
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [idx * 2, np.multiply(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['4H', '8H', '12H', '16H', '20H'],
freq='4H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '4H'
for result in [idx / 2, np.divide(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['1H', '2H', '3H', '4H', '5H'],
freq='H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == 'H'
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [-idx, np.negative(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['-2H', '-4H', '-6H', '-8H', '-10H'],
freq='-2H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '-2H'
idx = TimedeltaIndex(['-2H', '-1H', '0H', '1H', '2H'],
freq='H', name='x')
for result in [abs(idx), np.absolute(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['2H', '1H', '0H', '1H', '2H'],
freq=None, name='x')
tm.assert_index_equal(result, exp)
assert result.freq is None
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
msg = "cannot subtract a datelike from a TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi - dt
with pytest.raises(TypeError, match=msg):
tdi - dti
msg = (r"descriptor '__sub__' requires a 'datetime\.datetime' object"
" but received a 'Timedelta'")
with pytest.raises(TypeError, match=msg):
td - dt
msg = "bad operand type for unary -: 'DatetimeArray'"
with pytest.raises(TypeError, match=msg):
td - dti
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = pd.date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = pd.date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
assert result == expected
assert isinstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
dt_tz - ts
msg = "can't subtract offset-naive and offset-aware datetimes"
with pytest.raises(TypeError, match=msg):
dt_tz - dt
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
dt_tz - ts_tz2
msg = "can't subtract offset-naive and offset-aware datetimes"
with pytest.raises(TypeError, match=msg):
dt - dt_tz
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
ts - dt_tz
with pytest.raises(TypeError, match=msg):
ts_tz2 - ts
with pytest.raises(TypeError, match=msg):
ts_tz2 - dt
with pytest.raises(TypeError, match=msg):
ts_tz - ts_tz2
# with dti
with pytest.raises(TypeError, match=msg):
dti - ts_tz
with pytest.raises(TypeError, match=msg):
dti_tz - ts
with pytest.raises(TypeError, match=msg):
dti_tz - ts_tz2
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
msg = "cannot add indices of unequal length"
with pytest.raises(ValueError, match=msg):
tdi + dti[0:1]
with pytest.raises(ValueError, match=msg):
tdi[0:1] + dti
# random indexes
with pytest.raises(NullFrequencyError):
tdi + pd.Int64Index([1, 2, 3])
# this is a union!
# pytest.raises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
assert result == expected
result = td + dt
expected = Timestamp('20130102')
assert result == expected
# TODO: Needs more informative name, probably split up into
# more targeted tests
@pytest.mark.parametrize('freq', ['D', 'B'])
def test_timedelta(self, freq):
index = pd.date_range('1/1/2000', periods=50, freq=freq)
shifted = index + timedelta(1)
back = shifted + timedelta(-1)
tm.assert_index_equal(index, back)
if freq == 'D':
expected = pd.tseries.offsets.Day(1)
assert index.freq == expected
assert shifted.freq == expected
assert back.freq == expected
else: # freq == 'B'
assert index.freq == pd.tseries.offsets.BusinessDay(1)
assert shifted.freq is None
assert back.freq == pd.tseries.offsets.BusinessDay(1)
result = index - timedelta(1)
expected = index + timedelta(-1)
tm.assert_index_equal(result, expected)
# GH#4134, buggy with timedeltas
rng = pd.date_range('2013', '2014')
s = Series(rng)
result1 = rng - pd.offsets.Hour(1)
result2 = DatetimeIndex(s - np.timedelta64(100000000))
result3 = rng - np.timedelta64(100000000)
result4 = DatetimeIndex(s - pd.offsets.Hour(1))
tm.assert_index_equal(result1, result4)
tm.assert_index_equal(result2, result3)
class TestAddSubNaTMasking:
# TODO: parametrize over boxes
def test_tdi_add_timestamp_nat_masking(self):
# GH#17991 checking for overflow-masking with NaT
tdinat = pd.to_timedelta(['24658 days 11:15:00', 'NaT'])
tsneg = Timestamp('1950-01-01')
ts_neg_variants = [tsneg,
tsneg.to_pydatetime(),
tsneg.to_datetime64().astype('datetime64[ns]'),
tsneg.to_datetime64().astype('datetime64[D]')]
tspos = Timestamp('1980-01-01')
ts_pos_variants = [tspos,
tspos.to_pydatetime(),
tspos.to_datetime64().astype('datetime64[ns]'),
tspos.to_datetime64().astype('datetime64[D]')]
for variant in ts_neg_variants + ts_pos_variants:
res = tdinat + variant
assert res[1] is pd.NaT
def test_tdi_add_overflow(self):
# See GH#14068
# preliminary test scalar analogue of vectorized tests below
with pytest.raises(OutOfBoundsDatetime):
pd.to_timedelta(106580, 'D') + Timestamp('2000')
with pytest.raises(OutOfBoundsDatetime):
Timestamp('2000') + pd.to_timedelta(106580, 'D')
_NaT = int(pd.NaT) + 1
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta([106580], 'D') + Timestamp('2000')
with pytest.raises(OverflowError, match=msg):
Timestamp('2000') + pd.to_timedelta([106580], 'D')
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta([_NaT]) - Timedelta('1 days')
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta(['5 days', _NaT]) - Timedelta('1 days')
with pytest.raises(OverflowError, match=msg):
(pd.to_timedelta([_NaT, '5 days', '1 hours']) -
pd.to_timedelta(['7 seconds', _NaT, '4 hours']))
# These should not overflow!
exp = TimedeltaIndex([pd.NaT])
result = pd.to_timedelta([pd.NaT]) - Timedelta('1 days')
tm.assert_index_equal(result, exp)
exp = TimedeltaIndex(['4 days', pd.NaT])
result = pd.to_timedelta(['5 days', pd.NaT]) - Timedelta('1 days')
tm.assert_index_equal(result, exp)
exp = TimedeltaIndex([pd.NaT, pd.NaT, '5 hours'])
result = (pd.to_timedelta([pd.NaT, '5 days', '1 hours']) +
pd.to_timedelta(['7 seconds', pd.NaT, '4 hours']))
tm.assert_index_equal(result, exp)
class TestTimedeltaArraylikeAddSubOps:
# Tests for timedelta64[ns] __add__, __sub__, __radd__, __rsub__
# TODO: moved from frame tests; needs parametrization/de-duplication
def test_td64_df_add_int_frame(self):
# GH#22696 Check that we don't dispatch to numpy implementation,
# which treats int64 as m8[ns]
tdi = pd.timedelta_range('1', periods=3)
df = tdi.to_frame()
other = pd.DataFrame([1, 2, 3], index=tdi) # indexed like `df`
with pytest.raises(TypeError):
df + other
with pytest.raises(TypeError):
other + df
with pytest.raises(TypeError):
df - other
with pytest.raises(TypeError):
other - df
# TODO: moved from tests.indexes.timedeltas.test_arithmetic; needs
# parametrization+de-duplication
def test_timedelta_ops_with_missing_values(self):
# setup
s1 = pd.to_timedelta(Series(['00:00:01']))
s2 = pd.to_timedelta(Series(['00:00:02']))
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# Passing datetime64-dtype data to TimedeltaIndex is deprecated
sn = pd.to_timedelta(Series([pd.NaT]))
df1 = pd.DataFrame(['00:00:01']).apply(pd.to_timedelta)
df2 = pd.DataFrame(['00:00:02']).apply(pd.to_timedelta)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# Passing datetime64-dtype data to TimedeltaIndex is deprecated
dfn = pd.DataFrame([pd.NaT]).apply(pd.to_timedelta)
scalar1 = pd.to_timedelta('00:00:01')
scalar2 = pd.to_timedelta('00:00:02')
timedelta_NaT = pd.to_timedelta('NaT')
actual = scalar1 + scalar1
assert actual == scalar2
actual = scalar2 - scalar1
assert actual == scalar1
actual = s1 + s1
tm.assert_series_equal(actual, s2)
actual = s2 - s1
tm.assert_series_equal(actual, s1)
actual = s1 + scalar1
tm.assert_series_equal(actual, s2)
actual = scalar1 + s1
tm.assert_series_equal(actual, s2)
actual = s2 - scalar1
tm.assert_series_equal(actual, s1)
actual = -scalar1 + s2
tm.assert_series_equal(actual, s1)
actual = s1 + timedelta_NaT
tm.assert_series_equal(actual, sn)
actual = timedelta_NaT + s1
tm.assert_series_equal(actual, sn)
actual = s1 - timedelta_NaT
tm.assert_series_equal(actual, sn)
actual = -timedelta_NaT + s1
tm.assert_series_equal(actual, sn)
with pytest.raises(TypeError):
s1 + np.nan
with pytest.raises(TypeError):
np.nan + s1
with pytest.raises(TypeError):
s1 - np.nan
with pytest.raises(TypeError):
-np.nan + s1
actual = s1 + pd.NaT
tm.assert_series_equal(actual, sn)
actual = s2 - pd.NaT
tm.assert_series_equal(actual, sn)
actual = s1 + df1
tm.assert_frame_equal(actual, df2)
actual = s2 - df1
tm.assert_frame_equal(actual, df1)
actual = df1 + s1
tm.assert_frame_equal(actual, df2)
actual = df2 - s1
tm.assert_frame_equal(actual, df1)
actual = df1 + df1
tm.assert_frame_equal(actual, df2)
actual = df2 - df1
tm.assert_frame_equal(actual, df1)
actual = df1 + scalar1
tm.assert_frame_equal(actual, df2)
actual = df2 - scalar1
tm.assert_frame_equal(actual, df1)
actual = df1 + timedelta_NaT
tm.assert_frame_equal(actual, dfn)
actual = df1 - timedelta_NaT
tm.assert_frame_equal(actual, dfn)
with pytest.raises(TypeError):
df1 + np.nan
with pytest.raises(TypeError):
df1 - np.nan
actual = df1 + pd.NaT # NaT is datetime, not timedelta
tm.assert_frame_equal(actual, dfn)
actual = df1 - pd.NaT
tm.assert_frame_equal(actual, dfn)
# TODO: moved from tests.series.test_operators, needs splitting, cleanup,
# de-duplication, box-parametrization...
def test_operators_timedelta64(self):
# series ops
v1 = pd.date_range('2012-1-1', periods=3, freq='D')
v2 = pd.date_range('2012-1-2', periods=3, freq='D')
rs = Series(v2) - Series(v1)
xp = Series(1e9 * 3600 * 24,
rs.index).astype('int64').astype('timedelta64[ns]')
tm.assert_series_equal(rs, xp)
assert rs.dtype == 'timedelta64[ns]'
df = DataFrame(dict(A=v1))
td = Series([timedelta(days=i) for i in range(3)])
assert td.dtype == 'timedelta64[ns]'
# series on the rhs
result = df['A'] - df['A'].shift()
assert result.dtype == 'timedelta64[ns]'
result = df['A'] + td
assert result.dtype == 'M8[ns]'
# scalar Timestamp on rhs
maxa = df['A'].max()
assert isinstance(maxa, Timestamp)
resultb = df['A'] - df['A'].max()
assert resultb.dtype == 'timedelta64[ns]'
# timestamp on lhs
result = resultb + df['A']
values = [Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')]
expected = Series(values, name='A')
tm.assert_series_equal(result, expected)
# datetimes on rhs
result = df['A'] - datetime(2001, 1, 1)
expected = Series(
[timedelta(days=4017 + i) for i in range(3)], name='A')
tm.assert_series_equal(result, expected)
assert result.dtype == 'm8[ns]'
d = datetime(2001, 1, 1, 3, 4)
resulta = df['A'] - d
assert resulta.dtype == 'm8[ns]'
# roundtrip
resultb = resulta + d
tm.assert_series_equal(df['A'], resultb)
# timedeltas on rhs
td = timedelta(days=1)
resulta = df['A'] + td
resultb = resulta - td
tm.assert_series_equal(resultb, df['A'])
assert resultb.dtype == 'M8[ns]'
# roundtrip
td = timedelta(minutes=5, seconds=3)
resulta = df['A'] + td
resultb = resulta - td
tm.assert_series_equal(df['A'], resultb)
assert resultb.dtype == 'M8[ns]'
# inplace
value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1))
rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1))
assert rs[2] == value
def test_timedelta64_ops_nat(self):
# GH 11349
timedelta_series = Series([NaT, Timedelta('1s')])
nat_series_dtype_timedelta = Series([NaT, NaT],
dtype='timedelta64[ns]')
single_nat_dtype_timedelta = Series([NaT], dtype='timedelta64[ns]')
# subtraction
tm.assert_series_equal(timedelta_series - NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(-NaT + timedelta_series,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series - single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(-single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
# addition
tm.assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series + NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(NaT + timedelta_series,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series + single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
tm.assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
# multiplication
tm.assert_series_equal(nat_series_dtype_timedelta * 1.0,
nat_series_dtype_timedelta)
tm.assert_series_equal(1.0 * nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series * 1, timedelta_series)
tm.assert_series_equal(1 * timedelta_series, timedelta_series)
tm.assert_series_equal(timedelta_series * 1.5,
Series([NaT, Timedelta('1.5s')]))
tm.assert_series_equal(1.5 * timedelta_series,
Series([NaT, Timedelta('1.5s')]))
tm.assert_series_equal(timedelta_series * np.nan,
nat_series_dtype_timedelta)
tm.assert_series_equal(np.nan * timedelta_series,
nat_series_dtype_timedelta)
# division
tm.assert_series_equal(timedelta_series / 2,
Series([NaT, Timedelta('0.5s')]))
tm.assert_series_equal(timedelta_series / 2.0,
Series([NaT, Timedelta('0.5s')]))
tm.assert_series_equal(timedelta_series / np.nan,
nat_series_dtype_timedelta)
# -------------------------------------------------------------
# Invalid Operations
def test_td64arr_add_str_invalid(self, box_with_array):
# GH#13624
tdi = TimedeltaIndex(['1 day', '2 days'])
tdi = tm.box_expected(tdi, box_with_array)
with pytest.raises(TypeError):
tdi + 'a'
with pytest.raises(TypeError):
'a' + tdi
@pytest.mark.parametrize('other', [3.14, np.array([2.0, 3.0])])
def test_td64arr_add_sub_float(self, box_with_array, other):
tdi = TimedeltaIndex(['-1 days', '-1 days'])
tdarr = tm.box_expected(tdi, box_with_array)
with pytest.raises(TypeError):
tdarr + other
with pytest.raises(TypeError):
other + tdarr
with pytest.raises(TypeError):
tdarr - other
with pytest.raises(TypeError):
other - tdarr
@pytest.mark.parametrize('freq', [None, 'H'])
def test_td64arr_sub_period(self, box_with_array, freq):
# GH#13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
idx = TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
idx = tm.box_expected(idx, box_with_array)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
@pytest.mark.parametrize('pi_freq', ['D', 'W', 'Q', 'H'])
@pytest.mark.parametrize('tdi_freq', [None, 'H'])
def test_td64arr_sub_pi(self, box_with_array, tdi_freq, pi_freq):
# GH#20049 subtracting PeriodIndex should raise TypeError
tdi = TimedeltaIndex(['1 hours', '2 hours'], freq=tdi_freq)
dti = Timestamp('2018-03-07 17:16:40') + tdi
pi = dti.to_period(pi_freq)
# TODO: parametrize over box for pi?
tdi = tm.box_expected(tdi, box_with_array)
with pytest.raises(TypeError):
tdi - pi
# -------------------------------------------------------------
# Binary operations td64 arraylike and datetime-like
def test_td64arr_sub_timestamp_raises(self, box_with_array):
idx = TimedeltaIndex(['1 day', '2 day'])
idx = tm.box_expected(idx, box_with_array)
msg = ("cannot subtract a datelike from|"
"Could not operate|"
"cannot perform operation")
with pytest.raises(TypeError, match=msg):
idx - Timestamp('2011-01-01')
def test_td64arr_add_timestamp(self, box_with_array, tz_naive_fixture):
# GH#23215
# TODO: parametrize over scalar datetime types?
tz = tz_naive_fixture
other = Timestamp('2011-01-01', tz=tz)
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'], tz=tz)
idx = tm.box_expected(idx, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = idx + other
tm.assert_equal(result, expected)
result = other + idx
tm.assert_equal(result, expected)
def test_td64arr_add_sub_timestamp(self, box_with_array):
# GH#11925
ts = Timestamp('2012-01-01')
# TODO: parametrize over types of datetime scalar?
tdi = timedelta_range('1 day', periods=3)
expected = pd.date_range('2012-01-02', periods=3)
tdarr = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(ts + tdarr, expected)
tm.assert_equal(tdarr + ts, expected)
expected2 = pd.date_range('2011-12-31', periods=3, freq='-1D')
expected2 = tm.box_expected(expected2, box_with_array)
tm.assert_equal(ts - tdarr, expected2)
tm.assert_equal(ts + (-tdarr), expected2)
with pytest.raises(TypeError):
tdarr - ts
def test_tdi_sub_dt64_array(self, box_with_array):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) - tdi
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
with pytest.raises(TypeError):
tdi - dtarr
# TimedeltaIndex.__rsub__
result = dtarr - tdi
tm.assert_equal(result, expected)
def test_tdi_add_dt64_array(self, box_with_array):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) + tdi
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = tdi + dtarr
|
tm.assert_equal(result, expected)
|
pandas.util.testing.assert_equal
|
import warnings
warnings.filterwarnings('ignore')
import os
import re
import numpy as np
import tensorflow as tf
import pandas as pd
from tensorflow import keras
from transformers import AutoTokenizer, TFAutoModel
from modules.utils import *
def print_error_train(id, question, context, answer, start_idx, end_idx):
'''
Generic error print for a train dataset
params:
- id: record id.
- question: record's question.
- context: record's context.
- answer: record's answer.
- start_idx: computed start index of true answer.
- end_idx: computed end index of true answer.
'''
print("id:\t\t{} \n".format(id))
print("Question: \t{} \n".format(question))
print("Paragraph:\t{}\n".format(context))
print("True Answer:\t{}".format(answer))
print("Possib Answer:\t{}".format(context[start_idx:end_idx]))
print("Bounds: {} {} ---- Length: {}\n\n".format(start_idx, end_idx, len(context)))
print("==============================================================\n\n\n\n")
def print_error_test(id, question, context):
"""
Generic error print for a test dataset
params:
- id: record id.
- question: record's question.
- context: record's context.
"""
print("id:\t\t{} \n".format(id))
print("Question: \t{} \n".format(question))
print("Paragraph:\t{}\n".format(context))
print("==============================================================\n\n")
def process_test_record(record, tokenizer, max_len = 512, show_bast = False ):
'''
This function tokenizes the test record (without answers) and returns a list with the information needed for a BERT model.
Params:
- record: a single row of the dataset. Here the record has the form: id, title, context, question
- tokenizer: the specific tokenizer to be used
- max_len: maximum length accepted by the BERT model.
- show_bast: debugging option, shows when computed data are longer than max_len
Returns:
- [id, title, input_ids, attention_mask, token_type_ids, offsets]: if the computation is successful
- ["","","","","",""]: if the computation went wrong
'''
id = record["id"]
title = record["title"]
context = record["context"]
question = record["question"]
error_return = ["","","","","",""]
# Clean context, answer and question from unnecessary whitespaces
context = " ".join(str(context).split())
question = " ".join(str(question).split())
# Tokenize context and question
tokenized_context = tokenizer(context, return_offsets_mapping=True)
tokenized_question = tokenizer(question, return_offsets_mapping=True)
offsets = tokenized_context.offset_mapping
# Reserve space for the context: tokens left over after the question
max_ctx_space = max_len - len(tokenized_question.input_ids)
interval = [0,max_ctx_space]
# Build the input: keep the context tokens (with their [CLS]/[SEP]) and drop the question's leading [CLS]
input_ids = tokenized_context.input_ids[interval[0]:interval[1]] + tokenized_question.input_ids[1:]
token_type_ids = [0] * len(tokenized_context.input_ids[interval[0]:interval[1]]) + [1] * len(tokenized_question.input_ids[1:] )
attention_mask = [1] * len(input_ids)
# Pad and create attention masks.
# Skip if truncation is needed
padding_length = max_len - len(input_ids)
if padding_length > 0: # pad
input_ids = input_ids + ([0] * padding_length)
attention_mask = attention_mask + ([0] * padding_length)
token_type_ids = token_type_ids + ([0] * padding_length)
elif padding_length < 0:
if show_bast:
print("Invalid question: max_lenght reached")
print_error_test(id, question, context)
return error_return
return [id, title, input_ids, attention_mask, token_type_ids, offsets]
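# Hedged usage sketch (not part of the original pipeline): the checkpoint name
# and the record layout below are illustrative assumptions, chosen only to show
# the call shape of process_test_record. Requires network access to fetch the
# tokenizer.
def _demo_process_test_record():
    demo_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    demo_record = {"id": "q1", "title": "demo",
                   "context": "Paris is the capital of France.",
                   "question": "What is the capital of France?"}
    # Returns [id, title, input_ids, attention_mask, token_type_ids, offsets]
    return process_test_record(demo_record, demo_tokenizer, max_len=64)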
def process_train_record(record, tokenizer, max_len = 512, show_oub = False, show_bast = False, show_no_answ = False ):
'''
This function tokenizes the train record (complete with answers) and returns a list with the information needed for a Bert model.
Params:
- record: a single row of the dataset. Here the record has the form: id, title, context, question, answer_text, start_idx
- tokenizer: the specific tokenizer to be used
- max_len: maximum length accepted by the BERT model.
- show_oub: debugging option, shows when true answer is out of bound wrt the context
- show_bast: debugging option, shows when computed data are longer than max_len
- show_no_answ: debugging option, shows when the tokenized answer is empty
Returns:
- [id, title, input_ids, attention_mask, token_type_ids, start_token_idx, end_token_idx, offsets]: if the computation is successful
- ["","","","","","","",""]: if the computation went wrong
'''
id = record["id"]
title = record["title"]
context = record["context"]
question = record["question"]
answer = record["answer_text"]
start_idx = record["start_idx"]
error_return = ["","","","","","","",""]
# Clean context, answer and question from unnecessary whitespaces
context = " ".join(str(context).split())
question = " ".join(str(question).split())
answer = " ".join(str(answer).split())
# Find end character index of answer in context
end_idx = start_idx + len(answer)
# Find errors in the dataset
if end_idx > len(context):
if show_oub:
print("Invalid question: out of bound answer")
print_error_train(id, question, context, answer, start_idx, end_idx)
return error_return
# Mark the character indexes in context that are in answer
is_char_in_ans = [0] * len(context)
for idx in range(start_idx, end_idx):
is_char_in_ans[idx] = 1
# Tokenize context and question
tokenized_context = tokenizer(context, return_offsets_mapping=True)
tokenized_question = tokenizer(question, return_offsets_mapping=True)
# Find tokens that were created from answer characters
offsets = tokenized_context.offset_mapping
ans_token_idx = []
for idx, (start, end) in enumerate(offsets):
if sum(is_char_in_ans[start:end]) > 0:
ans_token_idx.append(idx)
if len(ans_token_idx) == 0:
if show_no_answ:
print("Invalid question: no answer token")
print_error_train(id, question, context, answer, start_idx, end_idx)
return error_return
# Find start and end token index for tokens from answer
start_token_idx = ans_token_idx[0]
end_token_idx = ans_token_idx[-1]
max_ctx_space = max_len - len(tokenized_question.input_ids)
interval = [1,max_ctx_space]
# change questions where input_ids would be > max_len
if len(tokenized_question.input_ids) + len(tokenized_context.input_ids) > max_len:
# Consider only the context part that has more influence on the answer
answer_len = end_token_idx - start_token_idx
remain_space = max_len - len(tokenized_question.input_ids) - answer_len
interval = [start_token_idx - remain_space // 2, end_token_idx + remain_space // 2]  # integer division so the bounds can be used as slice indices
# if the proposed interval is out of bound wrt the context, the interval is
# changed accordingly
# Note that the two if statement cannot be true at the same time
if start_token_idx - remain_space/2 < 0:
interval = [1, end_token_idx + remain_space - start_token_idx]
if start_token_idx + remain_space/2 > max_ctx_space :
interval = [max_ctx_space - answer_len - remain_space, max_ctx_space ]
start_token_idx = start_token_idx - interval[0] + 1
end_token_idx = end_token_idx - interval[0] + 1
# Build the input: prepend [CLS] (101), use the selected context window, drop the question's leading [CLS]
input_ids = [101] + tokenized_context.input_ids[interval[0]:interval[1]] + tokenized_question.input_ids[1:]
token_type_ids = [0] * len(tokenized_context.input_ids[interval[0]:interval[1]]) + [1] * len(tokenized_question.input_ids[1:])
attention_mask = [1] * len(input_ids)
# Pad and create attention masks.
# Skip if truncation is needed
padding_length = max_len - len(input_ids)
if padding_length > 0:
input_ids = input_ids + ([0] * padding_length)
attention_mask = attention_mask + ([0] * padding_length)
token_type_ids = token_type_ids + ([0] * padding_length)
elif padding_length < 0:
if show_bast:
print("Contex + answer too long")
print_error_train(id, question, context, answer, start_idx, end_idx)
return error_return
return [id, title, input_ids, attention_mask, token_type_ids, start_token_idx, end_token_idx, offsets]
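# Minimal standalone sketch (illustrative data only) of the answer-span trick
# used in process_train_record: mark the answer characters in the context, then
# keep every token whose offset_mapping overlaps a marked character.
def _demo_char_to_token_span():
    context = "The cat sat on the mat"
    answer, start_idx = "sat", 8
    end_idx = start_idx + len(answer)
    # Hypothetical offsets, as a simple word-level tokenizer might return them.
    offsets = [(0, 3), (4, 7), (8, 11), (12, 14), (15, 18), (19, 22)]
    is_char_in_ans = [0] * len(context)
    for idx in range(start_idx, end_idx):
        is_char_in_ans[idx] = 1
    ans_token_idx = [i for i, (s, e) in enumerate(offsets)
                     if sum(is_char_in_ans[s:e]) > 0]
    return ans_token_idx[0], ans_token_idx[-1]  # -> (2, 2), the token "sat"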
def process_dataset(df, tokenizer, answer_available=True, max_len=512):
'''
Function that processes the whole dataset, changing the procedure based on the presence of answers in the dataset
params:
- df: the dataframe
- tokenizer: the tokenizer specific for the bert model used
- answer_available: True if the answer is in the dataset, False if it is not
- max_len: maximum length accepted by the BERT model
returns:
- if answer_available=True a dataframe with columns ["id" (index),
"title",
"input_ids",
"attention_mask",
"token_type_ids",
"start_token_idx",
"end_token_idx",
"offsets"]
- if answer_available=False a dataframe with columns ["id" (index),
"title",
"input_ids",
"attention_mask",
"token_type_ids",
"offsets"]
'''
if answer_available:
tmp = [process_train_record(record, tokenizer, max_len) for _, record in df.iterrows()]
columns = ["id","title", "input_ids", "attention_mask", "token_type_ids", "start_token_idx", "end_token_idx", "offsets"]
else:
tmp = [process_test_record(record, tokenizer, max_len) for _, record in df.iterrows()]
columns = ["id","title", "input_ids", "attention_mask", "token_type_ids", "offsets"]
proc_df =
|
pd.DataFrame(tmp, columns=columns)
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 12 08:47:38 2018
@author: cenv0574
"""
import os
import json
import pandas as pd
import geopandas as gpd
from itertools import product
def load_config():
# Define current directory and data directory
config_path = os.path.realpath(
os.path.join(os.path.dirname(__file__), '..', '..', '..', 'config.json')
)
with open(config_path, 'r') as config_fh:
config = json.load(config_fh)
return config
def load_table(data_path):
vnm_IO_path = os.path.join(data_path,"INPUT-OUTPUT TABLE 2012","IO Table 2012 English.xlsx")
return pd.read_excel(vnm_IO_path,sheet_name='IO_clean',index_col=0)
def load_sectors(data_path):
vnm_IO_path = os.path.join(data_path,"INPUT-OUTPUT TABLE 2012","IO Table 2012 English.xlsx")
vnmIO_rowcol = pd.read_excel(vnm_IO_path,sheet_name='SectorName')
return vnmIO_rowcol
def get_final_sector_classification():
return ['secA','secB','secC','secD','secE','secF','secG','secH','secI']
def map_sectors(vnm_IO_rowcol):
row_only = vnm_IO_rowcol[vnm_IO_rowcol['mapped'].str.contains("row") | vnm_IO_rowcol['mapped'].str.contains("sec") ]
col_only = vnm_IO_rowcol[vnm_IO_rowcol['mapped'].str.contains("col") | vnm_IO_rowcol['mapped'].str.contains("sec") ]
return dict(zip(row_only.code,row_only.mapped)),dict(zip(col_only.code,col_only.mapped))
def aggregate_table(vnm_IO,vnm_IO_rowcol,in_million=True):
sectors = get_final_sector_classification()
#aggregate table
mapper_row,mapper_col = map_sectors(vnm_IO_rowcol)
vnm_IO.index = vnm_IO.index.map(mapper_row.get)
vnm_IO.columns = vnm_IO.columns.to_series().map(mapper_col)
aggregated = vnm_IO.groupby(vnm_IO.index,axis=0).sum().groupby(vnm_IO.columns, axis=1).sum()
aggregated = aggregated.reindex(sectors+['col1','col2','col3'],axis='columns')
aggregated = aggregated.reindex(sectors+['row1','row2','row3'],axis='index')
if in_million == True:
return aggregated/1000000
else:
return aggregated
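# Hedged sketch (toy numbers, not project data) of the relabel + double groupby
# used in aggregate_table: rows and columns are mapped to coarse sectors, then
# summed along both axes, mirroring the call pattern above.
def _demo_aggregate_table():
    table = pd.DataFrame([[1, 1, 1, 1]] * 4,
                         index=['a1', 'a2', 'b1', 'b2'],
                         columns=['a1', 'a2', 'b1', 'b2'])
    mapper = {'a1': 'secA', 'a2': 'secA', 'b1': 'secB', 'b2': 'secB'}
    table.index = table.index.map(mapper.get)
    table.columns = table.columns.to_series().map(mapper)
    # 2x2 result with each cell equal to 4
    return table.groupby(table.index, axis=0).sum().groupby(table.columns, axis=1).sum()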
def is_balanced(io_table):
row = io_table.sum(axis=0)
col = io_table.sum(axis=1)
if ((row-col).sum() < 1):
print('Table is balanced')
def load_provincial_stats(data_path):
prov_path = os.path.join(data_path,'Vietnam_boundaries','boundaries_stats','province_level_stats.shp')
return gpd.read_file(prov_path)
def estimate_gva(regions,in_million=True):
if in_million == True:
return list(((regions.pro_nfirm*regions.laborcost)+(regions.pro_nfirm*regions.capital))/1000000)
else:
return list(((regions.pro_nfirm*regions.laborcost)+(regions.pro_nfirm*regions.capital)))
def create_proxies(data_path,notrade=False,own_production_ratio=0.9,min_rice=True):
provinces = load_provincial_stats(data_path)
provinces.name_eng = provinces.name_eng.apply(lambda x: x.replace(' ','_').replace('-','_'))
od_table = load_od(data_path,min_rice=min_rice)
create_indices(data_path,provinces,write_to_csv=True)
create_regional_proxy(data_path,provinces,write_to_csv=True)
create_sector_proxies(data_path,provinces,write_to_csv=True)
create_zero_proxies(data_path,od_table,notrade=notrade,write_to_csv=True)
if notrade == False:
create_level14_proxies(data_path,od_table,own_production_ratio,write_to_csv=True)
def create_regional_proxy(data_path,regions,write_to_csv=True):
regions['raw_gva'] = estimate_gva(regions) #regions['pro_nfirm']*regions['laborcost'] + regions['pro_nfirm']*regions['capital']
subset = regions.loc[:,['name_eng','raw_gva']]
subset['year'] = 2010
subset['raw_gva'] = subset.raw_gva.apply(int)/(subset['raw_gva'].sum(axis='index'))
subset = subset[['year','name_eng','raw_gva']]
subset.columns = ['year','id','gdp']
if write_to_csv == True:
csv_path = os.path.join(data_path,'IO_analysis','MRIO_TABLE','proxy_reg_vnm.csv')
subset.to_csv(csv_path,index=False)
def create_indices(data_path,provinces,write_to_csv=True):
# prepare index and cols
region_names = list(provinces.name_eng)
rowcol_names = list(load_sectors(data_path)['mapped'].unique())
rows = [x for x in rowcol_names if (x.startswith('sec') | x.startswith('row'))]*len(region_names)
region_names_list = [item for sublist in [[x]*12 for x in region_names] for item in sublist]
indices = pd.DataFrame([region_names_list,rows]).T
indices.columns = ['region','sector']
indices['sector'] = indices['sector'].apply(lambda x: x.replace('row','other'))
if write_to_csv == True:
csv_path = os.path.join(data_path,'IO_analysis','MRIO_TABLE','indices_mrio.csv')
indices.to_csv(csv_path,index=False)
def create_sector_proxies(data_path,regions,write_to_csv=True):
#list of sectors
sector_list = get_final_sector_classification()
#get own sector classification for region file
map_dict = map_sect_vnm_to_eng()
regions=regions.rename(columns = map_dict)
# get sectoral gva based on proportion of firms in the region
sector_shares = regions[sector_list].multiply(regions['raw_gva'],axis='index')
sector_shares.index = regions.name_eng
for sector in sector_list+['other1','other2','other3']:
if sector in ['other1','other2','other3']:
subset = pd.DataFrame(sector_shares.sum(axis='columns')).divide(pd.DataFrame(sector_shares.sum(axis='columns')).sum(axis='index'))
subset.columns = [sector]
else:
subset = pd.DataFrame(sector_shares.loc[:,sector]).divide(pd.DataFrame(sector_shares.loc[:,sector]).sum(axis='index'))
subset.reset_index(inplace=True,drop=False)
subset['year'] = 2010
subset['sector'] = sector+str(1)
subset[sector] = subset[sector].apply(lambda x: round(x,7))
subset = subset[['year','sector','name_eng',sector]]
subset.columns = ['year','sector','region','gdp']
if write_to_csv == True:
csv_path = os.path.join(data_path,'IO_analysis','MRIO_TABLE','proxy_{}.csv'.format(sector))
subset.to_csv(csv_path,index=False)
def get_trade_value(x,sum_use,sector,own_production_ratio=0.9):
if x.Destination == x.Origin:
try:
return list(sum_use.loc[(sum_use['region'] == x.Destination) & (sum_use['sector'] == sector)]['value'])[0]*own_production_ratio
except:
return 1
elif x.gdp == 0:
return 0
else:
try:
return list(sum_use.loc[(sum_use['region'] == x.Destination) & (sum_use['sector'] == sector)]['value'])[0]*(1-own_production_ratio)*x.ratio
except:
return 0
def create_level14_proxies(data_path,od_table,own_production_ratio=0.9,write_to_csv=True):
# get sector list
sector_list_ini = get_final_sector_classification()+['other1','other2','other3']
sector_list = [x+str(1) for x in sector_list_ini]
od_table.loc[od_table['Destination'] == od_table['Origin'],'gdp'] = 10
od_sum = pd.DataFrame(od_table.groupby(['Destination','Origin']).sum().sum(axis=1))
od_sum['ratio'] = od_sum.groupby(level=0).apply(lambda x:
x / float(x.sum()))
od_sum.reset_index(inplace=True)
od_sum.columns = ['Destination','Origin','gdp','ratio']
df_pretable = pd.read_csv(os.path.join(data_path,'IO_analysis','MRIO_TABLE','notrade_trade.csv'),index_col=[0,1],header=[0,1])
df_pretable = df_pretable.iloc[:,:567]
sum_use = df_pretable.sum(axis=1)
sum_use = pd.DataFrame(sum_use*0.1)
sum_use.reset_index(inplace=True)
sum_use.columns = ['region','sector','value']
combine = []
for sector in sector_list:
if sector[:-1] in ['other1','other2','other3']:
subset = od_sum.copy()
subset['year'] = 2010
subset['sector'] = sector
subset['gdp'] = 0
subset.drop('ratio',axis=1,inplace=True)
combine.append(subset)
else:
subset = od_sum.copy()
subset = subset.loc[od_sum.gdp != 0]
subset['year'] = 2010
subset['sector'] = sector
subset['gdp'] = subset.apply(lambda x: get_trade_value(x,sum_use,sector[:-1],own_production_ratio),axis=1) #subset['gdp'].apply(lambda x: round(x,2))
subset.drop('ratio',axis=1,inplace=True)
combine.append(subset)
all_ = pd.concat(combine)
final_sub = all_[['year','sector','Origin','Destination','gdp']]
final_sub.columns = ['year','sector','region','region','gdp']
if write_to_csv == True:
csv_path = os.path.join(data_path,'IO_analysis','MRIO_TABLE','proxy_trade14_{}.csv'.format(sector[:-1]))
final_sub.to_csv(csv_path,index=False)
def create_zero_proxies(data_path,od_table,notrade=False,write_to_csv=True):
# get sector list
sector_list = get_final_sector_classification()+['other1','other2','other3']
sector_list = [x+str(1) for x in sector_list]
#map sectors to be the same
mapper = map_regions()
od_table['Destination'] = od_table['Destination'].apply(lambda x: mapper[x])
od_table['Origin'] = od_table['Origin'].apply(lambda x: mapper[x])
od_table = od_table.loc[od_table['Destination'] != od_table['Origin']]
od_sum = pd.DataFrame(od_table.groupby(['Destination','Origin']).sum().sum(axis=1))
od_sum.reset_index(inplace=True)
od_sum.columns = ['Destination','Origin','gdp']
if notrade == True:
od_sum['gdp'] = 0
for sector in sector_list:
if sector[:-1] in ['other1','other2','other3']:
subset = od_sum.copy()
subset['year'] = 2010
subset['sector'] = sector
subset['gdp'] = 0
combine = []
for sector2 in sector_list:
sub_subset = subset.copy()
sub_subset['subsector'] = sector2
combine.append(sub_subset)
else:
subset = od_sum.copy()
if notrade == False:
subset = subset.loc[od_sum.gdp == 0]
subset['year'] = 2010
subset['sector'] = sector
subset['gdp'] = 0
combine = []
for sector2 in sector_list:
sub_subset = subset.copy()
sub_subset['subsector'] = sector2
combine.append(sub_subset)
all_ = pd.concat(combine)
final_sub = all_[['year','sector','Origin','subsector','Destination','gdp']]
final_sub.columns = ['year','sector','region','sector','region','gdp']
if write_to_csv == True:
csv_path = os.path.join(data_path,'IO_analysis','MRIO_TABLE','proxy_trade_{}.csv'.format(sector[:-1]))
final_sub.to_csv(csv_path,index=False)
def load_output(data_path,provinces,notrade=True):
# prepare index and cols
region_names = list(provinces.name_eng)
rowcol_names = list(load_sectors(data_path)['mapped'].unique())
rows = [x for x in rowcol_names if (x.startswith('sec') | x.startswith('row'))]*len(region_names)
cols = [x for x in rowcol_names if (x.startswith('sec') | x.startswith('col'))]*len(region_names)
region_names_list = [item for sublist in [[x]*12 for x in region_names] for item in sublist]
index_mi = pd.MultiIndex.from_arrays([region_names_list,rows], names=('region', 'row'))
column_mi = pd.MultiIndex.from_arrays([region_names_list,cols], names=('region', 'col'))
# read output
if notrade == True:
output_path = os.path.join(data_path,'IO_analysis','MRIO_TABLE','output_notrade.csv')
else:
output_path = os.path.join(data_path,'IO_analysis','MRIO_TABLE','output.csv')
output_df =
|
pd.read_csv(output_path,header=None)
|
pandas.read_csv
|
import logging
import os
import pandas as pd
import argparse
import random
from math import exp
from scipy.special import softmax
from collections import Counter
from data_processors import RedditInDomainDataProcessor, RedditOutOfDomainDataProcessor
from sklearn.svm import SVC
from sklearn.pipeline import Pipeline
from sklearn.ensemble import VotingClassifier, BaggingClassifier
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn.neural_network import MLPClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.metrics import f1_score
from nltk.stem import PorterStemmer
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from xgboost import XGBClassifier
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
class WordStemTransformer(TransformerMixin, BaseEstimator):
def __init__(self):
self.porter = PorterStemmer()
def transform(self, X):
X_trans = []
for example in X:
words = word_tokenize(example)
words = list(map(self.porter.stem, words))
X_trans.append(' '.join(words))
return X_trans
def fit(self, X, y=None):
return self
class FunctionWordTransformer(WordStemTransformer):
def __init__(self):
self.stopwords = set(stopwords.words('english'))
def transform(self, X):
X_trans = []
for example in X:
words = word_tokenize(example)
function_words = list(filter(lambda word: word in self.stopwords, words))
X_trans.append(' '.join(function_words))
return X_trans
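# Illustrative example (not from the original script); requires the NLTK
# 'punkt' and 'stopwords' corpora to be downloaded. FunctionWordTransformer
# keeps only function words, so "The cat sat on the mat" reduces to "on the"
# (the capitalised "The" is not in the lowercase stopword set).
def _demo_function_word_transformer():
    return FunctionWordTransformer().transform(["The cat sat on the mat"])  # ['on the']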
def get_prediction_data(dir_path, fold_nr, name, model_type, max_features):
dir_path += fold_nr + '/'
if not os.path.exists(dir_path):
return pd.DataFrame()
match_str = f'{name}_{model_type}_{max_features}'
matches = list(filter(lambda filename: filename.startswith(match_str),
os.listdir(dir_path)))
if len(matches) > 0:
return pd.read_csv(dir_path + matches[0]).sort_values(by=['guid'])
return pd.DataFrame()
def str2model(model_name):
models = {
'svm' : SVC(kernel='linear', cache_size=4098, decision_function_shape='ovr', probability=True),
'ffnn' : MLPClassifier(),
'XGBoost' : XGBClassifier(max_depth=20),
}
return models[model_name]
def get_tfidf_pipeline_for_model(model_name, ngram_range, analyzer, max_features):
return [
('tf-idf', TfidfVectorizer(max_features=max_features, ngram_range=ngram_range, analyzer=analyzer)),
(model_name, str2model(model_name))
]
def get_lemma_pipeline_for_model(model_name, ngram_range, analyzer, max_features):
return [ ('lemma', WordStemTransformer())] + get_tfidf_pipeline_for_model(model_name, ngram_range, analyzer, max_features)
def get_func_word_pipeline_for_model(model_name, ngram_range, analyzer, max_features):
return [ ('function-words', FunctionWordTransformer())] + get_tfidf_pipeline_for_model(model_name, ngram_range, analyzer, max_features)
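# Hedged sketch (parameter values here are illustrative, not the settings used
# in main()): the helpers above compose sklearn Pipelines, e.g. a character
# 3-gram TF-IDF vectorizer feeding a linear SVM.
def _demo_char_ngram_pipeline():
    return Pipeline(get_tfidf_pipeline_for_model('svm', (3, 3), 'char', 1000))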
def get_all_reddit_examples():
data_proc = RedditInDomainDataProcessor(0)
data_proc.discover_examples()
examples = []
for usernames in data_proc.lang2usernames.values():
for username in usernames:
user_examples = data_proc.user2examples[username]
examples.extend(user_examples)
return sorted(examples, key=lambda ex: ex.guid)
def get_all_out_of_domain_examples():
data_proc = RedditOutOfDomainDataProcessor(0)
data_proc.get_train_examples()
examples = []
for user_examples in data_proc.europe_user2examples.values():
# differ between indomain and out-of-domain
for example in user_examples:
examples.append(example)
for user_examples in data_proc.non_europe_user2examples.values():
for example in user_examples:
example.guid = 'out-of-domain-' + example.guid
examples.append(example)
return examples
def get_examples_based_on_csv(csv_filepath, examples, is_testing=False):
logger.info(f'csv filepath {csv_filepath}')
if is_testing:
guids = set(pd.read_csv(csv_filepath)['guid'].apply(lambda col: 'out-of-domain-' + col))
else:
guids = set(pd.read_csv(csv_filepath)['guid'])
filtered_examples = list(filter(lambda ex: ex.guid in guids, examples))
logger.info(f'Len filtered: {len(filtered_examples)} len guids: {len(guids)}')
assert len(filtered_examples) == len(guids)
return sorted(filtered_examples, key=lambda ex: ex.guid)
def save_results(predictions_path, model_name, bagging_estimator, estimators, max_features, with_bert, eval_acc, f1):
base_estimator_name = estimators[0][1].steps[-1][0]
bert_string = 'wBERT_' if with_bert else ''
filename = f'{model_name}_{bagging_estimator}_{base_estimator_name}_{max_features}_{bert_string}{eval_acc:.3f}_{f1:.3f}'
with open(predictions_path + f'/results/{filename}.txt', 'w') as f:
f.write(f'accuracy : {eval_acc}\n')
f.write(f'f1 : {f1}\n')
f.write(', '.join([estimator[0] for estimator in estimators]))
def merge_with_bert(df, csv_filepath, bert_output_type=None, is_testing=False):
def map_logit_to_probability(logit):
odds = exp(logit)
prob = odds / (1 + odds)
return prob
bert_df = pd.read_csv(csv_filepath).drop(columns=['input', 'output', 'input_label', 'output_label'])
if is_testing:
bert_df['guid'] = bert_df['guid'].apply(lambda col: 'out-of-domain-' + col)
bert_df = bert_df.sort_values(by=['guid'])
non_guid_columns = bert_df.columns.difference(['guid'])
if bert_output_type == 'probabilities':
# Map logits to probabilities for all columns, except guid
bert_df[non_guid_columns] = bert_df[non_guid_columns].applymap(lambda cell: map_logit_to_probability(cell))
elif bert_output_type == 'softmax':
bert_df[non_guid_columns] = bert_df[non_guid_columns].apply(lambda row: softmax(row), axis=1)
elif bert_output_type == 'all':
bert_df[[col + '_prop' for col in non_guid_columns]] = bert_df[non_guid_columns].applymap(lambda cell: map_logit_to_probability(cell))
bert_df[[col + '_softmax' for col in non_guid_columns]] = bert_df[non_guid_columns].apply(lambda row: softmax(row), axis=1)
combined_df = pd.merge(df, bert_df, on=['guid'])
return combined_df
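# Illustrative check (not from the original code) of the logit-to-probability
# mapping inside merge_with_bert: exp(x) / (1 + exp(x)) is the logistic sigmoid,
# so a logit of 0 maps to 0.5 and large positive logits approach 1.
def _demo_logit_to_probability(logit=0.0):
    odds = exp(logit)
    return odds / (1 + odds)  # 0.5 for logit=0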
def main(args):
reddit = True
use_bert = True
out_of_domain = args.out_of_domain
logger.info(f'Using out of domain: {out_of_domain}')
bert_output_type = ''
max_features = 30000
stack_type = 'meta_classifier'
meta_classifier_type = 'ffnn'
base_model_type = 'ffnn'
domain_str = '/out-of-domain/' if out_of_domain else '/'
folds_location = f'./results/reddit{domain_str}seq_512_batch_16_epochs_5.0_lr_3e-05'
num_bagging_classifiers = 200
max_samples = 0.8
mem_path = './common_predictions/cache'
prefix = 'reddit' if reddit else 'toefl'
predictions_path = f'./common_predictions/{prefix}_predictions{domain_str}'
all_examples = get_all_reddit_examples() if not out_of_domain else get_all_out_of_domain_examples()
if args.fold_nr:
filename = f'fold_{args.fold_nr}.csv'
logger.info(f'Running only file: {filename}')
filenames = [filename]
else:
filenames = sorted(os.listdir(folds_location))
for filename in filenames:
if os.path.isdir(filename):
# Skip the training folds folder itself
continue
# Base training on the same folds used for BERT
fold_nr, file_type = filename.split('.')
if not file_type == 'csv' or 'lock' in filename:
continue
logger.info(f'Loading data for fold {fold_nr}')
training_folder = folds_location + '/training_predictions'
training_filename = list(filter(lambda name: fold_nr in name, sorted(os.listdir(training_folder))))[0]
bert_training_file = f'{training_folder}/{training_filename}'
bert_test_file = f'{folds_location}/{filename}'
training_examples = get_examples_based_on_csv(bert_training_file, all_examples)
test_examples = get_examples_based_on_csv(bert_test_file, all_examples, is_testing=out_of_domain)
training_examples_no_guid = [ex.text_a for ex in training_examples]
y_train_no_guid = [ex.label for ex in training_examples]
y_test_no_guid = [ex.label for ex in test_examples]
test_examples_no_guid = [ex.text_a for ex in test_examples]
training_guids = [ex.guid for ex in training_examples]
test_guids = [ex.guid for ex in test_examples]
logger.info(f'Running {fold_nr} {max_features} {stack_type} {base_model_type}')
char_2_gram_pipeline = Pipeline(get_tfidf_pipeline_for_model(base_model_type, (2,2), 'char', max_features), memory=mem_path)
char_3_gram_pipeline = Pipeline(get_tfidf_pipeline_for_model(base_model_type, (3,3), 'char', max_features), memory=mem_path)
char_4_gram_pipeline = Pipeline(get_tfidf_pipeline_for_model(base_model_type, (4,4), 'char', max_features), memory=mem_path)
word_1_gram_pipeline = Pipeline(get_tfidf_pipeline_for_model(base_model_type, (1,1), 'word', max_features), memory=mem_path)
word_2_gram_pipeline = Pipeline(get_tfidf_pipeline_for_model(base_model_type, (2,2), 'word', max_features), memory=mem_path)
word_3_gram_pipeline = Pipeline(get_tfidf_pipeline_for_model(base_model_type, (3,3), 'word', max_features), memory=mem_path)
lemma_1_gram_pipeline = Pipeline(get_lemma_pipeline_for_model(base_model_type, (1,1), 'word', max_features), memory=mem_path)
lemma_2_gram_pipeline = Pipeline(get_lemma_pipeline_for_model(base_model_type, (2,2), 'word', max_features), memory=mem_path)
func_1_gram_pipeline = Pipeline(get_func_word_pipeline_for_model(base_model_type, (1,1), 'word', max_features), memory=mem_path)
func_2_gram_pipeline = Pipeline(get_func_word_pipeline_for_model(base_model_type, (2,2), 'word', max_features), memory=mem_path)
estimators = [
('char2', char_2_gram_pipeline),
('char3', char_3_gram_pipeline),
('char4', char_4_gram_pipeline),
('word1', word_1_gram_pipeline),
('word2', word_2_gram_pipeline),
('word3', word_3_gram_pipeline),
('lemma1', lemma_1_gram_pipeline),
('lemma2', lemma_2_gram_pipeline),
('func1', func_1_gram_pipeline),
('func2', func_2_gram_pipeline),
]
for name, pipeline in estimators:
model_name = pipeline.steps[-1][0]
if not get_prediction_data(predictions_path + 'train/', fold_nr, name, model_name, max_features).empty:
logger.info(f'Skipping {fold_nr} {name} {model_name} {max_features}')
continue
logger.info(f'Training {name} {model_name} {max_features}...')
pipeline.fit(training_examples_no_guid, y_train_no_guid)
training_predictions = pipeline.predict_proba(training_examples_no_guid)
test_predictions = pipeline.predict_proba(test_examples_no_guid)
classes = pipeline.steps[-1][1].classes_
training_df = pd.DataFrame(data=training_predictions, columns=classes)
training_df['guid'] = training_guids
test_df = pd.DataFrame(data=test_predictions, columns=classes)
test_df['guid'] = test_guids
eval_acc = pipeline.score(test_examples_no_guid, y_test_no_guid)
logger.info(f'Model accuracy: {eval_acc}')
full_name = f'{name}_{model_name}_{max_features}_{eval_acc:.3f}.csv'
training_fold_folder = f'{predictions_path}train/{fold_nr}'
test_fold_folder = f'{predictions_path}test/{fold_nr}'
if not os.path.exists(training_fold_folder):
os.makedirs(training_fold_folder)
if not os.path.exists(test_fold_folder):
os.makedirs(test_fold_folder)
training_df.to_csv(f'{training_fold_folder}/{full_name}', index=False)
test_df.to_csv(f'{test_fold_folder}/{full_name}', index=False)
training_frames = []
test_frames = []
for name, pipeline in estimators:
model_name = pipeline.steps[-1][0]
training_df = get_prediction_data(predictions_path + 'train/', fold_nr, name, model_name, max_features)
test_df = get_prediction_data(predictions_path + 'test/', fold_nr, name, model_name, max_features)
assert not training_df.empty, 'Training data frame was empty'
assert not test_df.empty, 'Test data frame was empty'
training_frames.append(training_df)
test_frames.append(test_df)
all_training_data_df = training_frames[0]
all_test_data_df = test_frames[0]
for i in range(1, len(training_frames)):
all_training_data_df = pd.merge(all_training_data_df, training_frames[i], on='guid')
all_test_data_df =
|
pd.merge(all_test_data_df, test_frames[i], on='guid')
|
pandas.merge
|
import gc
import pandas as pd
import numpy as np
from typing import Generator, Tuple, List
from tqdm import tqdm
from toolbox.ml.model_wrapper import ModelWrapper
from toolbox.utils.slice_holder import SliceHolder
def calc_ml_factor(model: ModelWrapper, features: pd.DataFrame, target: pd.Series, eval_days: int, refit_every: int,
expanding: int = None, rolling: int = None) -> pd.Series:
"""
Calculates an alpha factor using an ML factor combination method.
The model is fit and predictions are made in a ModelWrapper
This function organizes the data so the model can make unbiased predictions
on what would have been point-in-time data.
This function assumes that the data passed has all trading days in it (first level of index).
E.g. if the data is missing for one day then we will miss a day of predictions.
:param model: the ModelWrapper that will be used to make predictions.
:param features: the features to train the model on
there cannot be null values
must have a multi index of (pd.Timestamp, symbol)
:param target: the target we are going to fit the model to
there cannot be null values
must have a multi index of (pd.Timestamp, symbol)
:param eval_days: IF INCORRECT THERE WILL BE LOOK AHEAD BIAS
the amount of days it takes to know the predictions outcome
this number should simply be the length of return we are trying to predict
:param refit_every: the amount of consecutive days to predict using a single model
this is essentially saying refit the model every x days
:param expanding: the minimum amount of days of data to train on
if rolling is passed then this should not be passed
if this value is passed then the model will be trained with an expanding window of data
:param rolling: the amount of rolling days to fit a model to
if expanding is passed then this should not be passed
:return: pandas series of predictions. The index will be the same as "features"
"""
features_copy: pd.DataFrame = features.copy().sort_index()
target_copy: pd.Series = target.copy().sort_index()
if not np.isfinite(features_copy.values).all():
raise ValueError('There are nan or inf values in the features')
if not np.isfinite(target_copy.values).all():
raise ValueError('There are nan or inf values in the target')
if not isinstance(features_copy.index, pd.MultiIndex):
raise ValueError('Features and target must have a pd.MultiIndex of (pd.Timestamp, str)')
if not isinstance(features_copy.index.get_level_values(0), pd.DatetimeIndex):
raise ValueError('Features and target must have index level 0 of pd.DatetimeIndex')
if not features_copy.index.equals(target_copy.index):
raise ValueError('The index for the features and target is different')
train_predict_slices: Generator[Tuple[SliceHolder, SliceHolder], None, None] = \
generate_indexes(features_copy.index, eval_days, refit_every, expanding, rolling)
ml_alpha: List[pd.Series] = []
for train_slice, predict_slice in tqdm(train_predict_slices):
features_train = features_copy.loc[train_slice.start:train_slice.end]
target_train = target_copy.loc[train_slice.start:train_slice.end]
predict = features_copy.loc[predict_slice.start:predict_slice.end]
ml_alpha.append(model.predict(features_train, target_train, predict))
del features_copy, target_copy
gc.collect()
return
|
pd.concat(ml_alpha)
|
pandas.concat
|
from flask import Flask, request, jsonify
import pickle
import datetime
from datetime import date
import pandas
app = Flask(__name__)
import json
import csv
import math
import pytz
import random
import os, time
from sklearn.preprocessing import PolynomialFeatures
def get_daytype(now):
if now < 5:
return 0, 1
else:
return 1, 0
def get_season(now):
Y = 2016
seasons = [('winter', (date(Y, 1, 1), date(Y, 2, 29))),
('spring', (date(Y, 3, 1), date(Y, 5, 31))),
('summer', (date(Y, 6, 1), date(Y, 6, 30)))]
now = now.replace(year=Y)
return next(season for season, (start, end) in seasons
if start <= now <= end)
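# Illustrative check (any year works because get_season normalizes the year to
# 2016 before comparing; note the seasons table above only covers 1 Jan - 30 Jun):
def _demo_get_season():
    return get_season(date(2021, 6, 15))  # -> 'summer'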
def get_timeslot(now):
start_time = '00:00'
end_time = '23:59'
slot_time = 30
c = 1
hours = {}
time = datetime.datetime.strptime(start_time, '%H:%M')
end = datetime.datetime.strptime(end_time, '%H:%M')
while time <= end:
hours[time.time()] = c
time += datetime.timedelta(minutes=slot_time)
c += 1
timeMinute = now.minute
timeHour = now.hour
convertTime = str(timeMinute) + str(timeHour)
time = ''
if timeMinute > 30:
timeMinute = 30
convertTime = str(timeHour) + ':' + str(30)
time = datetime.datetime.strptime(convertTime, '%H:%M').time()
else:
timeMinute = 0
convertTime = str(timeHour) + ':' + str(00)
time = datetime.datetime.strptime(convertTime, '%H:%M').time()
return hours[time]
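# Illustrative check (times are arbitrary examples): get_timeslot maps a time of
# day onto one of 48 half-hour slots counted from 1 at 00:00, so 09:45 falls in
# the 09:30 slot, i.e. slot number 20.
def _demo_get_timeslot():
    return get_timeslot(datetime.datetime(2016, 1, 1, 9, 45).time())  # -> 20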
@app.route('/', methods=['GET'])
def get():
date_time_str = int(request.args.get('times'))/1000
date_time_obj = datetime.datetime.fromtimestamp(int(date_time_str))
# tz_src = pytz.timezone("America/Chicago")
# tz_dest = pytz.timezone("America/New_York")
# d_aware = tz_src.localize(date_time_obj)
# date_time_obj = d_aware.astimezone(tz_dest)
fileObject = open('ml2.sav', 'rb')
model = pickle.load(fileObject)
df_header = pandas.read_csv('header.csv')
h3_Contents = {}
zones_h3_Contents = {}
with open('zones.csv', 'r') as f:
zones_h3_Contents = {row[0]:0 for row in csv.reader(f)}
del zones_h3_Contents['pickup-zone']
zone_Scores = {}
h3_Content = {}
with open('manhattan_zones_lat_lon_3.json') as json_file:
h3_Contents = json.load(json_file)
for k in h3_Contents.keys():
if k in zones_h3_Contents:
h3_Content['pickup-zone_'+k] = [0]
seasons = ['winter', 'spring', 'summer']
d = date_time_obj.date()
t = date_time_obj.time()
# season = get_season(d)
timeslot = get_timeslot(t)
weekend, weekday = get_daytype(date_time_obj.weekday())
seasons_dict = {}
seasons_dict['season_spring'] = [1]
# for s in seasons:
# if s == season:
# seasons_dict['season_'+s] = [1]
weekend_dict = {'weekend': [weekend], 'weekday': [weekday]}
timeslot_dict = {'pickup_time_slot': [timeslot]}
h3_Content.update(weekend_dict)
h3_Content.update(seasons_dict)
h3_Content.update(timeslot_dict)
df =
|
pandas.DataFrame.from_dict(h3_Content)
|
pandas.DataFrame.from_dict
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
import math
from scipy.stats import t
import unittest
import numpy as np
import pandas as pd
from mlos.Optimizers.RegressionModels.HomogeneousRandomForestRegressionModel import HomogeneousRandomForestRegressionModel
from mlos.Optimizers.RegressionModels.HomogeneousRandomForestConfigStore import homogeneous_random_forest_config_store
from mlos.Optimizers.RegressionModels.Prediction import Prediction
from mlos.Optimizers.ExperimentDesigner.UtilityFunctions.ConfidenceBoundUtilityFunction import \
ConfidenceBoundUtilityFunction, confidence_bound_utility_function_config_store
from mlos.Spaces import ContinuousDimension, Point, SimpleHypergrid
import mlos.global_values as global_values
class TestConfidenceBoundUtilityFunction(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
global_values.declare_singletons()
cls.slope = 10
cls.y_intercept = 10
cls.input_values = np.linspace(start=0, stop=100, num=1000, endpoint=True)
cls.output_values = cls.input_values * cls.slope + cls.y_intercept
cls.input_space = SimpleHypergrid(
name="input",
dimensions=[ContinuousDimension(name="x", min=0, max=100)]
)
cls.output_space = SimpleHypergrid(
name="output",
dimensions=[ContinuousDimension(name="y", min=-math.inf, max=math.inf)]
)
cls.input_pandas_dataframe = pd.DataFrame({"x": cls.input_values})
cls.output_pandas_dataframe =
|
pd.DataFrame({"y": cls.output_values})
|
pandas.DataFrame
|
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import timedelta
from numpy import nan
import numpy as np
import pandas as pd
from pandas import (Series, isnull, date_range,
MultiIndex, Index)
from pandas.tseries.index import Timestamp
from pandas.compat import range
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
from .common import TestData
def _skip_if_no_pchip():
try:
from scipy.interpolate import pchip_interpolate # noqa
except ImportError:
import nose
raise nose.SkipTest('scipy.interpolate.pchip missing')
def _skip_if_no_akima():
try:
from scipy.interpolate import Akima1DInterpolator # noqa
except ImportError:
import nose
raise nose.SkipTest('scipy.interpolate.Akima1DInterpolator missing')
class TestSeriesMissingData(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_timedelta_fillna(self):
# GH 3371
s = Series([Timestamp('20130101'), Timestamp('20130101'), Timestamp(
'20130102'), Timestamp('20130103 9:01:01')])
td = s.diff()
# reg fillna
result = td.fillna(0)
expected = Series([timedelta(0), timedelta(0), timedelta(1), timedelta(
days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
# interpreted as seconds
result = td.fillna(1)
expected = Series([timedelta(seconds=1), timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
result = td.fillna(timedelta(days=1, seconds=1))
expected = Series([timedelta(days=1, seconds=1), timedelta(
0), timedelta(1), timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
result = td.fillna(np.timedelta64(int(1e9)))
expected = Series([timedelta(seconds=1), timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
from pandas import tslib
result = td.fillna(tslib.NaT)
expected = Series([tslib.NaT, timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)],
dtype='m8[ns]')
assert_series_equal(result, expected)
# ffill
td[2] = np.nan
result = td.ffill()
expected = td.fillna(0)
expected[0] = np.nan
assert_series_equal(result, expected)
# bfill
td[2] = np.nan
result = td.bfill()
expected = td.fillna(0)
expected[2] = timedelta(days=1, seconds=9 * 3600 + 60 + 1)
assert_series_equal(result, expected)
def test_datetime64_fillna(self):
s = Series([Timestamp('20130101'), Timestamp('20130101'), Timestamp(
'20130102'), Timestamp('20130103 9:01:01')])
s[2] = np.nan
# reg fillna
result = s.fillna(Timestamp('20130104'))
expected = Series([Timestamp('20130101'), Timestamp(
'20130101'), Timestamp('20130104'), Timestamp('20130103 9:01:01')])
assert_series_equal(result, expected)
from pandas import tslib
result = s.fillna(tslib.NaT)
expected = s
assert_series_equal(result, expected)
# ffill
result = s.ffill()
expected = Series([Timestamp('20130101'), Timestamp(
'20130101'), Timestamp('20130101'), Timestamp('20130103 9:01:01')])
assert_series_equal(result, expected)
# bfill
result = s.bfill()
expected = Series([Timestamp('20130101'), Timestamp('20130101'),
Timestamp('20130103 9:01:01'), Timestamp(
'20130103 9:01:01')])
assert_series_equal(result, expected)
# GH 6587
# make sure that we are treating as integer when filling
# this also tests inference of a datetime-like with NaT's
s = Series([pd.NaT, pd.NaT, '2013-08-05 15:30:00.000001'])
expected = Series(
['2013-08-05 15:30:00.000001', '2013-08-05 15:30:00.000001',
'2013-08-05 15:30:00.000001'], dtype='M8[ns]')
result = s.fillna(method='backfill')
assert_series_equal(result, expected)
def test_datetime64_tz_fillna(self):
for tz in ['US/Eastern', 'Asia/Tokyo']:
# DatetimeBlock
s = Series([Timestamp('2011-01-01 10:00'), pd.NaT, Timestamp(
'2011-01-03 10:00'), pd.NaT])
result = s.fillna(pd.Timestamp('2011-01-02 10:00'))
expected = Series([Timestamp('2011-01-01 10:00'), Timestamp(
'2011-01-02 10:00'), Timestamp('2011-01-03 10:00'), Timestamp(
'2011-01-02 10:00')])
self.assert_series_equal(expected, result)
result = s.fillna(pd.Timestamp('2011-01-02 10:00', tz=tz))
expected = Series([Timestamp('2011-01-01 10:00'), Timestamp(
'2011-01-02 10:00', tz=tz), Timestamp('2011-01-03 10:00'),
Timestamp('2011-01-02 10:00', tz=tz)])
self.assert_series_equal(expected, result)
result = s.fillna('AAA')
expected = Series([Timestamp('2011-01-01 10:00'), 'AAA',
Timestamp('2011-01-03 10:00'), 'AAA'],
dtype=object)
self.assert_series_equal(expected, result)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),
3: pd.Timestamp('2011-01-04 10:00')})
expected = Series([Timestamp('2011-01-01 10:00'), Timestamp(
'2011-01-02 10:00', tz=tz), Timestamp('2011-01-03 10:00'),
Timestamp('2011-01-04 10:00')])
self.assert_series_equal(expected, result)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00'),
3: pd.Timestamp('2011-01-04 10:00')})
expected = Series([Timestamp('2011-01-01 10:00'), Timestamp(
'2011-01-02 10:00'), Timestamp('2011-01-03 10:00'), Timestamp(
'2011-01-04 10:00')])
self.assert_series_equal(expected, result)
# DatetimeBlockTZ
idx = pd.DatetimeIndex(['2011-01-01 10:00', pd.NaT,
'2011-01-03 10:00', pd.NaT], tz=tz)
s = pd.Series(idx)
result = s.fillna(pd.Timestamp('2011-01-02 10:00'))
expected = Series([Timestamp('2011-01-01 10:00', tz=tz), Timestamp(
'2011-01-02 10:00'), Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2011-01-02 10:00')])
self.assert_series_equal(expected, result)
result = s.fillna(pd.Timestamp('2011-01-02 10:00', tz=tz))
idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-02 10:00',
'2011-01-03 10:00', '2011-01-02 10:00'],
tz=tz)
expected = Series(idx)
self.assert_series_equal(expected, result)
result = s.fillna(pd.Timestamp(
'2011-01-02 10:00', tz=tz).to_pydatetime())
idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-02 10:00',
'2011-01-03 10:00', '2011-01-02 10:00'],
tz=tz)
expected = Series(idx)
self.assert_series_equal(expected, result)
result = s.fillna('AAA')
expected = Series([Timestamp('2011-01-01 10:00', tz=tz), 'AAA',
Timestamp('2011-01-03 10:00', tz=tz), 'AAA'],
dtype=object)
self.assert_series_equal(expected, result)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),
3: pd.Timestamp('2011-01-04 10:00')})
expected = Series([Timestamp('2011-01-01 10:00', tz=tz), Timestamp(
'2011-01-02 10:00', tz=tz), Timestamp(
'2011-01-03 10:00', tz=tz), Timestamp('2011-01-04 10:00')])
self.assert_series_equal(expected, result)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),
3: pd.Timestamp('2011-01-04 10:00', tz=tz)})
expected = Series([Timestamp('2011-01-01 10:00', tz=tz), Timestamp(
'2011-01-02 10:00', tz=tz), Timestamp(
'2011-01-03 10:00', tz=tz), Timestamp('2011-01-04 10:00',
tz=tz)])
self.assert_series_equal(expected, result)
# filling with a naive/other zone, coerce to object
result = s.fillna(Timestamp('20130101'))
expected = Series([Timestamp('2011-01-01 10:00', tz=tz), Timestamp(
'2013-01-01'), Timestamp('2011-01-03 10:00', tz=tz), Timestamp(
'2013-01-01')])
self.assert_series_equal(expected, result)
result = s.fillna(Timestamp('20130101', tz='US/Pacific'))
expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
Timestamp('2013-01-01', tz='US/Pacific'),
Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2013-01-01', tz='US/Pacific')])
self.assert_series_equal(expected, result)
def test_fillna_int(self):
s = Series(np.random.randint(-100, 100, 50))
s.fillna(method='ffill', inplace=True)
assert_series_equal(s.fillna(method='ffill', inplace=False), s)
def test_fillna_raise(self):
s = Series(np.random.randint(-100, 100, 50))
self.assertRaises(TypeError, s.fillna, [1, 2])
self.assertRaises(TypeError, s.fillna, (1, 2))
def test_isnull_for_inf(self):
s = Series(['a', np.inf, np.nan, 1.0])
with pd.option_context('mode.use_inf_as_null', True):
r = s.isnull()
dr = s.dropna()
e = Series([False, True, True, False])
de = Series(['a', 1.0], index=[0, 3])
tm.assert_series_equal(r, e)
tm.assert_series_equal(dr, de)
def test_fillna(self):
ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))
self.assert_series_equal(ts, ts.fillna(method='ffill'))
ts[2] = np.NaN
exp = Series([0., 1., 1., 3., 4.], index=ts.index)
self.assert_series_equal(ts.fillna(method='ffill'), exp)
exp = Series([0., 1., 3., 3., 4.], index=ts.index)
self.assert_series_equal(ts.fillna(method='backfill'), exp)
exp = Series([0., 1., 5., 3., 4.], index=ts.index)
self.assert_series_equal(ts.fillna(value=5), exp)
self.assertRaises(ValueError, ts.fillna)
self.assertRaises(ValueError, self.ts.fillna, value=0, method='ffill')
# GH 5703
s1 = Series([np.nan])
s2 = Series([1])
result = s1.fillna(s2)
expected = Series([1.])
assert_series_equal(result, expected)
result = s1.fillna({})
assert_series_equal(result, s1)
result = s1.fillna(Series(()))
assert_series_equal(result, s1)
result = s2.fillna(s1)
assert_series_equal(result, s2)
result = s1.fillna({0: 1})
assert_series_equal(result, expected)
result = s1.fillna({1: 1})
assert_series_equal(result, Series([np.nan]))
result = s1.fillna({0: 1, 1: 1})
assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}))
assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}, index=[4, 5]))
assert_series_equal(result, s1)
s1 = Series([0, 1, 2], list('abc'))
s2 = Series([0, np.nan, 2], list('bac'))
result = s2.fillna(s1)
expected = Series([0, 0, 2.], list('bac'))
assert_series_equal(result, expected)
# limit
s = Series(np.nan, index=[0, 1, 2])
result = s.fillna(999, limit=1)
expected = Series([999, np.nan, np.nan], index=[0, 1, 2])
assert_series_equal(result, expected)
result = s.fillna(999, limit=2)
expected = Series([999, 999, np.nan], index=[0, 1, 2])
assert_series_equal(result, expected)
# GH 9043
# make sure a string representation of int/float values can be filled
# correctly without raising errors or being converted
vals = ['0', '1.5', '-0.3']
for val in vals:
s = Series([0, 1, np.nan, np.nan, 4], dtype='float64')
result = s.fillna(val)
expected = Series([0, 1, val, val, 4], dtype='object')
assert_series_equal(result, expected)
def test_fillna_bug(self):
x = Series([nan, 1., nan, 3., nan], ['z', 'a', 'b', 'c', 'd'])
filled = x.fillna(method='ffill')
expected = Series([nan, 1., 1., 3., 3.], x.index)
assert_series_equal(filled, expected)
filled = x.fillna(method='bfill')
expected = Series([1., 1., 3., 3., nan], x.index)
assert_series_equal(filled, expected)
def test_fillna_inplace(self):
x = Series([nan, 1., nan, 3., nan], ['z', 'a', 'b', 'c', 'd'])
y = x.copy()
y.fillna(value=0, inplace=True)
expected = x.fillna(value=0)
assert_series_equal(y, expected)
def test_fillna_invalid_method(self):
try:
self.ts.fillna(method='ffil')
except ValueError as inst:
self.assertIn('ffil', str(inst))
def test_ffill(self):
ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))
ts[2] = np.NaN
assert_series_equal(ts.ffill(), ts.fillna(method='ffill'))
def test_bfill(self):
ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))
ts[2] = np.NaN
assert_series_equal(ts.bfill(), ts.fillna(method='bfill'))
def test_timedelta64_nan(self):
from pandas import tslib
td = Series([timedelta(days=i) for i in range(10)])
# nan ops on timedeltas
td1 = td.copy()
td1[0] = np.nan
self.assertTrue(isnull(td1[0]))
self.assertEqual(td1[0].value, tslib.iNaT)
td1[0] = td[0]
self.assertFalse(isnull(td1[0]))
td1[1] = tslib.iNaT
self.assertTrue(isnull(td1[1]))
self.assertEqual(td1[1].value, tslib.iNaT)
td1[1] = td[1]
self.assertFalse(isnull(td1[1]))
td1[2] = tslib.NaT
self.assertTrue(isnull(td1[2]))
self.assertEqual(td1[2].value, tslib.iNaT)
td1[2] = td[2]
self.assertFalse(isnull(td1[2]))
# boolean setting
# this doesn't work, not sure numpy even supports it
# result = td[(td>np.timedelta64(timedelta(days=3))) &
# td<np.timedelta64(timedelta(days=7)))] = np.nan
# self.assertEqual(isnull(result).sum(), 7)
        # NumPy limitation =(
# def test_logical_range_select(self):
# np.random.seed(12345)
# selector = -0.5 <= self.ts <= 0.5
# expected = (self.ts >= -0.5) & (self.ts <= 0.5)
# assert_series_equal(selector, expected)
def test_dropna_empty(self):
s = Series([])
self.assertEqual(len(s.dropna()), 0)
s.dropna(inplace=True)
self.assertEqual(len(s), 0)
# invalid axis
self.assertRaises(ValueError, s.dropna, axis=1)
def test_datetime64_tz_dropna(self):
# DatetimeBlock
s = Series([Timestamp('2011-01-01 10:00'), pd.NaT, Timestamp(
'2011-01-03 10:00'), pd.NaT])
result = s.dropna()
expected = Series([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-03 10:00')], index=[0, 2])
self.assert_series_equal(result, expected)
# DatetimeBlockTZ
idx = pd.DatetimeIndex(['2011-01-01 10:00', pd.NaT,
'2011-01-03 10:00', pd.NaT],
tz='Asia/Tokyo')
s = pd.Series(idx)
self.assertEqual(s.dtype, 'datetime64[ns, Asia/Tokyo]')
result = s.dropna()
expected = Series([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-03 10:00', tz='Asia/Tokyo')],
index=[0, 2])
self.assertEqual(result.dtype, 'datetime64[ns, Asia/Tokyo]')
self.assert_series_equal(result, expected)
def test_dropna_no_nan(self):
for s in [Series([1, 2, 3], name='x'), Series(
[False, True, False], name='x')]:
result = s.dropna()
self.assert_series_equal(result, s)
self.assertFalse(result is s)
s2 = s.copy()
s2.dropna(inplace=True)
self.assert_series_equal(s2, s)
def test_valid(self):
ts = self.ts.copy()
ts[::2] = np.NaN
result = ts.valid()
self.assertEqual(len(result), ts.count())
tm.assert_series_equal(result, ts[1::2])
tm.assert_series_equal(result, ts[pd.notnull(ts)])
def test_isnull(self):
ser = Series([0, 5.4, 3, nan, -0.001])
np.array_equal(ser.isnull(),
Series([False, False, False, True, False]).values)
ser = Series(["hi", "", nan])
np.array_equal(ser.isnull(), Series([False, False, True]).values)
def test_notnull(self):
ser = Series([0, 5.4, 3, nan, -0.001])
np.array_equal(ser.notnull(),
Series([True, True, True, False, True]).values)
ser = Series(["hi", "", nan])
np.array_equal(ser.notnull(), Series([True, True, False]).values)
def test_pad_nan(self):
x = Series([np.nan, 1., np.nan, 3., np.nan], ['z', 'a', 'b', 'c', 'd'],
dtype=float)
x.fillna(method='pad', inplace=True)
expected = Series([np.nan, 1.0, 1.0, 3.0, 3.0],
['z', 'a', 'b', 'c', 'd'], dtype=float)
assert_series_equal(x[1:], expected[1:])
self.assertTrue(np.isnan(x[0]), np.isnan(expected[0]))
def test_dropna_preserve_name(self):
self.ts[:5] = np.nan
result = self.ts.dropna()
self.assertEqual(result.name, self.ts.name)
name = self.ts.name
ts = self.ts.copy()
ts.dropna(inplace=True)
self.assertEqual(ts.name, name)
def test_fill_value_when_combine_const(self):
# GH12723
s = Series([0, 1, np.nan, 3, 4, 5])
exp = s.fillna(0).add(2)
res = s.add(2, fill_value=0)
assert_series_equal(res, exp)
class TestSeriesInterpolateData(TestData, tm.TestCase):
def test_interpolate(self):
ts = Series(np.arange(len(self.ts), dtype=float), self.ts.index)
ts_copy = ts.copy()
ts_copy[5:10] = np.NaN
linear_interp = ts_copy.interpolate(method='linear')
self.assert_series_equal(linear_interp, ts)
ord_ts = Series([d.toordinal() for d in self.ts.index],
index=self.ts.index).astype(float)
ord_ts_copy = ord_ts.copy()
ord_ts_copy[5:10] = np.NaN
time_interp = ord_ts_copy.interpolate(method='time')
self.assert_series_equal(time_interp, ord_ts)
# try time interpolation on a non-TimeSeries
# Only raises ValueError if there are NaNs.
non_ts = self.series.copy()
non_ts[0] = np.NaN
self.assertRaises(ValueError, non_ts.interpolate, method='time')
def test_interpolate_pchip(self):
tm._skip_if_no_scipy()
_skip_if_no_pchip()
ser = Series(np.sort(np.random.uniform(size=100)))
# interpolate at new_index
new_index = ser.index.union(Index([49.25, 49.5, 49.75, 50.25, 50.5,
50.75]))
interp_s = ser.reindex(new_index).interpolate(method='pchip')
# does not blow up, GH5977
interp_s[49:51]
def test_interpolate_akima(self):
tm._skip_if_no_scipy()
_skip_if_no_akima()
ser = Series([10, 11, 12, 13])
expected = Series([11.00, 11.25, 11.50, 11.75,
12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75,
2.0, 2.25, 2.5, 2.75, 3.0]))
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75]))
interp_s = ser.reindex(new_index).interpolate(method='akima')
assert_series_equal(interp_s[1:3], expected)
def test_interpolate_piecewise_polynomial(self):
tm._skip_if_no_scipy()
ser = Series([10, 11, 12, 13])
expected = Series([11.00, 11.25, 11.50, 11.75,
12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75,
2.0, 2.25, 2.5, 2.75, 3.0]))
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75]))
interp_s = ser.reindex(new_index).interpolate(
method='piecewise_polynomial')
assert_series_equal(interp_s[1:3], expected)
def test_interpolate_from_derivatives(self):
tm._skip_if_no_scipy()
ser = Series([10, 11, 12, 13])
expected = Series([11.00, 11.25, 11.50, 11.75,
12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75,
2.0, 2.25, 2.5, 2.75, 3.0]))
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75]))
interp_s = ser.reindex(new_index).interpolate(
method='from_derivatives')
assert_series_equal(interp_s[1:3], expected)
def test_interpolate_corners(self):
s = Series([np.nan, np.nan])
assert_series_equal(s.interpolate(), s)
s = Series([]).interpolate()
assert_series_equal(s.interpolate(), s)
tm._skip_if_no_scipy()
s = Series([np.nan, np.nan])
assert_series_equal(s.interpolate(method='polynomial', order=1), s)
s = Series([]).interpolate()
assert_series_equal(s.interpolate(method='polynomial', order=1), s)
def test_interpolate_index_values(self):
s = Series(np.nan, index=np.sort(np.random.rand(30)))
s[::3] = np.random.randn(10)
vals = s.index.values.astype(float)
result = s.interpolate(method='index')
expected = s.copy()
bad = isnull(expected.values)
good = ~bad
expected = Series(np.interp(vals[bad], vals[good],
s.values[good]),
index=s.index[bad])
assert_series_equal(result[bad], expected)
# 'values' is synonymous with 'index' for the method kwarg
other_result = s.interpolate(method='values')
assert_series_equal(other_result, result)
assert_series_equal(other_result[bad], expected)
def test_interpolate_non_ts(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
with tm.assertRaises(ValueError):
s.interpolate(method='time')
# New interpolation tests
def test_nan_interpolate(self):
s = Series([0, 1, np.nan, 3])
result = s.interpolate()
expected = Series([0., 1., 2., 3.])
assert_series_equal(result, expected)
tm._skip_if_no_scipy()
result = s.interpolate(method='polynomial', order=1)
assert_series_equal(result, expected)
def test_nan_irregular_index(self):
s = Series([1, 2, np.nan, 4], index=[1, 3, 5, 9])
result = s.interpolate()
expected = Series([1., 2., 3., 4.], index=[1, 3, 5, 9])
assert_series_equal(result, expected)
def test_nan_str_index(self):
s = Series([0, 1, 2, np.nan], index=list('abcd'))
result = s.interpolate()
expected = Series([0., 1., 2., 2.], index=list('abcd'))
assert_series_equal(result, expected)
def test_interp_quad(self):
tm._skip_if_no_scipy()
sq = Series([1, 4, np.nan, 16], index=[1, 2, 3, 4])
result = sq.interpolate(method='quadratic')
expected = Series([1., 4., 9., 16.], index=[1, 2, 3, 4])
assert_series_equal(result, expected)
def test_interp_scipy_basic(self):
tm._skip_if_no_scipy()
s = Series([1, 3, np.nan, 12, np.nan, 25])
# slinear
expected = Series([1., 3., 7.5, 12., 18.5, 25.])
result = s.interpolate(method='slinear')
assert_series_equal(result, expected)
result = s.interpolate(method='slinear', downcast='infer')
assert_series_equal(result, expected)
# nearest
expected = Series([1, 3, 3, 12, 12, 25])
result = s.interpolate(method='nearest')
assert_series_equal(result, expected.astype('float'))
result = s.interpolate(method='nearest', downcast='infer')
assert_series_equal(result, expected)
# zero
expected = Series([1, 3, 3, 12, 12, 25])
result = s.interpolate(method='zero')
assert_series_equal(result, expected.astype('float'))
result = s.interpolate(method='zero', downcast='infer')
assert_series_equal(result, expected)
# quadratic
expected = Series([1, 3., 6.769231, 12., 18.230769, 25.])
result = s.interpolate(method='quadratic')
assert_series_equal(result, expected)
result = s.interpolate(method='quadratic', downcast='infer')
assert_series_equal(result, expected)
# cubic
expected = Series([1., 3., 6.8, 12., 18.2, 25.])
result = s.interpolate(method='cubic')
assert_series_equal(result, expected)
def test_interp_limit(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
expected = Series([1., 3., 5., 7., np.nan, 11.])
result = s.interpolate(method='linear', limit=2)
|
assert_series_equal(result, expected)
|
pandas.util.testing.assert_series_equal
|
#Import needed packages
import pandas as pd
#import geopandas as gpd
from pydrip import drip_dam
from pydrip import drip_sources
def get_data():
'''
Description
------------
Retrieves source data from American Rivers Dam Removal Database and USGS Dam Removal Science Database
Output
-----------
american_rivers_df: American Rivers database in pandas dataframe
dam_removal_science_df: USGS Dam Removal Science database in pandas dataframe
'''
#get latest American Rivers Data
ar_url = drip_sources.get_american_rivers_data_url()
american_rivers_df = drip_sources.read_american_rivers(ar_url)
#get latest Dam Removal Science Data
drd_url = drip_sources.get_science_data_url()
dam_removal_science_df = drip_sources.read_science_data(drd_url)
return american_rivers_df, dam_removal_science_df
def build_drip_dams_table(dam_removal_science_df, american_rivers_df):
'''
Description
------------
Builds table of all dam removals from both USGS and American Rivers sources.
This dataset represents dams shown in the Dam Removal Science Database.
'''
#Select fields that contain dam information or american rivers id
dam_science_df = drip_sources.get_science_subset(dam_removal_science_df, target='Dam')
#For each dam in science database find best available data for the dam, first looking in science database and if null look in American Rivers
all_dam_info = []
for dam in dam_science_df.itertuples():
removal_data = drip_dam.Dam(dam_id=dam.DamAccessionNumber)
removal_data.science_data(dam)
removal_data.update_missing_data(american_rivers_df)
removal_data.add_geometry()
all_dam_info.append(removal_data.__dict__)
#For each dam only in American Rivers database, get AR data
ar_only_dams = drip_sources.get_ar_only_dams(american_rivers_df, dam_science_df)
for dam in ar_only_dams.itertuples():
removal_data = drip_dam.Dam(dam_id=dam.AR_ID, dam_source='American Rivers')
removal_data.ar_dam_data(dam)
removal_data.add_geometry()
all_dam_info.append(removal_data.__dict__)
all_dam_df =
|
pd.DataFrame(all_dam_info)
|
pandas.DataFrame
|
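# A minimal usage sketch (not part of the original pydrip snippet above) showing how
# get_data() and build_drip_dams_table() might be chained; it assumes
# build_drip_dams_table returns the assembled DataFrame, and the output file name
# "drip_dams.csv" is hypothetical.
if __name__ == "__main__":
    american_rivers_df, dam_removal_science_df = get_data()
    drip_dams_df = build_drip_dams_table(dam_removal_science_df, american_rivers_df)
    drip_dams_df.to_csv("drip_dams.csv", index=False)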
import torch
import pathlib
import pandas as pd
import pytorch_lightning as pl
from datetime import datetime
from collections import OrderedDict
class CSVLogger(pl.Callback):
"""Custom metric logger and model checkpoint."""
def __init__(self, output_path=None):
super(CSVLogger, self).__init__()
self._epoch = None
if output_path is None:
self.logger_path = None
else:
self.logger_path = pathlib.Path(output_path)
self.logger_path.mkdir(parents=True, exist_ok=True)
def metrics(self, interval):
if interval == 'epoch':
return self.epoch_metrics
elif interval in ['step', 'batch']:
return self.batch_metrics
@property
def batch_metrics(self):
metrics_path = self.logger_path / 'metrics_batch.csv'
return pd.read_csv(metrics_path)
@property
def epoch_metrics(self):
metrics_path = self.logger_path / 'metrics_epoch.csv'
return pd.read_csv(metrics_path)
def _extract_metrics(self, trainer, interval):
metrics = trainer.callback_metrics
metric_keys = list(metrics.keys())
data_dict = OrderedDict()
if interval == 'epoch':
metric_keys.remove('epoch')
data_dict['epoch'] = metrics['epoch']
data_dict['time'] = str(datetime.now())
elif interval in ['step', 'batch']:
remove_list = ['train', 'val', 'epoch']
for m in metrics.keys():
if any(sub in m for sub in remove_list):
metric_keys.remove(m)
data_dict[interval] = trainer.global_step
for k in metric_keys:
if isinstance(metrics[k], dict):
for j in metrics[k].keys():
data_dict[j] = metrics[k][j]
else:
data_dict[k] = metrics[k]
# cleanup
for k in data_dict.keys():
try:
data_dict[k] = float(data_dict[k].cpu())
except Exception:
pass
return data_dict
def _log_csv(self, trainer, metrics_path, interval):
data_dict = self._extract_metrics(trainer, interval)
new_metrics = pd.DataFrame.from_records([data_dict], index=interval)
if metrics_path.exists():
config = dict(header=False, mode='a')
old_metrics = self.metrics(interval).set_index(interval)
if not new_metrics.columns.equals(old_metrics.columns):
new_metrics =
|
pd.concat([old_metrics, new_metrics])
|
pandas.concat
|
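# A minimal sketch (not part of the original CSVLogger snippet above) of how such a
# callback is typically attached to a pytorch_lightning Trainer; the model, the
# datamodule and the "logs/metrics" output directory are placeholders.
import pytorch_lightning as pl

def train_with_csv_logger(model, datamodule):
    logger_cb = CSVLogger(output_path="logs/metrics")
    trainer = pl.Trainer(max_epochs=3, callbacks=[logger_cb])
    trainer.fit(model, datamodule=datamodule)
    # per-epoch metrics written by the callback can be read back as a DataFrame
    return logger_cb.epoch_metrics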
from datetime import datetime
import numpy as np
import pytest
from pandas import Index, MultiIndex, Series
import pandas._testing as tm
class TestSeriesAlterAxes:
def test_setindex(self, string_series):
# wrong type
msg = (
r"Index\(\.\.\.\) must be called with a collection of some "
r"kind, None was passed"
)
with pytest.raises(TypeError, match=msg):
string_series.index = None
# wrong length
msg = (
"Length mismatch: Expected axis has 30 elements, "
"new values have 29 elements"
)
with pytest.raises(ValueError, match=msg):
string_series.index = np.arange(len(string_series) - 1)
# works
string_series.index = np.arange(len(string_series))
assert isinstance(string_series.index, Index)
# Renaming
def test_set_name_attribute(self):
s = Series([1, 2, 3])
s2 = Series([1, 2, 3], name="bar")
for name in [7, 7.0, "name", datetime(2001, 1, 1), (1,), "\u05D0"]:
s.name = name
assert s.name == name
s2.name = name
assert s2.name == name
def test_set_name(self):
s = Series([1, 2, 3])
s2 = s._set_name("foo")
assert s2.name == "foo"
assert s.name is None
assert s is not s2
def test_set_index_makes_timeseries(self):
idx = tm.makeDateIndex(10)
s = Series(range(10))
s.index = idx
assert s.index.is_all_dates
def test_reorder_levels(self):
index = MultiIndex(
levels=[["bar"], ["one", "two", "three"], [0, 1]],
codes=[[0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1]],
names=["L0", "L1", "L2"],
)
s = Series(np.arange(6), index=index)
# no change, position
result = s.reorder_levels([0, 1, 2])
tm.assert_series_equal(s, result)
# no change, labels
result = s.reorder_levels(["L0", "L1", "L2"])
tm.assert_series_equal(s, result)
# rotate, position
result = s.reorder_levels([1, 2, 0])
e_idx = MultiIndex(
levels=[["one", "two", "three"], [0, 1], ["bar"]],
codes=[[0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1], [0, 0, 0, 0, 0, 0]],
names=["L1", "L2", "L0"],
)
expected = Series(np.arange(6), index=e_idx)
tm.assert_series_equal(result, expected)
def test_rename_axis_mapper(self):
# GH 19978
mi = MultiIndex.from_product([["a", "b", "c"], [1, 2]], names=["ll", "nn"])
s = Series(list(range(len(mi))), index=mi)
result = s.rename_axis(index={"ll": "foo"})
assert result.index.names == ["foo", "nn"]
result = s.rename_axis(index=str.upper, axis=0)
assert result.index.names == ["LL", "NN"]
result = s.rename_axis(index=["foo", "goo"])
assert result.index.names == ["foo", "goo"]
with pytest.raises(TypeError, match="unexpected"):
s.rename_axis(columns="wrong")
def test_rename_axis_inplace(self, datetime_series):
# GH 15704
expected = datetime_series.rename_axis("foo")
result = datetime_series
no_return = result.rename_axis("foo", inplace=True)
assert no_return is None
|
tm.assert_series_equal(result, expected)
|
pandas._testing.assert_series_equal
|
import pandas as pd
import re
default_units = {'speed': 'km/h',
'distance': 'km',
'weight': 'kg',
'height': 'cm'}
units_conversions = {}
# Team 2
def convert_time(time: str, time_format: str = None, mode: str = 'flag'):
"""
    Converts a time string into a pd.Timedelta object
Parameters
----------
time: string
time_format: string
if mode = flag : valid flags for pd.to_datetime function
if mode = regex : raw string with regex. Each unit of time to be included in the result
should be named group of regex. One of the following values should be used as a key:
{'days', 'seconds', 'microseconds', 'milliseconds', 'minutes', 'hours', 'weeks'}
Example of valid regex:
r"(?P<hours>[\d]{0,2})\:?(?P<minutes>[\d]{2})\:(?P<seconds>[\d]{2}).[\d]{3}"
default = None : Default call of to_timedelta function without flags
mode: string with value 'flag' or 'regex'
flag for using flag mode (matching time using flags recognised by pd.to_datetime),
regex for using regex patterns matching time
Returns
-------
pd.Timedelta object if operation was successful, else returns time parameter as a string.
"""
possible_keys = {'days', 'seconds', 'microseconds', 'milliseconds', 'minutes', 'hours', 'weeks'}
if mode == 'flag':
try:
            dt_time = pd.to_datetime(time, errors='ignore', format=time_format)
try:
return pd.to_timedelta(dt_time)
except ValueError:
return pd.to_timedelta(str(dt_time.time()))
except (TypeError, AttributeError):
return str(time)
elif mode == 'regex':
if re_compiler(time_format) is True:
try:
time_dict = re.search(time_format, time).groupdict()
time_dict = dict_comprehension(possible_keys, time_dict)
return
|
pd.Timedelta(**time_dict)
|
pandas.Timedelta
|
from datetime import datetime
import operator
import numpy as np
import pytest
from pandas import DataFrame, Index, Series, bdate_range
import pandas._testing as tm
from pandas.core import ops
class TestSeriesLogicalOps:
@pytest.mark.parametrize("bool_op", [operator.and_, operator.or_, operator.xor])
def test_bool_operators_with_nas(self, bool_op):
# boolean &, |, ^ should work with object arrays and propagate NAs
ser = Series(bdate_range("1/1/2000", periods=10), dtype=object)
ser[::2] = np.nan
mask = ser.isna()
filled = ser.fillna(ser[0])
result = bool_op(ser < ser[9], ser > ser[3])
expected = bool_op(filled < filled[9], filled > filled[3])
expected[mask] = False
tm.assert_series_equal(result, expected)
def test_logical_operators_bool_dtype_with_empty(self):
# GH#9016: support bitwise op for integer types
index = list("bca")
s_tft = Series([True, False, True], index=index)
s_fff = Series([False, False, False], index=index)
s_empty = Series([], dtype=object)
res = s_tft & s_empty
expected = s_fff
tm.assert_series_equal(res, expected)
res = s_tft | s_empty
expected = s_tft
tm.assert_series_equal(res, expected)
def test_logical_operators_int_dtype_with_int_dtype(self):
# GH#9016: support bitwise op for integer types
# TODO: unused
# s_0101 = Series([0, 1, 0, 1])
s_0123 = Series(range(4), dtype="int64")
s_3333 = Series([3] * 4)
s_4444 = Series([4] * 4)
res = s_0123 & s_3333
expected = Series(range(4), dtype="int64")
tm.assert_series_equal(res, expected)
res = s_0123 | s_4444
expected = Series(range(4, 8), dtype="int64")
tm.assert_series_equal(res, expected)
s_1111 = Series([1] * 4, dtype="int8")
res = s_0123 & s_1111
expected = Series([0, 1, 0, 1], dtype="int64")
tm.assert_series_equal(res, expected)
res = s_0123.astype(np.int16) | s_1111.astype(np.int32)
expected = Series([1, 1, 3, 3], dtype="int32")
tm.assert_series_equal(res, expected)
def test_logical_operators_int_dtype_with_int_scalar(self):
# GH#9016: support bitwise op for integer types
s_0123 = Series(range(4), dtype="int64")
res = s_0123 & 0
expected = Series([0] * 4)
tm.assert_series_equal(res, expected)
res = s_0123 & 1
expected = Series([0, 1, 0, 1])
tm.assert_series_equal(res, expected)
def test_logical_operators_int_dtype_with_float(self):
# GH#9016: support bitwise op for integer types
s_0123 = Series(range(4), dtype="int64")
msg = "Cannot perform.+with a dtyped.+array and scalar of type"
with pytest.raises(TypeError, match=msg):
s_0123 & np.NaN
with pytest.raises(TypeError, match=msg):
s_0123 & 3.14
msg = "unsupported operand type.+for &:"
with pytest.raises(TypeError, match=msg):
s_0123 & [0.1, 4, 3.14, 2]
with pytest.raises(TypeError, match=msg):
s_0123 & np.array([0.1, 4, 3.14, 2])
with pytest.raises(TypeError, match=msg):
s_0123 & Series([0.1, 4, -3.14, 2])
def test_logical_operators_int_dtype_with_str(self):
s_1111 = Series([1] * 4, dtype="int8")
msg = "Cannot perform 'and_' with a dtyped.+array and scalar of type"
with pytest.raises(TypeError, match=msg):
s_1111 & "a"
with pytest.raises(TypeError, match="unsupported operand.+for &"):
s_1111 & ["a", "b", "c", "d"]
def test_logical_operators_int_dtype_with_bool(self):
# GH#9016: support bitwise op for integer types
s_0123 = Series(range(4), dtype="int64")
expected = Series([False] * 4)
result = s_0123 & False
tm.assert_series_equal(result, expected)
result = s_0123 & [False]
tm.assert_series_equal(result, expected)
result = s_0123 & (False,)
tm.assert_series_equal(result, expected)
result = s_0123 ^ False
expected = Series([False, True, True, True])
tm.assert_series_equal(result, expected)
def test_logical_operators_int_dtype_with_object(self):
# GH#9016: support bitwise op for integer types
s_0123 = Series(range(4), dtype="int64")
result = s_0123 &
|
Series([False, np.NaN, False, False])
|
pandas.Series
|
from collections import Counter
from datetime import datetime, timedelta
import pandas as pd
from django.core.validators import FileExtensionValidator
from django.db import models
from django.utils.translation import gettext as _
from etat_civil.geonames_place.models import Place
from model_utils.models import TimeStampedModel
class BaseAL(TimeStampedModel):
title = models.CharField(max_length=128, unique=True)
class Meta:
abstract = True
ordering = ["title"]
def __str__(self):
return self.title
class Data(TimeStampedModel):
title = models.CharField(max_length=64, unique=True)
data = models.FileField(
upload_to="uploads/data/",
validators=[FileExtensionValidator(allowed_extensions=["xlsx"])],
)
locations_df = None
class Meta:
verbose_name_plural = "Data"
def __str__(self):
return self.title
def save(self, *args, **kwargs):
self.locations_df = self.get_data_sheet("locations").set_index("display_name")
super().save(*args, **kwargs)
def load_data(self, delete=False):
if not self.data:
return False
        if delete:
self.sources.all().delete()
births_df = self.get_data_sheet("births")
self.load_births(births_df)
marriages_df = self.get_data_sheet("marriages")
self.load_marriages(marriages_df)
deaths_df = self.get_data_sheet("deaths")
self.load_deaths(deaths_df)
return True
def get_data_sheet(self, sheet_name):
if not sheet_name:
return None
df = pd.read_excel(self.data, engine="openpyxl", sheet_name=sheet_name)
df = df.dropna(how="all")
df = self.convert_date_columns(df)
return df
def convert_date_columns(self, df):
if df is None:
return None
date_columns = filter(lambda x: "date" in x, df.columns)
for c in date_columns:
df[c] = pd.to_datetime(df[c], errors="ignore")
return df
def load_births(self, births_df):
return self.load_deed(births_df, Deed.load_birth_deed)
def load_deed(self, df, load_func):
if df is None:
return False
for index, row in df.iterrows():
try:
source = Source.load_source(
self, row["classmark"], row["classmark_microfilm"]
)
if source:
load_func(self, source, row)
except Exception as e: # noqa
print(load_func, index, e)
continue
return True
def load_marriages(self, marriages_df):
return self.load_deed(marriages_df, Deed.load_marriage_deed)
def load_deaths(self, deaths_df):
return self.load_deed(deaths_df, Deed.load_death_deed)
def get_place(self, name):
"""Returns a geonames place and a return code, and updates the internal
place name cache, `locations_df`, when new places get a `geonames_id`.
Code 0, the place was found in the location cache; code 1, the place was
searched in geonames by geonames id; code 2, the place was searched in
geonames by name; code 3, the place was searched in geonames by lat, lon;
code -1, the place was not in the cache and was searched in geonames by
name."""
if not name:
return None, -1
code = 0
name = name.strip()
address = name
try:
location = self.locations_df.loc[name]
address = location["location"].strip()
geonames_id = location["geonames_id"]
if pd.notnull(geonames_id):
# get place by geonames id
place, created = Place.objects.get_or_create(
geonames_id=int(geonames_id)
)
if created:
code = 1
elif pd.notnull(location["lat"]) and
|
pd.notnull(location["lon"])
|
pandas.notnull
|
#! usr/bin/env python
import numpy as np
import pandas as pd
import ase.io
import ase
from ase import atoms
import networkx as nx
from periodictable import elements
import featpro.utils as utils
from featpro.pos import connectivity
from itertools import islice
#=======================================================================
def extract_geom(file_name, out_file = 0):
informat = file_name.rsplit(".",1)[-1]
if informat != 'out':
raise TypeError('The input file format must be .out')
if not out_file:
out_file = file_name.rsplit(".",1)[0] + ".geom.com"
outformat = out_file.rsplit(".",1)[-1]
if outformat !='com' and outformat !='gjf' and outformat !='xyz':
raise TypeError('The output format must be .com, .gjf, or .xyz')
# open the input file and read a route section
lines = open(file_name).readlines()
for idx, line in enumerate(lines):
if "#" in line:
idx_route = idx
break
route = []
for line in lines[idx_route:]:
route.append(line.strip('\n'))
if "----------------------------" in line:
break
route = route[:-1]
for idx in range(len(route)):
if route[idx].startswith(' '):
route[idx] = route[idx][1:]
routeStr = ""
routeStr = routeStr.join(route)
if "geom=connectivity" in routeStr:
routeStr = routeStr.replace("geom=connectivity","")
oniom = 0
if ("oniom" or "ONIOM" or "Oniom") in routeStr:
oniom += 1
# Read charge and multiplicity data
for idx, line in enumerate(lines):
if "Charge =" in line:
idx_charge = idx
break
if oniom == 0:
charge, multiplicity = lines[idx_charge].split()[2], lines[idx_charge].split()[5]
charAndMult = "{} {}".format(charge, multiplicity)
if oniom == 1:
for idx, line in enumerate(lines[idx_charge:]):
if not "Charge" in line:
idx_charge_end = idx_charge+idx
break
charAndMult = []
for line in lines[idx_charge: idx_charge_end]:
charAndMult.append(str(line.split()[2]))
charAndMult.append(str(line.split()[5]))
charAndMult = " ".join(charAndMult)
# Read oniom layer data
if oniom == 1:
iniGeom = []
for line in lines[idx_charge_end:]:
if len(line) <= 2: break
iniGeom.append(line.split())
iniGeom = list(filter(None, iniGeom))
oniom_layer = []
for line in iniGeom:
oniom_layer.append(line[5])
# Read final geometry, convert format, and write a new file
for idx, line in enumerate(reversed(lines)):
if "Coordinates (Angstroms)" in line:
break
idx_geo = len(lines)-idx-1
geom = []
for line in lines[idx_geo+3:]:
geom.append(line.split())
if "----------------------------" in line:
break
df_geom =
|
pd.DataFrame(geom[:-1], columns = ['index','atomic number','atomic type','X','Y','Z'])
|
pandas.DataFrame
|
import pandas as pd
class SeasonalNaive:
def __init__(self):
self.data = None
self.resolution = None
self.period = None
self.num_periods = None
self.column = None
self.options = None
def _set_default_options(self):
if self.options is None:
self.options = {}
self.resolution = pd.to_timedelta(self.options['resolution'])
self.column = self.data.columns[0]
if 'period' not in self.options:
self.options['period'] = '1 day'
if 'num_periods' not in self.options:
self.options['num_periods'] = 1
def fit(self, data, options=None):
# super().fit(data, options)
self.data = data
self.options = options
# Ensure defaults are set for any remaining unspecified options
self._set_default_options()
self.period =
|
pd.to_timedelta(self.options['period'])
|
pandas.to_timedelta
|
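# A minimal usage sketch (not part of the original SeasonalNaive snippet above);
# the column name "load" and the option values are illustrative, and it assumes the
# truncated fit() body shown above, where self.period is set from options['period'].
if __name__ == "__main__":
    idx = pd.date_range("2021-01-01", periods=48, freq="H")
    history = pd.DataFrame({"load": range(48)}, index=idx)
    model = SeasonalNaive()
    model.fit(history, options={"resolution": "1 hour", "period": "1 day"})
    print(model.resolution, model.period)  # Timedelta('0 days 01:00:00') Timedelta('1 days 00:00:00')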
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
A script to create the CSV file called "silviculture.csv" by extracting it from the "silviculture.sas" file.
Typically you would run this file from a command line like this:
ipython3.exe -i -- /deploy/cbmcfs3_runner/scripts/orig/export_silviculture.py
"""
# Built-in modules #
import re
# Third party modules #
from tqdm import tqdm
from six import StringIO
import pandas
# First party modules #
from autopaths.auto_paths import AutoPaths
from plumbing.common import camel_to_snake
# Internal modules #
from cbmcfs3_runner.core.continent import continent
# Constants #
###############################################################################
class ExportFromSilviculture(object):
"""
This class takes the file "silviculture.sas" as input and generates a CSV
from it.
"""
all_paths = """
/orig/silviculture.sas
/orig/silv_treatments.csv
/orig/harvest_corr_fact.csv
/orig/harvest_prop_fact.csv
"""
def __init__(self, country):
# Default attributes #
self.country = country
# Automatically access paths based on a string of many subpaths #
self.paths = AutoPaths(self.country.data_dir, self.all_paths)
def __call__(self):
self.treatments()
self.harvest_corr_fact()
self.harvest_prop_fact()
def treatments(self):
"""
Search the SAS file for the CSV that is hidden inside and return a
pandas DataFrame. Yes, you heard that correctly, the SAS file has
a CSV hidden somewhere in the middle under plain text format.
This data frame will later be used to generate disturbances from the
economic demand.
"""
# Our regular expression #
query = '\n {3}input (.*?);\n {3}datalines;\n\n(.*?)\n;\nrun'
# Search in the file #
column_names, all_rows = re.findall(query, self.paths.sas.contents, re.DOTALL)[0]
# Format the column_names #
column_names = [name.strip('$') for name in column_names.split()]
# Follow the snake case standard #
column_names = [camel_to_snake(name) for name in column_names]
# Place the rows (content) into a virtual file to be read #
all_rows = StringIO(all_rows)
# Parse into a data frame #
df = pandas.read_csv(all_rows, names=column_names, delim_whitespace=True)
# Lower case a specific column #
df['hwp'] = df['hwp'].str.lower()
# Write back into a CSV #
df.to_csv(str(self.paths.treatments), index=False)
def harvest_corr_fact(self):
"""
        There is actually another piece of hard-coded info inside the SAS file
        that we need.
This method will extract a list of "harvest correction factors"
in CSV format. We can spot the location in the file by searching for
the string <if _2='>
        These correction factors will be applied when creating new
        disturbances to adjust which forest type is harvested first.
Obviously, coefficients are different in the different countries.
if _2='FS' then CF=1.2;
if _2='QR' then CF=0.9;
...
This fails for DK, GR, HR, IE, LU, PT, ZZ.
"""
# Search in the file #
lines = [line for line in self.paths.sas if "if _2='" in str(line)]
# Do each line #
query = "if _2='([A-Z][A-Z])' then CF=([0-9].[0-9]+);"
extract = lambda line: re.findall(query, str(line))
result = list(map(extract, lines))
result = [found[0] for found in result if found]
# Make a data frame #
df =
|
pandas.DataFrame(result, columns=['forest_type', 'corr_fact'])
|
pandas.DataFrame
|
""" Experiment with multiprocessing an enormous dataframe. """
import os
from multiprocessing import Pool
from numpy.random import default_rng
import numpy as np
import pandas as pd # type:ignore
import psutil # type:ignore
from multiprocessing import set_start_method
def worker_fn(chunk):
""" Scoring goes here. """
result_1 = []
result_2 = []
# TODO pick cols by name.
for i,row in chunk.iterrows():
result_1.append(np.median(row.values))
result_2.append(np.mean(row.values))
print(f"worker {os.getpid()} finished "
f"RSS (GB) {psutil.Process(os.getpid()).memory_info().rss/1e9:5.2f} "
f"start {min(chunk.index):10d} end {max(chunk.index):10d}")
return (result_1, result_2)
def chunks(x, chunk_size):
for i in range(0, len(x), chunk_size):
yield x[i:i + chunk_size]
def make_df(cols = 40, rows = 100000000):
big_df =
|
pd.DataFrame()
|
pandas.DataFrame
|
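# A minimal driver sketch (not part of the original experiment script above) showing
# one way to combine make_df, chunks and worker_fn; the chunk size and process count
# are illustrative, and make_df is assumed to return the DataFrame it builds.
def run_experiment(rows=1_000_000, chunk_size=100_000, processes=4):
    set_start_method("spawn", force=True)
    big_df = make_df(rows=rows)
    with Pool(processes=processes) as pool:
        results = pool.map(worker_fn, chunks(big_df, chunk_size))
    # stitch the per-chunk medians and means back together
    medians = [v for part_medians, _ in results for v in part_medians]
    means = [v for _, part_means in results for v in part_means]
    return medians, means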
import pandas as pd
import numpy as np
import pytest
from kgextension.endpoints import DBpedia
from kgextension.schema_matching import (
relational_matching,
label_schema_matching,
value_overlap_matching,
string_similarity_matching
)
class TestRelationalMatching:
def test1_default(self):
path_input = "test/data/schema_matching/default_matches_cities_input.csv"
df = pd.read_csv(path_input)
path_expected = "test/data/schema_matching/default_matches_cities_expected.csv"
expected_matches = pd.read_csv(path_expected)
output_matches = relational_matching(df)
output_matches['value'] = pd.to_numeric(output_matches['value'])
pd.testing.assert_frame_equal(
output_matches, expected_matches, check_like=True)
def test2_no_matches(self):
path_input = "test/data/schema_matching/no_matches_cities_input.csv"
df = pd.read_csv(path_input)
path_expected = "test/data/schema_matching/no_matches_cities_expected.csv"
expected_matches = pd.read_csv(path_expected)
output_matches = relational_matching(df)
output_matches['value'] = pd.to_numeric(output_matches['value'])
pd.testing.assert_frame_equal(
output_matches, expected_matches, check_like=True)
def test3_uri_querier(self):
path_input = "test/data/schema_matching/default_matches_cities_input.csv"
df = pd.read_csv(path_input)
path_expected = "test/data/schema_matching/default_matches_cities_expected.csv"
expected_matches = pd.read_csv(path_expected)
output_matches = relational_matching(df, uri_data_model=True)
output_matches['value'] = pd.to_numeric(output_matches['value'])
pd.testing.assert_frame_equal(
output_matches, expected_matches, check_like=True)
def test4_uri_querier_no_matches(self):
path_input = "test/data/schema_matching/no_matches_cities_input.csv"
df = pd.read_csv(path_input)
path_expected = "test/data/schema_matching/no_matches_cities_expected.csv"
expected_matches = pd.read_csv(path_expected)
output_matches = relational_matching(df, uri_data_model=True)
output_matches['value'] = pd.to_numeric(output_matches['value'])
pd.testing.assert_frame_equal(
output_matches, expected_matches, check_like=True)
def test5_match_score(self):
score = 0.76
path_input = "test/data/schema_matching/default_matches_cities_input.csv"
df = pd.read_csv(path_input)
path_expected = "test/data/schema_matching/default_matches_cities_expected.csv"
expected_matches = pd.read_csv(path_expected)
expected_matches['value'] = np.where(
expected_matches['value']==1, score, expected_matches['value'])
output_matches = relational_matching(df, match_score=score)
output_matches['value'] = pd.to_numeric(output_matches['value'])
pd.testing.assert_frame_equal(
output_matches, expected_matches, check_like=True)
def test6_one_endpoint(self):
path_input = "test/data/schema_matching/default_matches_cities_input.csv"
df = pd.read_csv(path_input)
path_expected = "test/data/schema_matching/default_matches_cities_expected.csv"
expected_matches = pd.read_csv(path_expected)
output_matches = relational_matching(df, endpoints=DBpedia)
output_matches['value'] = pd.to_numeric(output_matches['value'])
pd.testing.assert_frame_equal(
output_matches, expected_matches, check_like=True)
def test7_no_http_input(self):
df = pd.DataFrame({'a': [1, 2, 3],
'b': [4, 5, 6]})
expected_matches = pd.DataFrame(columns=["uri_1", "uri_2", "value"])
output_matches = relational_matching(df)
pd.testing.assert_frame_equal(
output_matches, expected_matches, check_like=True)
class TestStringSimilarityMatching():
def test1_default(self):
path_input = "test/data/schema_matching/string_matching_input_t1t2.csv"
df = pd.read_csv(path_input)
path_expected = "test/data/schema_matching/string_matching_output_t1.csv"
result_expected = pd.read_csv(path_expected)
result = string_similarity_matching(df, prefix_threshold=1)
pd.testing.assert_frame_equal(result, result_expected, check_like=True)
def test2_highthreshold(self):
path_input = "test/data/schema_matching/string_matching_input_t1t2.csv"
df = pd.read_csv(path_input)
path_expected = "test/data/schema_matching/string_matching_output_t2.csv"
result_expected = pd.read_csv(path_expected)
result = string_similarity_matching(df, prefix_threshold=10)
pd.testing.assert_frame_equal(result, result_expected, check_like=True)
def test3_diffpredicate_diffmetric(self):
path_input = "test/data/schema_matching/string_matching_input_t3.csv"
df = pd.read_csv(path_input)
path_expected = "test/data/schema_matching/string_matching_output_t3.csv"
result_expected = pd.read_csv(path_expected)
result = string_similarity_matching(df, predicate="dbo:abstract", to_lowercase=False, remove_prefixes=False, remove_punctuation=False, similarity_metric="token_set_levenshtein")
pd.testing.assert_frame_equal(result, result_expected, check_like=True)
class TestLabelSchemaMatching:
def test1_default(self):
path_input = "test/data/schema_matching/default_matches_cities_boolean_input.csv"
df = pd.read_csv(path_input)
path_expected = "test/data/schema_matching/default_matches_cities_boolean_expected.csv"
expected_matches = pd.read_csv(path_expected)
output_matches = label_schema_matching(df)
output_matches['same_label'] = pd.to_numeric(output_matches['same_label'])
pd.testing.assert_frame_equal(
output_matches, expected_matches, check_like=True)
def test2_no_matches(self):
path_input = "test/data/schema_matching/no_matches_cities_boolean_input.csv"
df = pd.read_csv(path_input)
path_expected = "test/data/schema_matching/no_matches_cities_boolean_expected.csv"
expected_matches = pd.read_csv(path_expected)
output_matches = label_schema_matching(df)
output_matches['same_label'] = pd.to_numeric(output_matches['same_label'])
pd.testing.assert_frame_equal(
output_matches, expected_matches, check_like=True)
def test3_uri_querier(self):
path_input = "test/data/schema_matching/default_matches_cities_boolean_input.csv"
df = pd.read_csv(path_input)
path_expected = "test/data/schema_matching/default_matches_cities_boolean_expected.csv"
expected_matches = pd.read_csv(path_expected)
output_matches = label_schema_matching(df, uri_data_model=True)
output_matches['same_label'] = pd.to_numeric(output_matches['same_label'])
pd.testing.assert_frame_equal(
output_matches, expected_matches, check_like=True)
def test4_uri_querier_no_matches(self):
path_input = "test/data/schema_matching/no_matches_cities_boolean_input.csv"
df = pd.read_csv(path_input)
path_expected = "test/data/schema_matching/no_matches_cities_boolean_expected.csv"
expected_matches = pd.read_csv(path_expected)
output_matches = label_schema_matching(df, uri_data_model=True)
output_matches['same_label'] = pd.to_numeric(output_matches['same_label'])
pd.testing.assert_frame_equal(
output_matches, expected_matches, check_like=True)
class TestValueOverlapMatching:
def test1_boolean_data(self):
path_input = "test/data/schema_matching/default_matches_cities_boolean_input.csv"
df = pd.read_csv(path_input)
path_expected = "test/data/schema_matching/value_matches_cities_boolean_expected.csv"
expected_matches = pd.read_csv(path_expected)
output_matches = value_overlap_matching(df)
output_matches['value_overlap'] =
|
pd.to_numeric(output_matches['value_overlap'])
|
pandas.to_numeric
|
import logging
import os
import pandas as pd
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.db.models import Count
from django.shortcuts import render, redirect, get_object_or_404
from django.urls import reverse
from django.views import generic
from django.http import HttpResponse
from django.conf import settings
from sickle import Sickle
import xml.etree.ElementTree as ET
import requests
from backend.models import Goal, SiddataUser, SiddataUserStudy, Degree, Subject, Activity
from recommenders.recommender_functions import get_active_recommenders
# from dashboard.create_chart import build_eval_df
from dashboard import create_plots
from django.utils.text import slugify
def home(request):
"""Start page.
:param request The view's request object."""
logger = logging.getLogger("backend.views.home")
logger.debug("Home loaded...")
page = 'home' # for highlighting navbar entry
return render(request, "backend/home.html")
@login_required
def list_goals(request):
"""Display all goals.
:param request The view's request object.
:returns renderable response from form template."""
page = 'list_goals' # for highlighting navbar entry
goals = Goal.objects.all().order_by('-makedate') # fetch all goals
num_goals = Goal.objects.count()
num_users = SiddataUser.objects.count()
degrees = Degree.objects.annotate(num=Count('siddatauserstudy')).order_by('name')
subjects = Subject.objects.annotate(num=Count('siddatauserstudy')).order_by('name')
return render(request, "backend/goallist.html", locals())
@login_required
def goals_data(request):
subjects = [] # subjects filter
degrees = [] # degrees filter
for param_key in request.POST:
if param_key.startswith('subject-'): # subjects to include
try:
subjects.append(Subject.objects.get(pk=int(param_key.split("-", 1)[1])))
except ValueError: # malformed id
continue
if param_key.startswith('degree-'): # competencies to include
try:
degrees.append(Degree.objects.get(pk=param_key.split("-", 1)[1]))
except ValueError:
continue
all_goals = Goal.objects.all()
if subjects:
all_goals = all_goals.filter(userrecommender__user__siddatauserstudy__subject__in=subjects)
if degrees:
        all_goals = all_goals.filter(userrecommender__user__siddatauserstudy__degree__in=degrees)
if 'nopaging' in request.POST: # all on one page...
wpp = len(all_goals) # goals per page
page = 1
else:
wpp = 25
page = request.POST.get('page')
    paginator = Paginator(all_goals, wpp)  # paginate goals, wpp per page
try:
goals = paginator.page(page)
offset = (int(page) - 1) * 25
except PageNotAnInteger:
# If page is not an integer, deliver first page.
goals = paginator.page(1)
offset = 0
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
goals = paginator.page(paginator.num_pages)
offset = (paginator.num_pages - 1) * 25
return render(request, 'backend/goals_data.html', locals())
@login_required
def goal_form(request):
"""Display goal form.
:param request The view's request object.
:returns renderable response from form template."""
page = 'goal_form' # for highlighting navbar entry
goals = Goal.objects.all()
return render(request, "backend/goalform.html", locals())
@login_required
def add_goal(request):
goal = request.POST.get("goal", None)
if goal:
u = SiddataUser.objects.get_or_create(origin_id='internal', origin='internal')[0]
for study in [{"degree": {"name": "Bachelor of Science (B.Sc.)", "degree_origin_id": "14"},
"subject": {"name": "Cognitive Science", "subject_origin_id": "729"},
"semester": 3}]:
us = SiddataUserStudy.objects.get_or_create(user=u,
degree_id=Degree.objects.get(name=study['degree']['name'],
degree_origin_id=study['degree'][
'degree_origin_id']).pk,
subject_id=Subject.objects.get(name=study['subject']['name'],
subject_origin_id=
study['subject'][
'subject_origin_id']).pk,
semester=study['semester'])[0]
g = Goal(goal=goal, userrecommender__user=u)
g.save()
messages.info(request, "Ziel '{}' angelegt.".format(goal))
else:
messages.warning(request, 'Ziel konnte nicht angelegt werden.')
return redirect('list_goals')
def backend_js(request):
return render(request, "backend/backend.js", locals())
@login_required
def backdoor(request, userid=None, view=None, list=None):
"""Provide simple client interface in Django backend."""
if not userid:
user = None # perhaps there is no user yet
username = request.GET.get('user', 'test001') # default: test001
originname = request.GET.get('origin', 'UOS') # default: UOS
else:
user = get_object_or_404(SiddataUser, pk=userid)
username = user.user_origin_id
originname = user.origin.api_endpoint
if not view and not list:
view = 'stats'
# fetch data via API
# this would not strictly be necessary because we have database access, but we want to use the standard api mechanisms
# e.g. for creating new users
import requests, json
r = requests.get('http://localhost:8000/backend/api/lists?origin={}&user_origin_id={}'.format(originname, username))
json_result = json.loads(r.text)
result = [json_result['lists'][x] for x in json_result['lists'].keys()] # reorganize as list of list dictionaries (TODO: Sorting)
lists = json_result['lists'].keys()
if not user: # data passed from form, perhaps new user?
user = get_object_or_404(SiddataUser, origin__api_endpoint=originname, user_origin_id=username)
if view == 'stats':
        # fetch some statistics from the database
num_recommenders = Goal.objects.filter(userrecommender__user=user).distinct('recommender').count() # number of active recommenders
num_goals = Goal.objects.filter(userrecommender__user=user).count() # number of goals for user
num_activities = Activity.objects.filter(goal__userrecommender__user=user).count() # number of activities for user
num_done_activities = Activity.objects.filter(goal__userrecommender__user=user, status="done").count() # number of activities for user
recommenders = get_active_recommenders()
# one template for everything
return render(request, "backend/backdoor.html", locals())
@login_required
def backdoor_interact(request, userid):
"""Receive and handle interaction feedback for backdoor client."""
type = request.GET.get("type", None) # determine type of interaction (question or ...)
user = get_object_or_404(SiddataUser, pk=userid)
if type == 'question':
activity_id = request.GET.get("activity_id", None)
answers = request.GET.get("answers", None)
if activity_id and answers:
data = {'data': {'activity_id': activity_id, 'type': 'Activity', 'attributes': {'answers': [answers], 'status': 'done'}}}
response = requests.patch('http://localhost:8000/backend/api/activity', json=data)
if response.ok:
messages.add_message(request, messages.INFO, 'Answer was processed.')
else: # code >= 400
messages.add_message(request, messages.ERROR, 'Error {} {}: {}'.format(response.status_code, response.reason, response.text))
else: # not all parameters given
            messages.add_message(request, messages.WARNING, 'Malformed request (activity_id or answers missing for question).')
elif type == 'reset':
if user:
uname = "{} ({})".format(user.user_origin_id, user.origin.api_endpoint)
user.delete()
messages.add_message(request, messages.INFO, 'User {} deleted.'.format(uname))
return redirect(reverse('backdoor')) # redirect to start of backdoor
else:
messages.add_message(request, messages.WARNING, 'Unknown user: {}'.format(userid))
else: # unknown type
messages.add_message(request, messages.WARNING, 'Unknown type: {}'.format(type))
    next = request.GET.get('next', reverse('backdoor_user', kwargs={'userid':user.id}))  # redirect to where we came from (filled by template as GET parameter next, or to the user's backdoor start)
return redirect(next) # show activities for current user again
def oer(request):
# rest Api test
# response = requests.post(
# url="https://www.twillo.de/edu-sharing/rest/node/v1/nodes/-home-",
# params={"query": "title:*"},
# headers={
# "Accept": "application/json",
# "Content-Type": "application/json",
# },
# )
# print(json.loads(response.content))
# return redirect('home')
# Get all resources by using oai service
sickle = Sickle("https://www.twillo.de/edu-sharing/eduservlet/oai/provider")
records = sickle.ListRecords(metadataPrefix='lom')
ns = {"oai": "http://www.openarchives.org/OAI/2.0/",
"lom": "http://ltsc.ieee.org/xsd/LOM"}
for record in records:
# Parse LOM-XML
root = ET.fromstring(record.raw)
header = root.find("oai:header", ns)
metadata = root.find("oai:metadata/lom:lom", ns)
# Mapping lom to Dublin Core partially based on
# https://www.researchgate.net/figure/Mapping-between-Unqualified-Dublin-Core-and-IEEE-LOM_tbl1_221425064
dcmes = {
"Contributor": list(elem.text for elem in metadata.findall(".//lom:lifeCycle/lom:contribute/lom:entity", namespaces=ns)),
"Coverage": list(elem.text for elem in metadata.findall(".//lom:classification/lom:taxonPath/lom:taxon/lom:entry/string", namespaces=ns)),
"Creator": metadata.findtext(".//lom:metaMetadata/lom:contribute/lom:entity", namespaces=ns),
"Date": header.findtext(".//oai:datestamp", namespaces=ns),
"Description": metadata.findtext(".//lom:general/lom:description/string", namespaces=ns),
"Format": metadata.findtext(".//lom:technical/lom:format", namespaces=ns),
"Identifier": metadata.findtext(".//lom:general/lom:identifier/lom:entry", namespaces=ns),
"Language": metadata.findtext(".//lom:general/lom:language", namespaces=ns),
"Publisher": metadata.findtext(".//lom:lifeCycle/lom:contribute[lom:role='publisher']/lom:entity", namespaces=ns),
"Relation": metadata.findtext(".//lom:technical/lom:location", namespaces=ns),
"Rights": metadata.findtext(".//lom:rights/lom:description/string", namespaces=ns),
"Source": metadata.findtext(".//lom:relation/lom:kind/lom:source", namespaces=ns),
"Subject": list(metadata.find(".//lom:general/lom:keyword", namespaces=ns).itertext()),
"Title": metadata.findtext(".//lom:general/lom:title/string", namespaces=ns),
"Type": list(metadata.find(".//lom:educational/lom:learningResourceType", namespaces=ns).itertext()),
}
print(dcmes)
return redirect('home')
################################### for dashboard ###################################
#path('export_csv', permission_required('auth.view_dashboard')(views.export_csv), name="export_csv"),
def export_csv(request):
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="csv_dump.csv"'
charts = create_plots.create()
final_df = pd.DataFrame()  # api: pandas.DataFrame
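# The dataset row above truncates export_csv right after `final_df = pd.DataFrame()`.
# A purely illustrative continuation (attribute names such as `chart.df` are assumptions,
# not the project's actual API) could concatenate the chart data and stream it out:
#
#     for chart in charts:
#         final_df = pd.concat([final_df, chart.df], ignore_index=True)
#     final_df.to_csv(response, index=False)
#     return response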
#!/usr/bin/env python3
# std
import copy
import json
import logging
import pandas as pd
from pathlib import PurePath, Path
from typing import Union, Optional
# 3rd
import sqlalchemy
# ours
from clusterking.util.metadata import turn_into_nested_dict, nested_dict
from clusterking.util.log import get_logger
from clusterking.util.cli import handle_overwrite
class DFMD(object):
"""DFMD = DataFrame with MetaData.
This class bundles a pandas dataframe together with metadata and
provides methods to save and load such an object.
"""
def __init__(
self,
path: Optional[Union[str, PurePath]] = None,
log: Optional[Union[str, logging.Logger]] = None,
):
"""
Initialize a DFMD object.
Args:
path: Optional: load from this file (specified as string or
:class:`pathlib.PurePath`)
log: Optional: instance of :py:class:`logging.Logger` or name of
logger to be created
"""
# These are the three attributes of this class
#: This will hold all the configuration that we will write out
self.md = None
#: :py:class:`pandas.DataFrame` to hold all of the results
self.df = None # type: Optional[pd.DataFrame]
#: Instance of :py:class:`logging.Logger`
self.log = None
# todo: remember path?
if not path:
# Initialize blank
self.md = nested_dict()
self.df = pd.DataFrame()
self.log = None
else:
self._load(path)
# Overwrite log if user wants that.
if isinstance(log, logging.Logger):
self.log = log
elif isinstance(log, str):
self.log = get_logger(log)
elif log is None:
if not self.log:
self.log = get_logger("DFMD")
else:
raise ValueError(
"Unsupported type '{}' for 'log' argument.".format(type(log))
)
# **************************************************************************
# Loading
# **************************************************************************
def _load(self, path: Union[str, PurePath]) -> None:
"""Load input file as created by
:py:meth:`~clusterking.data.DFMD.write`.
Args:
path: Path to input file
Returns:
None
"""
path = Path(path)
if not path.is_file():
raise FileNotFoundError("File '{}' doesn't exist.".format(path))
engine = sqlalchemy.create_engine("sqlite:///" + str(path.resolve()))
self.df = pd.read_sql_table("df", engine)
self.df.set_index("index", inplace=True)
md_json = pd.read_sql_table("md", engine)["md"][0]
self.md = turn_into_nested_dict(json.loads(md_json))
# **************************************************************************
# Writing
# **************************************************************************
def write(self, path: Union[str, PurePath], overwrite="ask"):
"""Write output files.
Args:
path: Path to output file
overwrite: How to proceed if output file already exists:
'ask' (ask interactively for approval if we have to overwrite),
'overwrite' (overwrite without asking), 'raise'
(raise Exception if file exists).
Default is 'ask'.
Returns:
None
"""
path = Path(path)
handle_overwrite([path], behavior=overwrite, log=self.log)
if not path.parent.is_dir():
self.log.debug("Creating directory '{}'.".format(path.parent))
path.parent.mkdir(parents=True)
engine = sqlalchemy.create_engine("sqlite:///" + str(path))
self.df.to_sql("df", engine, if_exists="replace")
# todo: perhaps it's better to use pickle in the future?
md_json = json.dumps(self.md, sort_keys=True, indent=4)
md_df = pd.DataFrame({"md": [md_json]})  # api: pandas.DataFrame
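# A minimal round-trip sketch for the DFMD class above. The file name and contents are
# made up, and the sketch assumes the clusterking package from the imports is available.
def _dfmd_roundtrip_example(path="dfmd_example.sql"):
    dfmd = DFMD(log="dfmd_example")
    dfmd.df = pd.DataFrame({"x": [1, 2, 3], "y": [0.1, 0.2, 0.3]})
    dfmd.md["origin"]["note"] = "toy example"
    dfmd.write(path, overwrite="overwrite")   # persists df and metadata to SQLite
    restored = DFMD(path)                     # loads them back via _load()
    return restored                           # restored.df should match dfmd.df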
import warnings
import numpy as np
import pandas as pd
import pytest
import woodwork as ww
from pandas.testing import assert_frame_equal
from woodwork.logical_types import (
Boolean,
Categorical,
Double,
Integer,
NaturalLanguage,
)
from rayml.pipelines.components import PerColumnImputer
from rayml.utils.woodwork_utils import infer_feature_types
@pytest.fixture
def non_numeric_df():
X = pd.DataFrame(
[
["a", "a", "a", "a"],
["b", "b", "b", "b"],
["a", "a", "a", "a"],
[np.nan, np.nan, np.nan, np.nan],
]
)
X.columns = ["A", "B", "C", "D"]
return X
def test_invalid_parameters():
with pytest.raises(ValueError):
strategies = ("impute_strategy", "mean")
PerColumnImputer(impute_strategies=strategies)
with pytest.raises(ValueError):
strategies = ["mean"]
PerColumnImputer(impute_strategies=strategies)
def test_all_strategies():
X = pd.DataFrame(
{
"A": pd.Series([2, 4, 6, np.nan]),
"B": pd.Series([4, 6, 4, np.nan]),
"C": pd.Series([6, 8, 8, np.nan]),
"D": pd.Series(["a", "a", "b", np.nan]),
}
)
X.ww.init(logical_types={"D": "categorical"})
X_expected = pd.DataFrame(
{
"A": pd.Series([2, 4, 6, 4]),
"B":
|
pd.Series([4, 6, 4, 4])
|
pandas.Series
|
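# Hedged aside: rayml appears to mirror evalml's API, where impute_strategies is a dict
# keyed by column name, e.g. PerColumnImputer(impute_strategies={"A": {"impute_strategy": "mean"}});
# that is why the tuple and plain-list forms in test_invalid_parameters raise ValueError.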
import io
import os
from datetime import datetime, timedelta
from google.cloud import datastore
import pandas as pd
from pytz import timezone
import matplotlib.pyplot as plt
from matplotlib.dates import DateFormatter
import matplotlib
import asyncio
import random
import string
bucket_name = os.environ.get('BUCKET_NAME')
token = os.environ.get('TOKEN')
def ingest_http(request):
from flask import abort
if request.method == 'POST' and request.headers['content-type'] == 'application/json':
json_payload = request.get_json(silent=True)
if json_payload["deviceId"] == token:
datastore_client = datastore.Client()
for tag in json_payload['tags']:
key = datastore_client.key('sensordata')
entity = fill_entity(datastore.Entity(key=key), json_payload, tag)
datastore_client.put(entity)
return "ok"
elif request.method == 'GET' and request.args.get('token') == token:
days = request.args.get('days')
metric = request.args.get('metric')
after_date, date_end = figure_out_date_interval(days)
result = query_data(after_date, metric)
image_data = asyncio.run(generate_images(result, days, metric))
image_names = [image_name for (image_name, image_data) in image_data]
asyncio.run(upload_images(image_data))
return generate_response(after_date, date_end, image_names)
return abort(403)
def figure_out_date_interval(days):
date_end = datetime.now(timezone('Europe/Helsinki'))
date_start = timedelta(days=int(days) if days and int(days) > 0 else 1)  # default to a 1-day window when days is missing or zero
after_date = date_end - date_start
return after_date, date_end
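# Illustrative behaviour of the corrected default above: passing None or "0" for days
# yields a 1-day window ending now (Europe/Helsinki), while "7" yields the last 7 days.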
def generate_response(after_date, date_end, image_names):
image_html = ''.join(['%s</br><img src="https://storage.cloud.google.com/%s/%s"/><br/>' %
(image_name,bucket_name ,image_name) for image_name in image_names])
date_start_formatted = after_date.strftime('%H:%M %d %b %Y')
date_end_formatted = date_end.strftime('%H:%M %d %b %Y')
return '%s - %s <br/> %s' % (date_start_formatted, date_end_formatted, image_html)
def fill_entity(entity, json_payload, tag):
entity['id'] = tag['id']
entity['name'] = tag['name']
entity['humidity'] = tag['humidity']
entity['pressure'] = tag['pressure']
entity['temperature'] = tag['temperature']
entity['txPower'] = tag['txPower']
entity['voltage'] = tag['voltage']
entity['batteryLevel'] = json_payload['batteryLevel']
entity['updateAt'] = tag['updateAt']
entity['rssi'] = tag['rssi']
return entity
async def generate_images(result, days, metric):
df = pd.DataFrame(result)  # api: pandas.DataFrame
# Author: KTH dESA Last modified by <NAME>
# Date: 07 February 2019
# Python version: 3.5
import os
import logging
import pandas as pd
from math import ceil, pi, exp, log, sqrt, radians, cos, sin, asin
# from pyproj import Proj
import numpy as np
from collections import defaultdict
from joblib import load
logging.basicConfig(format='%(asctime)s\t\t%(message)s', level=logging.DEBUG)
# General
LHV_DIESEL = 9.9445485 # (kWh/l) lower heating value
HOURS_PER_YEAR = 8760
# Columns in settlements file must match these exactly
SET_COUNTRY = 'Country' # This cannot be changed, lots of code will break
SET_X = 'X' # Coordinate in kilometres
SET_Y = 'Y' # Coordinate in kilometres
SET_X_DEG = 'X_deg' # Coordinates in degrees
SET_Y_DEG = 'Y_deg'
SET_POP = 'Pop' # Population in people per point (equally, people per km2)
SET_POP_CALIB = 'PopStartYear' # Calibrated population to reference year, same units
SET_POP_FUTURE = 'PopEndYear' # Project future population, same units
SET_GRID_DIST_CURRENT = 'GridDistCurrent' # Distance in km from current grid
SET_GRID_DIST_PLANNED = 'GridDistPlan' # Distance in km from current and future grid
SET_ROAD_DIST = 'RoadDist' # Distance in km from road network
SET_NIGHT_LIGHTS = 'NightLights' # Intensity of night time lights (from NASA), range 0 - 63
SET_TRAVEL_HOURS = 'TravelHours' # Travel time to large city in hours
SET_GHI = 'GHI' # Global horizontal irradiance in kWh/m2/day
SET_WINDVEL = 'WindVel' # Wind velocity in m/s
SET_WINDCF = 'WindCF' # Wind capacity factor as percentage (range 0 - 1)
SET_HYDRO = 'Hydropower' # Hydropower potential in kW
SET_HYDRO_DIST = 'HydropowerDist' # Distance to hydropower site in km
SET_HYDRO_FID = 'HydropowerFID' # the unique tag for each hydropower site, so it is not over-utilised
SET_SUBSTATION_DIST = 'SubstationDist'
SET_ELEVATION = 'Elevation' # in metres
SET_SLOPE = 'Slope' # in degrees
SET_LAND_COVER = 'LandCover'
SET_ROAD_DIST_CLASSIFIED = 'RoadDistClassified'
SET_SUBSTATION_DIST_CLASSIFIED = 'SubstationDistClassified'
SET_ELEVATION_CLASSIFIED = 'ElevationClassified'
SET_SLOPE_CLASSIFIED = 'SlopeClassified'
SET_LAND_COVER_CLASSIFIED = 'LandCoverClassified'
SET_COMBINED_CLASSIFICATION = 'GridClassification'
SET_GRID_PENALTY = 'GridPenalty'
SET_URBAN = 'IsUrban' # Whether the site is urban (0 or 1)
SET_ENERGY_PER_CELL = 'EnergyPerSettlement'
SET_NUM_PEOPLE_PER_HH = 'NumPeoplePerHH'
SET_ELEC_CURRENT = 'ElecStart' # If the site is currently electrified (0 or 1)
SET_ELEC_FUTURE = 'Elec_Status' # If the site has the potential to be 'easily' electrified in future
SET_ELEC_FUTURE_GRID = "Elec_Initial_Status_Grid"
SET_ELEC_FUTURE_OFFGRID = "Elec_Init_Status_Offgrid"
SET_ELEC_FUTURE_ACTUAL = "Actual_Elec_Status_"
SET_ELEC_FINAL_GRID = "GridElecIn"
SET_ELEC_FINAL_OFFGRID = "OffGridElecIn"
SET_NEW_CONNECTIONS = 'NewConnections' # Number of new people with electricity connections
SET_MIN_GRID_DIST = 'MinGridDist'
SET_LCOE_GRID = 'Grid_extension' # All lcoes in USD/kWh
SET_LCOE_SA_PV = 'SA_PV'
SET_LCOE_SA_DIESEL = 'SA_Diesel'
SET_LCOE_MG_WIND = 'MG_Wind'
SET_LCOE_MG_DIESEL = 'MG_Diesel'
SET_LCOE_MG_PV = 'MG_PV'
SET_LCOE_MG_HYDRO = 'MG_Hydro'
SET_GRID_LCOE_Round1 = "Grid_lcoe_PreElec"
SET_MIN_OFFGRID = 'Minimum_Tech_Off_grid' # The technology with lowest lcoe (excluding grid)
SET_MIN_OVERALL = 'MinimumOverall' # Same as above, but including grid
SET_MIN_OFFGRID_LCOE = 'Minimum_LCOE_Off_grid' # The lcoe value for minimum tech
SET_MIN_OVERALL_LCOE = 'MinimumOverallLCOE' # The lcoe value for overall minimum
SET_MIN_OVERALL_CODE = 'MinimumOverallCode' # And a code from 1 - 7 to represent that option
SET_MIN_CATEGORY = 'MinimumCategory' # The category with minimum lcoe (grid, minigrid or standalone)
SET_NEW_CAPACITY = 'NewCapacity' # Capacity in kW
SET_INVESTMENT_COST = 'InvestmentCost' # The investment cost in USD
SET_INVESTMENT_COST_OFFGRID = "InvestmentOffGrid"
SET_CONFLICT = "Conflict"
SET_ELEC_ORDER = "ElectrificationOrder"
SET_DYNAMIC_ORDER = "Electrification_Wave"
SET_LIMIT = "ElecStatusIn"
SET_GRID_REACH_YEAR = "GridReachYear"
SET_MIN_OFFGRID_CODE = "Off_Grid_Code"
SET_ELEC_FINAL_CODE = "FinalElecCode"
SET_DIST_TO_TRANS = "TransformerDist"
SET_TOTAL_ENERGY_PER_CELL = "TotalEnergyPerCell" # all previous + current timestep
SET_RESIDENTIAL_DEMAND = "ResidentialDemand"
SET_AGRI_DEMAND = "AgriDemand"
SET_HEALTH_DEMAND = "HealthDemand"
SET_EDU_DEMAND = "EducationDemand"
SET_COMMERCIAL_DEMAND = "CommercialDemand"
SET_GRID_CELL_AREA = 'GridCellArea'
SET_MV_CONNECT_DIST = 'MVConnectDist'
SET_HV_DIST_CURRENT = 'CurrentHVLineDist'
SET_HV_DIST_PLANNED = 'PlannedHVLineDist'
SET_MV_DIST_CURRENT = 'CurrentMVLineDist'
SET_MV_DIST_PLANNED = 'PlannedMVLineDist'
SET_ELEC_POP = 'ElecPop'
SET_WTFtier = "ResidentialDemandTier"
SET_INVEST_PER_CAPITA = "InvestmentCapita"
# Columns in the specs file must match these exactly
SPE_COUNTRY = 'Country'
SPE_POP = 'PopStartYear' # The actual population in the base year
SPE_URBAN = 'UrbanRatioStartYear' # The ratio of urban population (range 0 - 1) in base year
SPE_POP_FUTURE = 'PopEndYear'
SPE_URBAN_FUTURE = 'UrbanRatioEndYear'
SPE_URBAN_MODELLED = 'UrbanRatioModelled' # The urban ratio in the model after calibration (for comparison)
SPE_URBAN_CUTOFF = 'UrbanCutOff' # The urban cutoff population calibrated by the model, in people per km2
SPE_URBAN_GROWTH = 'UrbanGrowth' # The urban growth rate as a simple multiplier (urban pop future / urban pop present)
SPE_RURAL_GROWTH = 'RuralGrowth' # Same as for urban
SPE_NUM_PEOPLE_PER_HH_RURAL = 'NumPeoplePerHHRural'
SPE_NUM_PEOPLE_PER_HH_URBAN = 'NumPeoplePerHHUrban'
SPE_DIESEL_PRICE_LOW = 'DieselPriceLow' # Diesel price in USD/litre
SPE_DIESEL_PRICE_HIGH = 'DieselPriceHigh' # Same, with a high forecast var
SPE_GRID_PRICE = 'GridPrice' # Grid price of electricity in USD/kWh
SPE_GRID_CAPACITY_INVESTMENT = 'GridCapacityInvestmentCost' # grid capacity investments costs from TEMBA USD/kW
SPE_GRID_LOSSES = 'GridLosses' # As a ratio (0 - 1)
SPE_BASE_TO_PEAK = 'BaseToPeak' # As a ratio (0 - 1)
SPE_EXISTING_GRID_COST_RATIO = 'ExistingGridCostRatio'
SPE_MAX_GRID_DIST = 'MaxGridDist'
SPE_ELEC = 'ElecActual' # Actual current percentage electrified population (0 - 1)
SPE_ELEC_MODELLED = 'ElecModelled' # The modelled version after calibration (for comparison)
SPE_ELEC_URBAN = 'Urban_elec_ratio' # Actual electrification for urban areas
SPE_ELEC_RURAL = 'Rural_elec_ratio' # Actual electrification for rural areas
SPE_MIN_NIGHT_LIGHTS = 'MinNightLights'
SPE_MAX_GRID_EXTENSION_DIST = 'MaxGridExtensionDist'
SPE_MAX_ROAD_DIST = 'MaxRoadDist'
SPE_POP_CUTOFF1 = 'PopCutOffRoundOne'
SPE_POP_CUTOFF2 = 'PopCutOffRoundTwo'
SPE_ELEC_LIMIT = "ElecLimit"
SPE_INVEST_LIMIT = "InvestmentLimit"
SPE_DIST_TO_TRANS = "DistToTrans"
SPE_START_YEAR = "StartYear"
SPE_END_YEAR = "EndYEar"
SPE_TIMESTEP = "TimeStep"
class Technology:
"""
Used to define the parameters for each electricity access technology, and to calculate the LCOE depending on
input parameters.
"""
def __init__(self,
tech_life, # in years
base_to_peak_load_ratio,
distribution_losses=0, # percentage
connection_cost_per_hh=0, # USD/hh
om_costs=0.0, # OM costs as percentage of capital costs
capital_cost=0, # USD/kW
capacity_factor=0.9, # percentage
grid_penalty_ratio=1, # multiplier
efficiency=1.0, # percentage
diesel_price=0.0, # USD/litre
grid_price=0.0, # USD/kWh for grid electricity
standalone=False,
existing_grid_cost_ratio=0.1, # percentage
grid_capacity_investment=0.0, # USD/kW for on-grid capacity investments (excluding grid itself)
diesel_truck_consumption=0, # litres/hour
diesel_truck_volume=0, # litres
om_of_td_lines=0): # percentage
self.distribution_losses = distribution_losses
self.connection_cost_per_hh = connection_cost_per_hh
self.base_to_peak_load_ratio = base_to_peak_load_ratio
self.tech_life = tech_life
self.om_costs = om_costs
self.capital_cost = capital_cost
self.capacity_factor = capacity_factor
self.grid_penalty_ratio = grid_penalty_ratio
self.efficiency = efficiency
self.diesel_price = diesel_price
self.grid_price = grid_price
self.standalone = standalone
self.existing_grid_cost_ratio = existing_grid_cost_ratio
self.grid_capacity_investment = grid_capacity_investment
self.diesel_truck_consumption = diesel_truck_consumption
self.diesel_truck_volume = diesel_truck_volume
self.om_of_td_lines = om_of_td_lines
@classmethod
def set_default_values(cls, base_year, start_year, end_year, discount_rate):
cls.base_year = base_year
cls.start_year = start_year
cls.end_year = end_year
cls.discount_rate = discount_rate
def get_lcoe(self, energy_per_cell, people, num_people_per_hh, start_year, end_year, new_connections,
total_energy_per_cell, prev_code, grid_cell_area, conf_status=0, additional_mv_line_length=0,
capacity_factor=0,
grid_penalty_ratio=1, mv_line_length=0, travel_hours=0, elec_loop=0, productive_nodes=0,
additional_transformer=0,
get_investment_cost=False,
get_investment_cost_lv=False, get_investment_cost_mv=False, get_investment_cost_hv=False,
get_investment_cost_transformer=False, get_investment_cost_connection=False,
hybrid=False, GHI=0,get_invesment_transmission=False):
"""
Calculates the LCOE depending on the parameters. Optionally calculates the investment cost instead.
The only required parameters are energy_per_cell, people and num_people_per_hh
additional_mv_line_length required for grid
capacity_factor required for PV and wind
mv_line_length required for hydro
travel_hours required for diesel
"""
# RUN_PARAM: Here are the assumptions related to cost and physical properties of grid extension
#elements
# REVIEW - A final revision is needed before publishing
HV_line_type = 115 # kV
HV_line_cost = 117000 # 99000$/km for 69kV, 117000 $/km for 115 kV
MV_line_type = 33 # kV
MV_line_amperage_limit = 8.0 # Ampere (A)
MV_line_cost = 9000 # $/km for 11-33 kV
LV_line_type = 0.240 # kV
LV_line_cost = 5000 # $/km for 0.4 kV
LV_line_max_length = 0.5 # km
service_Transf_type = 50 # kVa
service_Transf_cost = 3500 # $/unit
max_nodes_per_serv_trans = 300 # maximum number of nodes served by each service transformer
MV_LV_sub_station_type = 400 # kVa
MV_LV_sub_station_cost = 10000 # $/unit
MV_MV_sub_station_cost = 10000 # $/unit
HV_LV_sub_station_type = 1000 # kVa
HV_LV_sub_station_cost = 25000 # $/unit
HV_MV_sub_station_cost = 25000 # $/unit
power_factor = 0.9
load_moment = 9643 # for 50mm aluminum conductor under 5% voltage drop (kW m)
# simultaneous_usage = 0.06 # Not used eventually - maybe in an updated version
if people == 0:
# If there are no people, the investment cost is zero.
if get_investment_cost:
return 0
# Otherwise we set the people low (prevent div/0 error) and continue.
else:
people = 0.00001
if energy_per_cell == 0:
# If there is no demand, the investment cost is zero.
if get_investment_cost:
return 0
# Otherwise we set the energy low (prevent div/0 error) and continue.
else:
energy_per_cell = 0.000000000001
if grid_cell_area <= 0:
grid_cell_area = 0.001
if grid_penalty_ratio == 0:
grid_penalty_ratio = self.grid_penalty_ratio
#mg_diesel_calc
# If a new capacity factor isn't given, use the class capacity factor (for hydro, diesel etc)
if capacity_factor == 0:
capacity_factor = self.capacity_factor
def distribution_network(people, energy_per_cell):
# check whether the people and energy-per-cell values are zero or negative
if energy_per_cell <= 0:
energy_per_cell = 0.0001
try:
int(energy_per_cell)
except ValueError:
energy_per_cell = 0.0001
if people <= 0:
people = 0.0001
consumption = energy_per_cell # kWh/year
# The demand taking into account distribution losses
average_load = consumption / (1 - self.distribution_losses) / HOURS_PER_YEAR # kW
# Calculation for the peak demand
peak_load = average_load / self.base_to_peak_load_ratio # kW
try:
int(peak_load)
except ValueError:
peak_load = 1
# Sizing HV/MV
# ratio between the cost of HV lines and MV lines
HV_to_MV_lines = HV_line_cost / MV_line_cost # 117000 / 9000 = 13 with the values above
max_MV_load = MV_line_amperage_limit * MV_line_type * HV_to_MV_lines # 8 * 33 * 13 = 3432 with the values above
MV_km = 0
HV_km = 0
if peak_load <= max_MV_load and additional_mv_line_length < 50 and self.grid_price > 0:
MV_amperage = MV_LV_sub_station_type / MV_line_type # 400 / 33 = 12.1
No_of_MV_lines = ceil(peak_load / (MV_amperage * MV_line_type)) # ceil(peak_load / (12.1 * 33))
MV_km = additional_mv_line_length * No_of_MV_lines
elif self.grid_price > 0:
HV_amperage = HV_LV_sub_station_type / HV_line_type
No_of_HV_lines = ceil(peak_load / (HV_amperage * HV_line_type))
HV_km = additional_mv_line_length * No_of_HV_lines
# Power factors
Smax = peak_load / power_factor # peak_load / 0.9
max_transformer_area = pi * LV_line_max_length ** 2
total_nodes = (people / num_people_per_hh) + productive_nodes
# Transformers
try:
no_of_service_transf = ceil(max(Smax / service_Transf_type, total_nodes / max_nodes_per_serv_trans,
grid_cell_area / max_transformer_area))
except ValueError:
no_of_service_transf = 1
transformer_radius = ((grid_cell_area / no_of_service_transf) / pi) ** 0.5
transformer_nodes = total_nodes / no_of_service_transf
transformer_load = peak_load / no_of_service_transf
cluster_radius = (grid_cell_area / pi) ** 0.5
# Sizing LV lines in settlement
if 2 / 3 * cluster_radius * transformer_load * 1000 < load_moment:
cluster_lv_lines_length = 2 / 3 * cluster_radius * no_of_service_transf
cluster_mv_lines_length = 0
else:
cluster_lv_lines_length = 0
# cluster_mv_lines_length = 2 / 3 * cluster_radius * no_of_service_transf
cluster_mv_lines_length = 2 * transformer_radius * no_of_service_transf
hh_area = grid_cell_area / total_nodes
hh_diameter = 2 * ((hh_area / pi) ** 0.5)
transformer_lv_lines_length = hh_diameter * total_nodes
No_of_HV_MV_subs = 0
No_of_MV_MV_subs = 0
No_of_HV_LV_subs = 0
No_of_MV_LV_subs = 0
No_of_HV_MV_subs += additional_transformer # to connect the MV line to the HV grid
if cluster_mv_lines_length > 0 and HV_km > 0:
No_of_HV_MV_subs = ceil(peak_load / HV_LV_sub_station_type) # 1
elif cluster_mv_lines_length > 0 and MV_km > 0:
No_of_MV_MV_subs = ceil(peak_load / MV_LV_sub_station_type) # 1
elif cluster_lv_lines_length > 0 and HV_km > 0:
No_of_HV_LV_subs = ceil(peak_load / HV_LV_sub_station_type) # 1
else:
No_of_MV_LV_subs = ceil(peak_load / MV_LV_sub_station_type) # 1
LV_km = cluster_lv_lines_length + transformer_lv_lines_length
MV_km = cluster_mv_lines_length
return HV_km, MV_km, cluster_mv_lines_length, LV_km, no_of_service_transf, \
No_of_HV_MV_subs, No_of_MV_MV_subs, No_of_HV_LV_subs, No_of_MV_LV_subs, \
consumption, peak_load, total_nodes
# end of the distribution_network helper function
# REVIEW - Andreas, the new connections column was modified; please check whether this creates any problem in this version of the code, as it is hard to follow without comments. I assume this is to ensure we do not oversize the network; I believe it is fine though.
# if people differs from new connections
if people != new_connections and (prev_code == 1 or prev_code == 4 or prev_code == 5 or
prev_code == 6 or prev_code == 7):
# last year
HV_km1, MV_km1, cluster_mv_lines_length1, cluster_lv_lines_length1, no_of_service_transf1, \
No_of_HV_MV_subs1, No_of_MV_MV_subs1, No_of_HV_LV_subs1, No_of_MV_LV_subs1, \
generation_per_year1, peak_load1, total_nodes1 = distribution_network(people, total_energy_per_cell)
# first year
HV_km2, MV_km2, cluster_mv_lines_length2, cluster_lv_lines_length2, no_of_service_transf2, \
No_of_HV_MV_subs2, No_of_MV_MV_subs2, No_of_HV_LV_subs2, No_of_MV_LV_subs2, \
generation_per_year2, peak_load2, total_nodes2 = \
distribution_network(people=(people - new_connections),
energy_per_cell=(total_energy_per_cell - energy_per_cell))
hv_lines_total_length = max(HV_km1 - HV_km2, 0)
mv_lines_connection_length = max(MV_km1 - MV_km2, 0)
mv_lines_distribution_length = max(cluster_lv_lines_length1 - cluster_lv_lines_length2, 0)
total_lv_lines_length = max(cluster_lv_lines_length1 - cluster_lv_lines_length2, 0)
num_transformers = max(no_of_service_transf1 - no_of_service_transf2, 0)
generation_per_year = max(generation_per_year1 - generation_per_year2, 0)
peak_load = max(peak_load1 - peak_load2, 0)
No_of_HV_LV_substation = max(No_of_HV_LV_subs1 - No_of_HV_LV_subs2, 0)
No_of_HV_MV_substation = max(No_of_HV_MV_subs1 - No_of_HV_MV_subs2, 0)
No_of_MV_MV_substation = max(No_of_MV_MV_subs1 - No_of_MV_MV_subs2, 0)
No_of_MV_LV_substation = max(No_of_MV_LV_subs1 - No_of_MV_LV_subs2, 0)
total_nodes = max(total_nodes1 - total_nodes2, 0)
else:
# there could be a mistake here, as total_lv_lines_length does not correspond
# to the return order of the distribution_network function
hv_lines_total_length, mv_lines_connection_length, mv_lines_distribution_length, \
total_lv_lines_length, num_transformers, \
No_of_HV_MV_substation, No_of_MV_MV_substation, No_of_HV_LV_substation, No_of_MV_LV_substation, \
generation_per_year, peak_load, total_nodes = distribution_network(people, energy_per_cell)
try:
int(conf_status)
except ValueError:
conf_status = 0
conf_grid_pen = {0: 1, 1: 1.1, 2: 1.25, 3: 1.5, 4: 2}
# The investment and O&M costs are different for grid and non-grid solutions
# choosing between on-grid and off-grid solutions
# mg_diesel_calc
if self.grid_price > 0:
td_investment_cost = (hv_lines_total_length * HV_line_cost * (
1 + self.existing_grid_cost_ratio * elec_loop) +
mv_lines_connection_length * MV_line_cost * (
1 + self.existing_grid_cost_ratio * elec_loop) +
total_lv_lines_length * LV_line_cost +
mv_lines_distribution_length * MV_line_cost +
num_transformers * service_Transf_cost +
total_nodes * self.connection_cost_per_hh +
No_of_HV_LV_substation * HV_LV_sub_station_cost +
No_of_HV_MV_substation * HV_MV_sub_station_cost +
No_of_MV_MV_substation * MV_MV_sub_station_cost +
No_of_MV_LV_substation * MV_LV_sub_station_cost) * conf_grid_pen[conf_status]
td_investment_cost = td_investment_cost * grid_penalty_ratio
td_om_cost = td_investment_cost * self.om_of_td_lines
total_investment_cost = td_investment_cost
total_om_cost = td_om_cost
fuel_cost = self.grid_price # / (1 - self.distribution_losses) REVIEW
else:
# TODO: Possibly add substation here for mini-grids
conflict_sa_pen = {0: 1, 1: 1.03, 2: 1.07, 3: 1.125, 4: 1.25}
conflict_mg_pen = {0: 1, 1: 1.05, 2: 1.125, 3: 1.25, 4: 1.5}
total_lv_lines_length *= 0 if self.standalone else 1
mv_lines_distribution_length *= 0 if self.standalone else 1
# Cost of MV and LV lines
mv_total_line_cost = MV_line_cost * mv_lines_distribution_length * conflict_mg_pen[conf_status]
lv_total_line_cost = LV_line_cost * total_lv_lines_length * conflict_mg_pen[conf_status]
service_transformer_total_cost = 0 if self.standalone else num_transformers * service_Transf_cost * \
conflict_mg_pen[conf_status]
# calculation of the installed capacity
installed_capacity = peak_load / capacity_factor
# Investment cost for the distribution of electricity
td_investment_cost = mv_total_line_cost + lv_total_line_cost + total_nodes * self.connection_cost_per_hh + service_transformer_total_cost
td_om_cost = td_investment_cost * self.om_of_td_lines * conflict_sa_pen[conf_status] if self.standalone \
else td_investment_cost * self.om_of_td_lines * conflict_mg_pen[conf_status]
# Decide whether the technology uses diesel
if self.standalone and self.diesel_price == 0:
if installed_capacity / (people / num_people_per_hh) < 0.020:
capital_investment = installed_capacity * self.capital_cost[0.020] * conflict_sa_pen[conf_status]
total_om_cost = td_om_cost + (self.capital_cost[0.020] * self.om_costs * conflict_sa_pen[
conf_status] * installed_capacity)
elif installed_capacity / (people / num_people_per_hh) < 0.050:
capital_investment = installed_capacity * self.capital_cost[0.050] * conflict_sa_pen[conf_status]
total_om_cost = td_om_cost + (self.capital_cost[0.050] * self.om_costs * conflict_sa_pen[
conf_status] * installed_capacity)
elif installed_capacity / (people / num_people_per_hh) < 0.100:
capital_investment = installed_capacity * self.capital_cost[0.100] * conflict_sa_pen[conf_status]
total_om_cost = td_om_cost + (self.capital_cost[0.100] * self.om_costs * conflict_sa_pen[
conf_status] * installed_capacity)
elif installed_capacity / (people / num_people_per_hh) < 0.200:
capital_investment = installed_capacity * self.capital_cost[0.200] * conflict_sa_pen[conf_status]
total_om_cost = td_om_cost + (self.capital_cost[0.200] * self.om_costs * conflict_sa_pen[
conf_status] * installed_capacity)
else:
capital_investment = installed_capacity * self.capital_cost[0.300] * conflict_sa_pen[conf_status]
total_om_cost = td_om_cost + (self.capital_cost[0.300] * self.om_costs * conflict_sa_pen[
conf_status] * installed_capacity)
else:
# Investment cost of the diesel system, depending on whether it is standalone or mini-grid
capital_investment = installed_capacity * self.capital_cost * conflict_sa_pen[
conf_status] if self.standalone \
else installed_capacity * self.capital_cost * conflict_mg_pen[conf_status]
# operation and maintenance
total_om_cost = td_om_cost + (self.capital_cost * conflict_sa_pen[conf_status] * self.om_costs *
installed_capacity) if self.standalone \
else td_om_cost + (self.capital_cost * conflict_mg_pen[conf_status] * self.om_costs * installed_capacity)
# addition of the investment in the technology and the transmission/distribution network
# here I have to change
total_investment_cost = td_investment_cost + capital_investment
# If a diesel price has been passed, the technology is diesel
# And we apply the Szabo formula to calculate the transport cost for the diesel
# p = (p_d + 2*p_d*consumption*time/volume)*(1/mu)*(1/LHVd)
# Otherwise it's hydro/wind etc with no fuel cost
conf_diesel_pen = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5} # penalization rate for the diesel
# here I will have to modify
if self.diesel_price > 0:
fuel_cost = (self.diesel_price + 2 * self.diesel_price * self.diesel_truck_consumption * (
travel_hours * conf_diesel_pen[conf_status]) /
self.diesel_truck_volume) / LHV_DIESEL / self.efficiency
else:
fuel_cost = 0
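# Worked illustration of the transport term above (hypothetical numbers, no conflict
# penalty): with diesel at 0.8 USD/l, a truck consuming 14 l/h, 2 h of travel, a
# 15,000 l truck volume and a generator efficiency of 0.28:
# fuel_cost = (0.8 + 2*0.8*14*2/15000) / 9.9445485 / 0.28 ≈ 0.29 USD/kWh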
# Perform the time-value LCOE calculation
project_life = int(end_year - self.base_year + 1)
# check base year
reinvest_year = 0
step = int(start_year - self.base_year)
# If the technology life is less than the project life,
# we will have to invest again within the project period to replace it
# mg_diesel_calc
if self.tech_life + step < project_life:
reinvest_year = self.tech_life + step
# generating the years of the project
year = np.arange(project_life)
# Putting the demand in the years of the project
el_gen = generation_per_year * np.ones(project_life)
el_gen[0:step] = 0  # no generation is counted before the start-year step
# creating the discount factors
discount_factor = (1 + self.discount_rate) ** year
investments = np.zeros(project_life)
investments[step] = total_investment_cost
# Calculate the year of re-investment if tech_life is smaller than project life
if reinvest_year:
investments[reinvest_year] = total_investment_cost
# Calculate salvage value if tech_life is bigger than project life
salvage = np.zeros(project_life)
if reinvest_year > 0:
used_life = (project_life - step) - self.tech_life
else:
used_life = project_life - step - 1
salvage[-1] = total_investment_cost * (1 - used_life / self.tech_life)
# salvage[project_life - 1] = total_investment_cost * (1 - used_life / self.tech_life)
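# Example of the salvage logic above (illustrative numbers): with tech_life = 20,
# project_life = 13 and step = 0, used_life = 12, so 1 - 12/20 = 40% of the total
# investment cost is credited back as salvage in the final project year.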
operation_and_maintenance = total_om_cost * np.ones(project_life)
operation_and_maintenance[0:step] = 0
# fuel cost of each year
fuel = el_gen * fuel_cost
fuel[0:step] = 0
if hybrid:
# Demand calculation
filename = 'Regressions/demand_regression.joblib'
lm_D = load(filename)
HouseHolds = np.array(people/num_people_per_hh)
HouseHolds = HouseHolds.reshape(-1, 1)
Demand = lm_D.predict(HouseHolds)/(1 - self.distribution_losses)
Data = pd.read_excel('Regressions/Tecno_economic_parameters.xlsx', index_col=0)  # default header row supplies the 'Value' column label used below
fuel_cost = (self.diesel_price + 2 * self.diesel_price * self.diesel_truck_consumption * (
travel_hours ) /
self.diesel_truck_volume) # LCOE
Data.loc['Demand','Value'] = Demand
Data.loc['Fuel Cost','Value'] = fuel_cost
print(fuel_cost)
Data.loc['GHI','Value'] = GHI
Data = Data.transpose()
Data = np.array(Data)
if get_investment_cost:
filename = 'Regressions/NPC_regression.joblib'
lm_NPC = load(filename)
NPC = lm_NPC.predict(Data)
return NPC + td_investment_cost
if get_invesment_transmission:
return td_investment_cost
else:
filename = 'Regressions/LCOE_regression.joblib'
lm_LCOE = load(filename)
LCOE = lm_LCOE.predict(list(Data))
filename = 'Regressions/demand_regression_Actualize.joblib'
lm_D_A = load(filename)
demand_actualize = lm_D_A.predict(HouseHolds)/(1 - self.distribution_losses)
LCOE += (td_investment_cost/demand_actualize)*1000
#print(mv_total_line_cost,lv_total_line_cost,total_nodes * self.connection_cost_per_hh,
# service_transformer_total_cost)
#print(td_investment_cost)
#if service_transformer_total_cost != 7000: print(service_transformer_total_cost)
return LCOE
else:
# normal onsset code
# So we also return the total investment cost for this number of people
if get_investment_cost:
discounted_investments = investments / discount_factor
return np.sum(discounted_investments) + (self.grid_capacity_investment * peak_load)
elif get_investment_cost_lv:
return total_lv_lines_length * (LV_line_cost * conf_grid_pen[conf_status])
elif get_investment_cost_mv:
return (mv_lines_connection_length * MV_line_cost * (1 + self.existing_grid_cost_ratio * elec_loop) +
mv_lines_distribution_length * MV_line_cost) * conf_grid_pen[conf_status]
elif get_investment_cost_hv:
return hv_lines_total_length * (HV_line_cost * conf_grid_pen[conf_status]) * \
(1 + self.existing_grid_cost_ratio * elec_loop)
elif get_investment_cost_transformer:
return (No_of_HV_LV_substation * HV_LV_sub_station_cost +
No_of_HV_MV_substation * HV_MV_sub_station_cost +
No_of_MV_MV_substation * MV_MV_sub_station_cost +
No_of_MV_LV_substation * MV_LV_sub_station_cost) * conf_grid_pen[conf_status]
elif get_investment_cost_connection:
return total_nodes * self.connection_cost_per_hh
else:
# here the LCOE is computed
discounted_costs = (investments + operation_and_maintenance + fuel - salvage) / discount_factor
discounted_generation = el_gen / discount_factor
return np.sum(discounted_costs) / np.sum(discounted_generation)
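# A minimal, hypothetical usage sketch of the Technology class above. All numbers are
# illustrative placeholders rather than calibrated model inputs; the point is only to
# show how set_default_values() and get_lcoe() fit together.
def _example_mg_pv_lcoe():
    Technology.set_default_values(base_year=2018, start_year=2018, end_year=2030,
                                  discount_rate=0.08)
    mg_pv = Technology(tech_life=20,
                       base_to_peak_load_ratio=0.9,
                       distribution_losses=0.05,
                       connection_cost_per_hh=100,
                       om_costs=0.02,
                       capital_cost=2950,  # USD/kW, placeholder
                       capacity_factor=0.2,
                       om_of_td_lines=0.02)
    # returns an LCOE in USD/kWh for a purely illustrative settlement
    return mg_pv.get_lcoe(energy_per_cell=50000, people=500, num_people_per_hh=5,
                          start_year=2018, end_year=2030, new_connections=500,
                          total_energy_per_cell=50000, prev_code=99, grid_cell_area=1)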
class SettlementProcessor:
"""
Processes the dataframe and adds all the columns to determine the cheapest option and the final costs and summaries
"""
def __init__(self, path):
try:
self.df = pd.read_csv(path)
except FileNotFoundError:
print('You need to first split into a base directory and prep!')
raise
def condition_df(self, country):
"""
Do any initial data conditioning that may be required.
"""
logging.info('Ensure that columns that are supposed to be numeric are numeric')
self.df[SET_GHI] = pd.to_numeric(self.df[SET_GHI], errors='coerce')
self.df[SET_WINDVEL] = pd.to_numeric(self.df[SET_WINDVEL], errors='coerce')
self.df[SET_NIGHT_LIGHTS] = pd.to_numeric(self.df[SET_NIGHT_LIGHTS], errors='coerce')
self.df[SET_ELEVATION] = pd.to_numeric(self.df[SET_ELEVATION], errors='coerce')
self.df[SET_SLOPE] = pd.to_numeric(self.df[SET_SLOPE], errors='coerce')
self.df[SET_LAND_COVER] = pd.to_numeric(self.df[SET_LAND_COVER], errors='coerce')  # api: pandas.to_numeric
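# Standalone illustration of the errors='coerce' behaviour relied on in condition_df:
# non-numeric entries become NaN instead of raising, e.g.
#     pd.to_numeric(pd.Series(["3.2", "n/a", None]), errors="coerce")
#     # -> 3.2, NaN, NaN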