prompt (string, 19 to 1.03M chars) | completion (string, 4 to 2.12k chars) | api (string, 8 to 90 chars) |
---|---|---|
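Each row below pairs a code prompt with the completion that fills the elided call and the fully qualified api it resolves to. A minimal sketch of how rows with this schema could be loaded for inspection, assuming a hypothetical rows.jsonl export of the three string fields (the real file name and format may differ):

import pandas as pd

# "rows.jsonl" is a hypothetical export of the rows shown below
rows = pd.read_json("rows.jsonl", lines=True)
print(rows.columns.tolist())              # expected: ['prompt', 'completion', 'api']
print(rows["api"].value_counts().head())  # most frequently targeted APIs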
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 17 18:11:05 2020
@author: charlie.henry
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 28 15:46:31 2019
@author: berkunis
"""
##############################################01_02_PythonLibraries#####################################################
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.impute import SimpleImputer
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import LabelEncoder
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.svm import SVR
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
#import data
data = | pd.read_csv("volumes-3.csv") | pandas.read_csv |
from ._learner import KilobotLearner
import os
import gc
import logging
from typing import Generator
from cluster_work import InvalidParameterArgument
from kb_learning.kernel import KilobotEnvKernel
from kb_learning.kernel import compute_median_bandwidth, compute_median_bandwidth_kilobots, angle_from_swarm_mean, \
step_towards_center
from kb_learning.ac_reps.lstd import LeastSquaresTemporalDifference
from kb_learning.ac_reps.reps import ActorCriticReps
from kb_learning.ac_reps.spwgp import SparseWeightedGP
from kb_learning.envs.sampler import ParallelSARSSampler
from kb_learning.envs import register_object_relative_env
# import matplotlib
# matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pickle
logger = logging.getLogger('kb_learning')
class ACRepsLearner(KilobotLearner):
_restore_supported = True
_default_params = {
'sampling': {
'num_kilobots': 15,
'w_factor': .0,
'num_episodes': 100,
'num_steps_per_episode': 125,
'num_SARS_samples': 10000,
'num_workers': None,
'object_shape': 'quad',
'object_width': .15,
'object_height': .15,
'save_trajectories': True,
'light_type': 'circular',
'light_radius': .2,
'observe_object': False
},
'kernel': {
'kb_dist': 'embedded',
'l_dist': 'maha',
'a_dist': 'maha',
'w_dist': 'maha',
'bandwidth_factor_kb': .3,
'bandwidth_factor_light': .55,
'bandwidth_factor_action': .8,
'bandwidth_factor_extra': .3,
'rho': .5,
'variance': 1.,
},
'learn_iterations': 1,
'lstd': {
'discount_factor': .99,
'num_features': 1000,
'num_policy_samples': 5,
},
'ac_reps': {
'epsilon': .3,
'alpha': .0
},
'gp': {
'prior_variance': 7e-5,
'noise_variance': 1e-5,
'num_sparse_states': 1000,
'kb_dist': 'embedded',
'l_dist': 'maha',
'w_dist': 'maha',
'bandwidth_factor_kb': .3,
'bandwidth_factor_light': .55,
'bandwidth_factor_extra': .3,
'rho': .5,
'use_prior_mean': True
},
'eval': {
'num_episodes': 50,
'num_steps_per_episode': 100,
'save_trajectories': True
}
}
def __init__(self):
super().__init__()
self.state_preprocessor = None
self.state_kernel = None
self.state_action_kernel = None
self.policy = None
self.sars = None
self.theta = None
self.it_sars = None
self.eval_sars = None
self.it_info = None
self.eval_info = None
self.lstd_samples = None
self.sampler = None
self.kilobots_columns = None
self.light_columns = None
self.object_columns = None
self.extra_columns = None
self.action_columns = None
self.reward_columns = None
self.state_columns = None
self.next_state_columns = None
self.sars_columns = None
self.state_object_columns = None
self._light_dimensions = None
self._action_dimensions = None
def iterate(self, config: dict, rep: int, n: int) -> dict:
sampling_params = self._params['sampling']
np.random.seed(self._seed)
logger.info('sampling environment')
self.sampler.seed = self._seed + 123
self.it_sars, self.it_info = self.sampler(self.policy)
samples_episode_reward = self.it_sars['R'].groupby(level=0).sum()
samples_episode_reward_mean = samples_episode_reward.mean()
logger.info('samples episode reward mean: {:.6f}'.format(samples_episode_reward_mean))
# add samples to data set and select subset
_extended_sars = self.sars.append(self.it_sars, ignore_index=True)
# _extended_sars.reset_index(drop=True)
if n == 0:
kernel_params = self._params['kernel']
gp_params = self._params['gp']
# compute kernel parameters
logger.debug('computing kernel bandwidths.')
if self.state_preprocessor:
bandwidth_kb = compute_median_bandwidth(_extended_sars[self.kilobots_columns], sample_size=500,
preprocessor=self.state_preprocessor)
else:
bandwidth_kb = compute_median_bandwidth_kilobots(_extended_sars[self.kilobots_columns], sample_size=500)
self.state_kernel.kilobots_bandwidth = bandwidth_kb * kernel_params['bandwidth_factor_kb']
self.state_action_kernel.kilobots_bandwidth = bandwidth_kb * kernel_params['bandwidth_factor_kb']
self.policy.kernel.kilobots_bandwidth = bandwidth_kb * gp_params['bandwidth_factor_kb']
if self._light_dimensions:
bandwidth_l = compute_median_bandwidth(_extended_sars[self.light_columns], sample_size=500)
self.state_kernel.light_bandwidth = bandwidth_l * kernel_params['bandwidth_factor_light']
self.state_action_kernel.light_bandwidth = bandwidth_l * kernel_params['bandwidth_factor_light']
self.policy.kernel.light_bandwidth = bandwidth_l * gp_params['bandwidth_factor_light']
if self.extra_columns is not None:
bandwidth_extra = compute_median_bandwidth(_extended_sars[self.extra_columns], sample_size=500)
self.state_kernel.extra_dim_bandwidth = bandwidth_extra * kernel_params['bandwidth_factor_extra']
self.state_action_kernel.extra_dim_bandwidth = bandwidth_extra * kernel_params['bandwidth_factor_extra']
self.policy.kernel.extra_dim_bandwidth = bandwidth_extra * gp_params['bandwidth_factor_extra']
# self.state_kernel.extra_dim_bandwidth = kernel_params['bandwidth_factor_extra']
# self.state_action_kernel.extra_dim_bandwidth = kernel_params['bandwidth_factor_extra']
# self.policy.kernel.extra_dim_bandwidth = gp_params['bandwidth_factor_extra']
bandwidth_a = compute_median_bandwidth(_extended_sars['A'], sample_size=500)
bandwidth_a *= kernel_params['bandwidth_factor_action']
self.state_action_kernel.action_bandwidth = bandwidth_a
logger.debug('selecting SARS samples.')
if _extended_sars.shape[0] <= sampling_params['num_SARS_samples']:
self.sars = _extended_sars
else:
# _sampling_weights = _extended_sars.R.values - _extended_sars.R.min()
self.sars = _extended_sars.sample(sampling_params['num_SARS_samples']) # , weights=_sampling_weights)
self.sars.reset_index(drop=True, inplace=True)
del _extended_sars
# compute feature matrices
logger.debug('selecting lstd samples.')
# lstd_reference = select_reference_set_by_kernel_activation(data=self.sars[['S', 'A']],
# size=self._params['lstd']['num_features'],
# kernel_function=self.state_kernel,
# batch_size=10)
#
# self.lstd_samples = self.sars.loc[lstd_reference][['S', 'A']]
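# use a uniform random subsample of (state, action) pairs as centres for the LSTD features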
self.lstd_samples = self.sars[['S', 'A']].sample(self._params['lstd']['num_features'])
def state_features(state):
if state.ndim == 1:
state = state.reshape((1, -1))
return self.state_kernel(state, self.lstd_samples['S'].values)
self._old_state_features = state_features
def state_action_features(state, action):
if state.ndim == 1:
state = state.reshape((1, -1))
if action.ndim == 1:
action = action.reshape((1, -1))
return self.state_action_kernel(np.c_[state, action], self.lstd_samples.values)
self._old_state_action_features = state_action_features
# compute state-action features
logger.info('compute state-action features')
phi_SA = state_action_features(self.sars['S'].values, self.sars[['A']].values)
# compute state features
logger.info('compute state features')
phi_S = state_features(self.sars['S'].values)
for i in range(self._params['learn_iterations']):
# compute next state-action features
logger.info('compute next state-action features')
policy_samples = self._params['lstd']['num_policy_samples']
# next_actions = np.array([self.policy(self.sars['S_'].values) for _ in range(policy_samples)]).mean(axis=0)
next_actions = self.policy.get_mean(self.sars['S_'].values)
phi_SA_next = state_action_features(self.sars['S_'].values, next_actions)
# learn theta (parameters of Q-function) using lstd
logger.info('learning theta [LSTD]')
lstd = LeastSquaresTemporalDifference()
lstd.discount_factor = self._params['lstd']['discount_factor']
self.theta = lstd.learn_q_function(phi_SA, phi_SA_next, rewards=self.sars[['R']].values)
# compute q-function
logger.debug('compute q-function')
q_fct = phi_SA.dot(self.theta)
# TODO add generalized advantage here
# compute sample weights using AC-REPS
logger.info('learning weights [AC-REPS]')
ac_reps = ActorCriticReps()
ac_reps.epsilon = self._params['ac_reps']['epsilon']
ac_reps.alpha = self._params['ac_reps']['alpha']
weights, self.theta = ac_reps.compute_weights(q_fct, phi_S, return_theta=True)
# get subset for sparse GP
logger.debug('select samples for GP')
# weights_sort_index = np.argsort(weights)
# gp_reference = pd.Index(weights_sort_index[-self._params['gp']['num_sparse_states']:])
gp_reference = pd.Index(np.random.choice(self.sars.index.values,
size=self._params['gp']['num_sparse_states'],
replace=False, p=weights))
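# the sparse-GP reference states are drawn without replacement, using the AC-REPS weights as sampling probabilities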
# gp_reference = select_reference_set_by_kernel_activation(data=self._SARS[['S']],
# size=self._params['gp']['num_sparse_states'],
# kernel_function=self._state_kernel,
# batch_size=10, start_from=lstd_reference)
gp_samples = self.sars['S'].loc[gp_reference].values
# gp_samples = self._SARS['S'].sample(self._params['gp']['num_sparse_states']).values
# fit weighted GP to samples
logger.info('fitting GP to policy')
# self.policy = self._init_policy(self.state_kernel,
# (self.sampler.env.action_space.low, self.sampler.env.action_space.high))
self.policy.train(inputs=self.sars['S'].values, outputs=self.sars['A'].values, weights=weights,
sparse_inputs=gp_samples, optimize=True)
# evaluate policy
logger.info('evaluating policy')
self.sampler.seed = 5555
self.policy.eval_mode = True
self.eval_sars, self.eval_info = self.sampler(self.policy,
num_episodes=self._params['eval']['num_episodes'],
num_steps_per_episode=self._params['eval'][
'num_steps_per_episode'])
self.policy.eval_mode = False
eval_episode_reward = self.eval_sars['R'].groupby(level=0).sum()
eval_episode_reward_mean = eval_episode_reward.mean()
logger.info('eval mean episode reward: {:.6f}'.format(eval_episode_reward_mean))
return_dict = dict(samples_episode_reward_mean=samples_episode_reward_mean,
eval_episode_reward_mean=eval_episode_reward_mean)
gc.collect()
return return_dict
def reset(self, config: dict, rep: int) -> None:
self._init_SARS()
self.state_kernel, self.state_action_kernel = self._init_kernels()
self.sampler = self._init_sampler()
self.policy = self._init_policy(output_bounds=(self.sampler.env.action_space.low,
self.sampler.env.action_space.high))
def finalize(self):
del self.sampler
self.sampler = None
def _init_kernels(self):
kernel_params = self._params['kernel']
sampling_params = self._params['sampling']
if kernel_params['kb_dist'] == 'mean':
from kb_learning.kernel import compute_mean_position
self.state_preprocessor = compute_mean_position
elif kernel_params['kb_dist'] == 'mean-cov':
from kb_learning.kernel import compute_mean_and_cov_position
self.state_preprocessor = compute_mean_and_cov_position
if sampling_params['light_type'] == 'circular':
self._light_dimensions = 2
self._action_dimensions = 2
elif sampling_params['light_type'] == 'linear':
self._light_dimensions = 0
self._action_dimensions = 1
elif sampling_params['light_type'] == 'dual':
self._light_dimensions = 4
self._action_dimensions = 4
extra_dim = 0
if sampling_params['w_factor'] is None:
extra_dim += 1
if sampling_params['observe_object']:
if sampling_params['observe_object'] == 'orientation':
extra_dim += 2
elif sampling_params['observe_object'] == 'position':
extra_dim += 2
elif sampling_params['observe_object'] == 'pose' or sampling_params['observe_object'] is True:
extra_dim += 4
state_kernel = KilobotEnvKernel(rho=kernel_params['rho'],
variance=kernel_params['variance'],
kilobots_dim=sampling_params['num_kilobots'] * 2,
light_dim=self._light_dimensions,
extra_dim=extra_dim,
kilobots_dist_class=kernel_params['kb_dist'],
light_dist_class=kernel_params['l_dist'],
extra_dim_dist_class=kernel_params['w_dist'])
state_action_kernel = KilobotEnvKernel(rho=kernel_params['rho'],
variance=kernel_params['variance'],
kilobots_dim=sampling_params['num_kilobots'] * 2,
light_dim=self._light_dimensions,
extra_dim=extra_dim,
action_dim=self._action_dimensions,
kilobots_dist_class=kernel_params['kb_dist'],
light_dist_class=kernel_params['l_dist'],
extra_dim_dist_class=kernel_params['w_dist'],
action_dist_class=kernel_params['a_dist'])
return state_kernel, state_action_kernel
def _init_policy(self, output_bounds):
kernel_params = self._params['gp']
gp_params = self._params['gp']
sampling_params = self._params['sampling']
extra_dim = 0
if sampling_params['w_factor'] is None:
extra_dim += 1
if sampling_params['observe_object']:
if sampling_params['observe_object'] == 'orientation':
extra_dim += 2
elif sampling_params['observe_object'] == 'position':
extra_dim += 2
elif sampling_params['observe_object'] == 'pose' or sampling_params['observe_object'] is True:
extra_dim += 4
gp_kernel = KilobotEnvKernel(rho=gp_params['rho'],
variance=gp_params['prior_variance'],
kilobots_dim=sampling_params['num_kilobots'] * 2,
light_dim=self._light_dimensions,
extra_dim=extra_dim,
kilobots_dist_class=kernel_params['kb_dist'],
light_dist_class=kernel_params['l_dist'],
extra_dim_dist_class=kernel_params['w_dist'])
gp_kernel.variance.fix()
if self._params['gp']['use_prior_mean']:
if sampling_params['light_type'] == 'circular':
mean_function = step_towards_center([-2, -1])
elif sampling_params['light_type'] == 'linear':
mean_function = angle_from_swarm_mean(range(sampling_params['num_kilobots'] * 2))
else:
raise InvalidParameterArgument('Unknown argument for parameter \'sampling.light_type\': \'{}\''.format(
sampling_params['light_type']))
else:
mean_function = None
policy = SparseWeightedGP(kernel=gp_kernel, noise_variance=self._params['gp']['noise_variance'],
mean_function=mean_function, output_bounds=output_bounds,
output_dim=output_bounds[0].shape[0])
return policy
def _init_SARS(self):
sampling_params = self._params['sampling']
kb_columns_level = ['kb_{}'.format(i) for i in range(self._params['sampling']['num_kilobots'])]
self.kilobots_columns = pd.MultiIndex.from_product([['S'], kb_columns_level, ['x', 'y']])
self.state_columns = self.kilobots_columns.copy()
self.state_object_columns = self.state_columns.copy()
self.object_columns = pd.MultiIndex.from_product([['S'], ['object'], ['x', 'y', 'theta']])
if sampling_params['light_type'] == 'circular':
self.light_columns = pd.MultiIndex.from_product([['S'], ['light'], ['x', 'y']])
self.action_columns = pd.MultiIndex.from_product([['A'], ['x', 'y'], ['']])
self.state_columns = self.state_columns.append(self.light_columns)
elif sampling_params['light_type'] == 'linear':
self.light_columns = pd.MultiIndex.from_product([['S'], ['light'], ['theta']])
self.action_columns = pd.MultiIndex.from_product([['A'], ['theta'], ['']])
elif sampling_params['light_type'] == 'dual':
self.light_columns = pd.MultiIndex.from_product([['S'], ['light1', 'light2'], ['x', 'y']])
self.action_columns = pd.MultiIndex.from_product([['A'], ['light1', 'light2'], ['x', 'y']])
self.state_columns = self.state_columns.append(self.light_columns)
self.state_object_columns = self.state_object_columns.append(self.light_columns)
self.extra_columns = None
if sampling_params['w_factor'] is None:
weight_columns = pd.MultiIndex.from_product([['S'], ['extra'], ['w']])
self.extra_columns = weight_columns
self.state_object_columns = self.state_object_columns.append(weight_columns)
if sampling_params['observe_object']:
object_observation_columns = None
if sampling_params['observe_object'] == 'orientation':
object_observation_columns = pd.MultiIndex.from_product([['S'], ['extra'], ['sin_o_t', 'cos_o_t']])
elif sampling_params['observe_object'] == 'position':
object_observation_columns = pd.MultiIndex.from_product([['S'], ['extra'], ['o_x', 'o_y']])
elif sampling_params['observe_object'] == 'pose' or sampling_params['observe_object'] is True:
object_observation_columns = | pd.MultiIndex.from_product([['S'], ['extra'], ['o_x', 'o_y', 'sin_o_t', 'cos_o_t']]) | pandas.MultiIndex.from_product |
# pylint: disable=E1101
from datetime import datetime
import datetime as dt
import os
import warnings
import nose
import struct
import sys
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
from pandas.compat import iterkeys
from pandas.core.frame import DataFrame, Series
from pandas.core.common import is_categorical_dtype
from pandas.io.parsers import read_csv
from pandas.io.stata import (read_stata, StataReader, InvalidColumnName,
PossiblePrecisionLoss, StataMissingValue)
import pandas.util.testing as tm
from pandas.tslib import NaT
from pandas import compat
class TestStata(tm.TestCase):
def setUp(self):
self.dirpath = tm.get_data_path()
self.dta1_114 = os.path.join(self.dirpath, 'stata1_114.dta')
self.dta1_117 = os.path.join(self.dirpath, 'stata1_117.dta')
self.dta2_113 = os.path.join(self.dirpath, 'stata2_113.dta')
self.dta2_114 = os.path.join(self.dirpath, 'stata2_114.dta')
self.dta2_115 = os.path.join(self.dirpath, 'stata2_115.dta')
self.dta2_117 = os.path.join(self.dirpath, 'stata2_117.dta')
self.dta3_113 = os.path.join(self.dirpath, 'stata3_113.dta')
self.dta3_114 = os.path.join(self.dirpath, 'stata3_114.dta')
self.dta3_115 = os.path.join(self.dirpath, 'stata3_115.dta')
self.dta3_117 = os.path.join(self.dirpath, 'stata3_117.dta')
self.csv3 = os.path.join(self.dirpath, 'stata3.csv')
self.dta4_113 = os.path.join(self.dirpath, 'stata4_113.dta')
self.dta4_114 = os.path.join(self.dirpath, 'stata4_114.dta')
self.dta4_115 = os.path.join(self.dirpath, 'stata4_115.dta')
self.dta4_117 = os.path.join(self.dirpath, 'stata4_117.dta')
self.dta_encoding = os.path.join(self.dirpath, 'stata1_encoding.dta')
self.csv14 = os.path.join(self.dirpath, 'stata5.csv')
self.dta14_113 = os.path.join(self.dirpath, 'stata5_113.dta')
self.dta14_114 = os.path.join(self.dirpath, 'stata5_114.dta')
self.dta14_115 = os.path.join(self.dirpath, 'stata5_115.dta')
self.dta14_117 = os.path.join(self.dirpath, 'stata5_117.dta')
self.csv15 = os.path.join(self.dirpath, 'stata6.csv')
self.dta15_113 = os.path.join(self.dirpath, 'stata6_113.dta')
self.dta15_114 = os.path.join(self.dirpath, 'stata6_114.dta')
self.dta15_115 = os.path.join(self.dirpath, 'stata6_115.dta')
self.dta15_117 = os.path.join(self.dirpath, 'stata6_117.dta')
self.dta16_115 = os.path.join(self.dirpath, 'stata7_115.dta')
self.dta16_117 = os.path.join(self.dirpath, 'stata7_117.dta')
self.dta17_113 = os.path.join(self.dirpath, 'stata8_113.dta')
self.dta17_115 = os.path.join(self.dirpath, 'stata8_115.dta')
self.dta17_117 = os.path.join(self.dirpath, 'stata8_117.dta')
self.dta18_115 = os.path.join(self.dirpath, 'stata9_115.dta')
self.dta18_117 = os.path.join(self.dirpath, 'stata9_117.dta')
self.dta19_115 = os.path.join(self.dirpath, 'stata10_115.dta')
self.dta19_117 = os.path.join(self.dirpath, 'stata10_117.dta')
self.dta20_115 = os.path.join(self.dirpath, 'stata11_115.dta')
self.dta20_117 = os.path.join(self.dirpath, 'stata11_117.dta')
self.dta21_117 = os.path.join(self.dirpath, 'stata12_117.dta')
def read_dta(self, file):
# Legacy default reader configuration
return read_stata(file, convert_dates=True)
def read_csv(self, file):
return read_csv(file, parse_dates=True)
def test_read_empty_dta(self):
empty_ds = DataFrame(columns=['unit'])
# GH 7369, make sure can read a 0-obs dta file
with tm.ensure_clean() as path:
empty_ds.to_stata(path,write_index=False)
empty_ds2 = read_stata(path)
tm.assert_frame_equal(empty_ds, empty_ds2)
def test_data_method(self):
# Minimal testing of legacy data method
reader_114 = StataReader(self.dta1_114)
with warnings.catch_warnings(record=True) as w:
parsed_114_data = reader_114.data()
reader_114 = StataReader(self.dta1_114)
parsed_114_read = reader_114.read()
tm.assert_frame_equal(parsed_114_data, parsed_114_read)
def test_read_dta1(self):
reader_114 = StataReader(self.dta1_114)
parsed_114 = reader_114.read()
reader_117 = StataReader(self.dta1_117)
parsed_117 = reader_117.read()
# Pandas uses np.nan as missing value.
# Thus, all columns will be of type float, regardless of their name.
expected = DataFrame([(np.nan, np.nan, np.nan, np.nan, np.nan)],
columns=['float_miss', 'double_miss', 'byte_miss',
'int_miss', 'long_miss'])
# this is an oddity, as the NaN should really be float64, but
# the casting doesn't fail, so we need to match Stata here
expected['float_miss'] = expected['float_miss'].astype(np.float32)
tm.assert_frame_equal(parsed_114, expected)
tm.assert_frame_equal(parsed_117, expected)
def test_read_dta2(self):
if LooseVersion(sys.version) < '2.7':
raise nose.SkipTest('datetime interp under 2.6 is faulty')
expected = DataFrame.from_records(
[
(
datetime(2006, 11, 19, 23, 13, 20),
1479596223000,
datetime(2010, 1, 20),
datetime(2010, 1, 8),
datetime(2010, 1, 1),
datetime(1974, 7, 1),
datetime(2010, 1, 1),
datetime(2010, 1, 1)
),
(
datetime(1959, 12, 31, 20, 3, 20),
-1479590,
datetime(1953, 10, 2),
datetime(1948, 6, 10),
datetime(1955, 1, 1),
datetime(1955, 7, 1),
datetime(1955, 1, 1),
datetime(2, 1, 1)
),
(
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
)
],
columns=['datetime_c', 'datetime_big_c', 'date', 'weekly_date',
'monthly_date', 'quarterly_date', 'half_yearly_date',
'yearly_date']
)
expected['yearly_date'] = expected['yearly_date'].astype('O')
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
parsed_114 = self.read_dta(self.dta2_114)
parsed_115 = self.read_dta(self.dta2_115)
parsed_117 = self.read_dta(self.dta2_117)
# 113 is buggy due to limits of date format support in Stata
# parsed_113 = self.read_dta(self.dta2_113)
# Remove resource warnings
w = [x for x in w if x.category is UserWarning]
# should get warning for each call to read_dta
tm.assert_equal(len(w), 3)
# buggy test because of the NaT comparison on certain platforms
# Format 113 test fails since it does not support tc and tC formats
# tm.assert_frame_equal(parsed_113, expected)
tm.assert_frame_equal(parsed_114, expected)
tm.assert_frame_equal(parsed_115, expected)
tm.assert_frame_equal(parsed_117, expected)
def test_read_dta3(self):
parsed_113 = self.read_dta(self.dta3_113)
parsed_114 = self.read_dta(self.dta3_114)
parsed_115 = self.read_dta(self.dta3_115)
parsed_117 = self.read_dta(self.dta3_117)
# match stata here
expected = self.read_csv(self.csv3)
expected = expected.astype(np.float32)
expected['year'] = expected['year'].astype(np.int16)
expected['quarter'] = expected['quarter'].astype(np.int8)
tm.assert_frame_equal(parsed_113, expected)
tm.assert_frame_equal(parsed_114, expected)
tm.assert_frame_equal(parsed_115, expected)
tm.assert_frame_equal(parsed_117, expected)
def test_read_dta4(self):
parsed_113 = self.read_dta(self.dta4_113)
parsed_114 = self.read_dta(self.dta4_114)
parsed_115 = self.read_dta(self.dta4_115)
parsed_117 = self.read_dta(self.dta4_117)
expected = DataFrame.from_records(
[
["one", "ten", "one", "one", "one"],
["two", "nine", "two", "two", "two"],
["three", "eight", "three", "three", "three"],
["four", "seven", 4, "four", "four"],
["five", "six", 5, np.nan, "five"],
["six", "five", 6, np.nan, "six"],
["seven", "four", 7, np.nan, "seven"],
["eight", "three", 8, np.nan, "eight"],
["nine", "two", 9, np.nan, "nine"],
["ten", "one", "ten", np.nan, "ten"]
],
columns=['fully_labeled', 'fully_labeled2', 'incompletely_labeled',
'labeled_with_missings', 'float_labelled'])
# these are all categoricals
expected = pd.concat([expected[col].astype('category') for col in expected], axis=1)
tm.assert_frame_equal(parsed_113, expected)
tm.assert_frame_equal(parsed_114, expected)
tm.assert_frame_equal(parsed_115, expected)
tm.assert_frame_equal(parsed_117, expected)
# File containing strls
def test_read_dta12(self):
parsed_117 = self.read_dta(self.dta21_117)
expected = DataFrame.from_records(
[
[1, "abc", "abcdefghi"],
[3, "cba", "qwertywertyqwerty"],
[93, "", "strl"],
],
columns=['x', 'y', 'z'])
tm.assert_frame_equal(parsed_117, expected, check_dtype=False)
def test_read_write_dta5(self):
original = DataFrame([(np.nan, np.nan, np.nan, np.nan, np.nan)],
columns=['float_miss', 'double_miss', 'byte_miss',
'int_miss', 'long_miss'])
original.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path, None)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
original)
def test_write_dta6(self):
original = self.read_csv(self.csv3)
original.index.name = 'index'
original.index = original.index.astype(np.int32)
original['year'] = original['year'].astype(np.int32)
original['quarter'] = original['quarter'].astype(np.int32)
with tm.ensure_clean() as path:
original.to_stata(path, None)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
original)
def test_read_write_dta10(self):
original = DataFrame(data=[["string", "object", 1, 1.1,
np.datetime64('2003-12-25')]],
columns=['string', 'object', 'integer', 'floating',
'datetime'])
original["object"] = Series(original["object"], dtype=object)
original.index.name = 'index'
original.index = original.index.astype(np.int32)
original['integer'] = original['integer'].astype(np.int32)
with tm.ensure_clean() as path:
original.to_stata(path, {'datetime': 'tc'})
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
original)
def test_stata_doc_examples(self):
with tm.ensure_clean() as path:
df = DataFrame(np.random.randn(10, 2), columns=list('AB'))
df.to_stata(path)
def test_write_preserves_original(self):
# 9795
np.random.seed(423)
df = pd.DataFrame(np.random.randn(5,4), columns=list('abcd'))
df.ix[2, 'a':'c'] = np.nan
df_copy = df.copy()
df.to_stata('test.dta', write_index=False)
tm.assert_frame_equal(df, df_copy)
def test_encoding(self):
# GH 4626, proper encoding handling
raw = read_stata(self.dta_encoding)
encoded = read_stata(self.dta_encoding, encoding="latin-1")
result = encoded.kreis1849[0]
if compat.PY3:
expected = raw.kreis1849[0]
self.assertEqual(result, expected)
self.assertIsInstance(result, compat.string_types)
else:
expected = raw.kreis1849.str.decode("latin-1")[0]
self.assertEqual(result, expected)
self.assertIsInstance(result, unicode)
with tm.ensure_clean() as path:
encoded.to_stata(path,encoding='latin-1', write_index=False)
reread_encoded = read_stata(path, encoding='latin-1')
tm.assert_frame_equal(encoded, reread_encoded)
def test_read_write_dta11(self):
original = DataFrame([(1, 2, 3, 4)],
columns=['good', compat.u('b\u00E4d'), '8number', 'astringwithmorethan32characters______'])
formatted = DataFrame([(1, 2, 3, 4)],
columns=['good', 'b_d', '_8number', 'astringwithmorethan32characters_'])
formatted.index.name = 'index'
formatted = formatted.astype(np.int32)
with tm.ensure_clean() as path:
with warnings.catch_warnings(record=True) as w:
original.to_stata(path, None)
# should get a warning for that format.
tm.assert_equal(len(w), 1)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'), formatted)
def test_read_write_dta12(self):
original = DataFrame([(1, 2, 3, 4, 5, 6)],
columns=['astringwithmorethan32characters_1',
'astringwithmorethan32characters_2',
'+',
'-',
'short',
'delete'])
formatted = DataFrame([(1, 2, 3, 4, 5, 6)],
columns=['astringwithmorethan32characters_',
'_0astringwithmorethan32character',
'_',
'_1_',
'_short',
'_delete'])
formatted.index.name = 'index'
formatted = formatted.astype(np.int32)
with tm.ensure_clean() as path:
with warnings.catch_warnings(record=True) as w:
original.to_stata(path, None)
tm.assert_equal(len(w), 1) # should get a warning for that format.
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'), formatted)
def test_read_write_dta13(self):
s1 = Series(2**9, dtype=np.int16)
s2 = Series(2**17, dtype=np.int32)
s3 = Series(2**33, dtype=np.int64)
original = DataFrame({'int16': s1, 'int32': s2, 'int64': s3})
original.index.name = 'index'
formatted = original
formatted['int64'] = formatted['int64'].astype(np.float64)
with tm.ensure_clean() as path:
original.to_stata(path)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
formatted)
def test_read_write_reread_dta14(self):
expected = self.read_csv(self.csv14)
cols = ['byte_', 'int_', 'long_', 'float_', 'double_']
for col in cols:
expected[col] = expected[col].convert_objects(convert_numeric=True)
expected['float_'] = expected['float_'].astype(np.float32)
expected['date_td'] = pd.to_datetime(expected['date_td'], coerce=True)
parsed_113 = self.read_dta(self.dta14_113)
parsed_113.index.name = 'index'
parsed_114 = self.read_dta(self.dta14_114)
parsed_114.index.name = 'index'
parsed_115 = self.read_dta(self.dta14_115)
parsed_115.index.name = 'index'
parsed_117 = self.read_dta(self.dta14_117)
parsed_117.index.name = 'index'
tm.assert_frame_equal(parsed_114, parsed_113)
tm.assert_frame_equal(parsed_114, parsed_115)
tm.assert_frame_equal(parsed_114, parsed_117)
with tm.ensure_clean() as path:
parsed_114.to_stata(path, {'date_td': 'td'})
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'), parsed_114)
def test_read_write_reread_dta15(self):
expected = self.read_csv(self.csv15)
expected['byte_'] = expected['byte_'].astype(np.int8)
expected['int_'] = expected['int_'].astype(np.int16)
expected['long_'] = expected['long_'].astype(np.int32)
expected['float_'] = expected['float_'].astype(np.float32)
expected['double_'] = expected['double_'].astype(np.float64)
expected['date_td'] = expected['date_td'].apply(datetime.strptime, args=('%Y-%m-%d',))
parsed_113 = self.read_dta(self.dta15_113)
parsed_114 = self.read_dta(self.dta15_114)
parsed_115 = self.read_dta(self.dta15_115)
parsed_117 = self.read_dta(self.dta15_117)
tm.assert_frame_equal(expected, parsed_114)
tm.assert_frame_equal(parsed_113, parsed_114)
tm.assert_frame_equal(parsed_114, parsed_115)
tm.assert_frame_equal(parsed_114, parsed_117)
def test_timestamp_and_label(self):
original = DataFrame([(1,)], columns=['var'])
time_stamp = datetime(2000, 2, 29, 14, 21)
data_label = 'This is a data file.'
with tm.ensure_clean() as path:
original.to_stata(path, time_stamp=time_stamp, data_label=data_label)
reader = StataReader(path)
parsed_time_stamp = dt.datetime.strptime(reader.time_stamp, ('%d %b %Y %H:%M'))
assert parsed_time_stamp == time_stamp
assert reader.data_label == data_label
def test_numeric_column_names(self):
original = DataFrame(np.reshape(np.arange(25.0), (5, 5)))
original.index.name = 'index'
with tm.ensure_clean() as path:
# should get a warning for that format.
with warnings.catch_warnings(record=True) as w:
tm.assert_produces_warning(original.to_stata(path), InvalidColumnName)
# should produce a single warning
tm.assert_equal(len(w), 1)
written_and_read_again = self.read_dta(path)
written_and_read_again = written_and_read_again.set_index('index')
columns = list(written_and_read_again.columns)
convert_col_name = lambda x: int(x[1])
written_and_read_again.columns = map(convert_col_name, columns)
tm.assert_frame_equal(original, written_and_read_again)
def test_nan_to_missing_value(self):
s1 = Series(np.arange(4.0), dtype=np.float32)
s2 = Series(np.arange(4.0), dtype=np.float64)
s1[::2] = np.nan
s2[1::2] = np.nan
original = DataFrame({'s1': s1, 's2': s2})
original.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path)
written_and_read_again = self.read_dta(path)
written_and_read_again = written_and_read_again.set_index('index')
tm.assert_frame_equal(written_and_read_again, original)
def test_no_index(self):
columns = ['x', 'y']
original = DataFrame(np.reshape(np.arange(10.0), (5, 2)),
columns=columns)
original.index.name = 'index_not_written'
with tm.ensure_clean() as path:
original.to_stata(path, write_index=False)
written_and_read_again = self.read_dta(path)
tm.assertRaises(KeyError,
lambda: written_and_read_again['index_not_written'])
def test_string_no_dates(self):
s1 = Series(['a', 'A longer string'])
s2 = Series([1.0, 2.0], dtype=np.float64)
original = DataFrame({'s1': s1, 's2': s2})
original.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
original)
def test_large_value_conversion(self):
s0 = Series([1, 99], dtype=np.int8)
s1 = Series([1, 127], dtype=np.int8)
s2 = Series([1, 2 ** 15 - 1], dtype=np.int16)
s3 = Series([1, 2 ** 63 - 1], dtype=np.int64)
original = DataFrame({'s0': s0, 's1': s1, 's2': s2, 's3': s3})
original.index.name = 'index'
with tm.ensure_clean() as path:
with warnings.catch_warnings(record=True) as w:
tm.assert_produces_warning(original.to_stata(path),
PossiblePrecisionLoss)
# should produce a single warning
tm.assert_equal(len(w), 1)
written_and_read_again = self.read_dta(path)
modified = original.copy()
modified['s1'] = Series(modified['s1'], dtype=np.int16)
modified['s2'] = Series(modified['s2'], dtype=np.int32)
modified['s3'] = Series(modified['s3'], dtype=np.float64)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
modified)
def test_dates_invalid_column(self):
original = DataFrame([datetime(2006, 11, 19, 23, 13, 20)])
original.index.name = 'index'
with tm.ensure_clean() as path:
with warnings.catch_warnings(record=True) as w:
tm.assert_produces_warning(original.to_stata(path, {0: 'tc'}),
InvalidColumnName)
tm.assert_equal(len(w), 1)
written_and_read_again = self.read_dta(path)
modified = original.copy()
modified.columns = ['_0']
tm.assert_frame_equal(written_and_read_again.set_index('index'),
modified)
def test_date_export_formats(self):
columns = ['tc', 'td', 'tw', 'tm', 'tq', 'th', 'ty']
conversions = dict(((c, c) for c in columns))
data = [datetime(2006, 11, 20, 23, 13, 20)] * len(columns)
original = DataFrame([data], columns=columns)
original.index.name = 'index'
expected_values = [datetime(2006, 11, 20, 23, 13, 20), # Time
datetime(2006, 11, 20), # Day
datetime(2006, 11, 19), # Week
datetime(2006, 11, 1), # Month
datetime(2006, 10, 1), # Quarter year
datetime(2006, 7, 1), # Half year
datetime(2006, 1, 1)] # Year
expected = DataFrame([expected_values], columns=columns)
expected.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path, conversions)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
expected)
def test_write_missing_strings(self):
original = DataFrame([["1"], [None]], columns=["foo"])
expected = DataFrame([["1"], [""]], columns=["foo"])
expected.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
expected)
def test_bool_uint(self):
s0 = Series([0, 1, True], dtype=np.bool)
s1 = Series([0, 1, 100], dtype=np.uint8)
s2 = Series([0, 1, 255], dtype=np.uint8)
s3 = Series([0, 1, 2 ** 15 - 100], dtype=np.uint16)
s4 = Series([0, 1, 2 ** 16 - 1], dtype=np.uint16)
s5 = Series([0, 1, 2 ** 31 - 100], dtype=np.uint32)
s6 = Series([0, 1, 2 ** 32 - 1], dtype=np.uint32)
original = DataFrame({'s0': s0, 's1': s1, 's2': s2, 's3': s3,
's4': s4, 's5': s5, 's6': s6})
original.index.name = 'index'
expected = original.copy()
expected_types = (np.int8, np.int8, np.int16, np.int16, np.int32,
np.int32, np.float64)
for c, t in zip(expected.columns, expected_types):
expected[c] = expected[c].astype(t)
with tm.ensure_clean() as path:
original.to_stata(path)
written_and_read_again = self.read_dta(path)
written_and_read_again = written_and_read_again.set_index('index')
tm.assert_frame_equal(written_and_read_again, expected)
def test_variable_labels(self):
sr_115 = StataReader(self.dta16_115).variable_labels()
sr_117 = StataReader(self.dta16_117).variable_labels()
keys = ('var1', 'var2', 'var3')
labels = ('label1', 'label2', 'label3')
for k,v in compat.iteritems(sr_115):
self.assertTrue(k in sr_117)
self.assertTrue(v == sr_117[k])
self.assertTrue(k in keys)
self.assertTrue(v in labels)
def test_minimal_size_col(self):
str_lens = (1, 100, 244)
s = {}
for str_len in str_lens:
s['s' + str(str_len)] = Series(['a' * str_len, 'b' * str_len, 'c' * str_len])
original = DataFrame(s)
with tm.ensure_clean() as path:
original.to_stata(path, write_index=False)
sr = StataReader(path)
typlist = sr.typlist
variables = sr.varlist
formats = sr.fmtlist
for variable, fmt, typ in zip(variables, formats, typlist):
self.assertTrue(int(variable[1:]) == int(fmt[1:-1]))
self.assertTrue(int(variable[1:]) == typ)
def test_excessively_long_string(self):
str_lens = (1, 244, 500)
s = {}
for str_len in str_lens:
s['s' + str(str_len)] = Series(['a' * str_len, 'b' * str_len, 'c' * str_len])
original = DataFrame(s)
with tm.assertRaises(ValueError):
with tm.ensure_clean() as path:
original.to_stata(path)
def test_missing_value_generator(self):
types = ('b','h','l')
df = DataFrame([[0.0]],columns=['float_'])
with tm.ensure_clean() as path:
df.to_stata(path)
valid_range = StataReader(path).VALID_RANGE
expected_values = ['.' + chr(97 + i) for i in range(26)]
expected_values.insert(0, '.')
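# Stata encodes 27 missing values per numeric type: the system missing '.' plus the extended codes '.a' through '.z'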
for t in types:
offset = valid_range[t][1]
for i in range(0,27):
val = StataMissingValue(offset+1+i)
self.assertTrue(val.string == expected_values[i])
# Test extremes for floats
val = StataMissingValue(struct.unpack('<f',b'\x00\x00\x00\x7f')[0])
self.assertTrue(val.string == '.')
val = StataMissingValue(struct.unpack('<f',b'\x00\xd0\x00\x7f')[0])
self.assertTrue(val.string == '.z')
# Test extremes for floats
val = StataMissingValue(struct.unpack('<d',b'\x00\x00\x00\x00\x00\x00\xe0\x7f')[0])
self.assertTrue(val.string == '.')
val = StataMissingValue(struct.unpack('<d',b'\x00\x00\x00\x00\x00\x1a\xe0\x7f')[0])
self.assertTrue(val.string == '.z')
def test_missing_value_conversion(self):
columns = ['int8_', 'int16_', 'int32_', 'float32_', 'float64_']
smv = StataMissingValue(101)
keys = [key for key in iterkeys(smv.MISSING_VALUES)]
keys.sort()
data = []
for i in range(27):
row = [StataMissingValue(keys[i+(j*27)]) for j in range(5)]
data.append(row)
expected = DataFrame(data,columns=columns)
parsed_113 = read_stata(self.dta17_113, convert_missing=True)
parsed_115 = read_stata(self.dta17_115, convert_missing=True)
parsed_117 = read_stata(self.dta17_117, convert_missing=True)
tm.assert_frame_equal(expected, parsed_113)
tm.assert_frame_equal(expected, parsed_115)
tm.assert_frame_equal(expected, parsed_117)
def test_big_dates(self):
yr = [1960, 2000, 9999, 100, 2262, 1677]
mo = [1, 1, 12, 1, 4, 9]
dd = [1, 1, 31, 1, 22, 23]
hr = [0, 0, 23, 0, 0, 0]
mm = [0, 0, 59, 0, 0, 0]
ss = [0, 0, 59, 0, 0, 0]
expected = []
for i in range(len(yr)):
row = []
for j in range(7):
if j == 0:
row.append(
datetime(yr[i], mo[i], dd[i], hr[i], mm[i], ss[i]))
elif j == 6:
row.append(datetime(yr[i], 1, 1))
else:
row.append(datetime(yr[i], mo[i], dd[i]))
expected.append(row)
expected.append([NaT] * 7)
columns = ['date_tc', 'date_td', 'date_tw', 'date_tm', 'date_tq',
'date_th', 'date_ty']
# Fixes for weekly, quarterly, half-yearly and yearly dates
expected[2][2] = datetime(9999,12,24)
expected[2][3] = datetime(9999,12,1)
expected[2][4] = datetime(9999,10,1)
expected[2][5] = datetime(9999,7,1)
expected[4][2] = datetime(2262,4,16)
expected[4][3] = expected[4][4] = datetime(2262,4,1)
expected[4][5] = expected[4][6] = datetime(2262,1,1)
expected[5][2] = expected[5][3] = expected[5][4] = datetime(1677,10,1)
expected[5][5] = expected[5][6] = datetime(1678,1,1)
expected = DataFrame(expected, columns=columns, dtype=np.object)
parsed_115 = read_stata(self.dta18_115)
parsed_117 = read_stata(self.dta18_117)
tm.assert_frame_equal(expected, parsed_115)
tm.assert_frame_equal(expected, parsed_117)
date_conversion = dict((c, c[-2:]) for c in columns)
#{c : c[-2:] for c in columns}
with tm.ensure_clean() as path:
expected.index.name = 'index'
expected.to_stata(path, date_conversion)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
expected)
def test_dtype_conversion(self):
expected = self.read_csv(self.csv15)
expected['byte_'] = expected['byte_'].astype(np.int8)
expected['int_'] = expected['int_'].astype(np.int16)
expected['long_'] = expected['long_'].astype(np.int32)
expected['float_'] = expected['float_'].astype(np.float32)
expected['double_'] = expected['double_'].astype(np.float64)
expected['date_td'] = expected['date_td'].apply(datetime.strptime,
args=('%Y-%m-%d',))
no_conversion = read_stata(self.dta15_117,
convert_dates=True)
tm.assert_frame_equal(expected, no_conversion)
conversion = read_stata(self.dta15_117,
convert_dates=True,
preserve_dtypes=False)
# read_csv types are the same
expected = self.read_csv(self.csv15)
expected['date_td'] = expected['date_td'].apply(datetime.strptime,
args=('%Y-%m-%d',))
tm.assert_frame_equal(expected, conversion)
def test_drop_column(self):
expected = self.read_csv(self.csv15)
expected['byte_'] = expected['byte_'].astype(np.int8)
expected['int_'] = expected['int_'].astype(np.int16)
expected['long_'] = expected['long_'].astype(np.int32)
expected['float_'] = expected['float_'].astype(np.float32)
expected['double_'] = expected['double_'].astype(np.float64)
expected['date_td'] = expected['date_td'].apply(datetime.strptime,
args=('%Y-%m-%d',))
columns = ['byte_', 'int_', 'long_']
expected = expected[columns]
dropped = read_stata(self.dta15_117, convert_dates=True,
columns=columns)
tm.assert_frame_equal(expected, dropped)
with tm.assertRaises(ValueError):
columns = ['byte_', 'byte_']
read_stata(self.dta15_117, convert_dates=True, columns=columns)
with tm.assertRaises(ValueError):
columns = ['byte_', 'int_', 'long_', 'not_found']
read_stata(self.dta15_117, convert_dates=True, columns=columns)
def test_categorical_writing(self):
original = DataFrame.from_records(
[
["one", "ten", "one", "one", "one", 1],
["two", "nine", "two", "two", "two", 2],
["three", "eight", "three", "three", "three", 3],
["four", "seven", 4, "four", "four", 4],
["five", "six", 5, np.nan, "five", 5],
["six", "five", 6, np.nan, "six", 6],
["seven", "four", 7, np.nan, "seven", 7],
["eight", "three", 8, np.nan, "eight", 8],
["nine", "two", 9, np.nan, "nine", 9],
["ten", "one", "ten", np.nan, "ten", 10]
],
columns=['fully_labeled', 'fully_labeled2', 'incompletely_labeled',
'labeled_with_missings', 'float_labelled', 'unlabeled'])
expected = original.copy()
# these are all categoricals
original = pd.concat([original[col].astype('category') for col in original], axis=1)
expected['incompletely_labeled'] = expected['incompletely_labeled'].apply(str)
expected['unlabeled'] = expected['unlabeled'].apply(str)
expected = pd.concat([expected[col].astype('category') for col in expected], axis=1)
expected.index.name = 'index'
with tm.ensure_clean() as path:
with warnings.catch_warnings(record=True) as w:
# Silence warnings
original.to_stata(path)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'), expected)
def test_categorical_warnings_and_errors(self):
# Warning for non-string labels
# Error for labels too long
original = pd.DataFrame.from_records(
[['a' * 10000],
['b' * 10000],
['c' * 10000],
['d' * 10000]],
columns=['Too_long'])
original = pd.concat([original[col].astype('category') for col in original], axis=1)
with tm.ensure_clean() as path:
tm.assertRaises(ValueError, original.to_stata, path)
original = pd.DataFrame.from_records(
[['a'],
['b'],
['c'],
['d'],
[1]],
columns=['Too_long'])
original = pd.concat([original[col].astype('category') for col in original], axis=1)
with warnings.catch_warnings(record=True) as w:
original.to_stata(path)
tm.assert_equal(len(w), 1) # should get a warning for mixed content
def test_categorical_with_stata_missing_values(self):
values = [['a' + str(i)] for i in range(120)]
values.append([np.nan])
original = pd.DataFrame.from_records(values, columns=['many_labels'])
original = pd.concat([original[col].astype('category') for col in original], axis=1)
original.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'), original)
def test_categorical_order(self):
# Directly construct using expected codes
# Format is is_cat, col_name, labels (in order), underlying data
expected = [(True, 'ordered', ['a', 'b', 'c', 'd', 'e'], np.arange(5)),
(True, 'reverse', ['a', 'b', 'c', 'd', 'e'], np.arange(5)[::-1]),
(True, 'noorder', ['a', 'b', 'c', 'd', 'e'], np.array([2, 1, 4, 0, 3])),
(True, 'floating', ['a', 'b', 'c', 'd', 'e'], np.arange(0, 5)),
(True, 'float_missing', ['a', 'd', 'e'], np.array([0, 1, 2, -1, -1])),
(False, 'nolabel', [1.0, 2.0, 3.0, 4.0, 5.0], np.arange(5)),
(True, 'int32_mixed', ['d', 2, 'e', 'b', 'a'], np.arange(5))]
cols = []
for is_cat, col, labels, codes in expected:
if is_cat:
cols.append((col, pd.Categorical.from_codes(codes, labels)))
else:
cols.append((col, pd.Series(labels, dtype=np.float32)))
expected = DataFrame.from_items(cols)
# Read with and without categoricals, ensure order is identical
parsed_115 = read_stata(self.dta19_115)
parsed_117 = read_stata(self.dta19_117)
tm.assert_frame_equal(expected, parsed_115)
tm.assert_frame_equal(expected, parsed_117)
# Check identity of codes
for col in expected:
if is_categorical_dtype(expected[col]):
tm.assert_series_equal(expected[col].cat.codes,
parsed_115[col].cat.codes)
tm.assert_index_equal(expected[col].cat.categories,
parsed_115[col].cat.categories)
def test_categorical_sorting(self):
parsed_115 = read_stata(self.dta20_115)
parsed_117 = read_stata(self.dta20_117)
# Sort based on codes, not strings
parsed_115 = parsed_115.sort("srh")
parsed_117 = parsed_117.sort("srh")
# Don't sort index
parsed_115.index = np.arange(parsed_115.shape[0])
parsed_117.index = np.arange(parsed_117.shape[0])
codes = [-1, -1, 0, 1, 1, 1, 2, 2, 3, 4]
categories = ["Poor", "Fair", "Good", "Very good", "Excellent"]
expected = pd.Series(pd.Categorical.from_codes(codes=codes,
categories=categories))
tm.assert_series_equal(expected, parsed_115["srh"])
tm.assert_series_equal(expected, parsed_117["srh"])
def test_categorical_ordering(self):
parsed_115 = read_stata(self.dta19_115)
parsed_117 = read_stata(self.dta19_117)
parsed_115_unordered = read_stata(self.dta19_115,
order_categoricals=False)
parsed_117_unordered = read_stata(self.dta19_117,
order_categoricals=False)
for col in parsed_115:
if not is_categorical_dtype(parsed_115[col]):
continue
tm.assert_equal(True, parsed_115[col].cat.ordered)
tm.assert_equal(True, parsed_117[col].cat.ordered)
tm.assert_equal(False, parsed_115_unordered[col].cat.ordered)
tm.assert_equal(False, parsed_117_unordered[col].cat.ordered)
def test_read_chunks_117(self):
files_117 = [self.dta1_117, self.dta2_117, self.dta3_117,
self.dta4_117, self.dta14_117, self.dta15_117,
self.dta16_117, self.dta17_117, self.dta18_117,
self.dta19_117, self.dta20_117]
for fname in files_117:
for chunksize in 1,2:
for convert_categoricals in False, True:
for convert_dates in False, True:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
parsed = read_stata(fname, convert_categoricals=convert_categoricals,
convert_dates=convert_dates)
itr = read_stata(fname, iterator=True)
pos = 0
for j in range(5):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
try:
chunk = itr.read(chunksize)
except StopIteration:
break
from_frame = parsed.iloc[pos:pos+chunksize, :]
try:
tm.assert_frame_equal(from_frame, chunk, check_dtype=False)
except AssertionError:
# datetime.datetime and pandas.tslib.Timestamp may hold
# equivalent values but fail assert_frame_equal
assert(all([x == y for x, y in zip(from_frame, chunk)]))
pos += chunksize
def test_iterator(self):
fname = self.dta3_117
parsed = read_stata(fname)
itr = read_stata(fname, iterator=True)
chunk = itr.read(5)
tm.assert_frame_equal(parsed.iloc[0:5, :], chunk)
itr = read_stata(fname, chunksize=5)
chunk = list(itr)
tm.assert_frame_equal(parsed.iloc[0:5, :], chunk[0])
itr = | read_stata(fname, iterator=True) | pandas.io.stata.read_stata |
import numpy as np
import pandas as pd
import xarray as xr
class HyData:
def __init__(self, files, stations):
self.stations = stations
self.files = files
# print('Reading Data....')
def read(self):
ds = xr.Dataset()
for station in self.stations:
ds[station] = self._read(station).to_array(dim="geo")
return ds
def _read(self, station):
files = self.files
dates = [filename.split(".")[-1] for filename in files]
dates = | pd.to_datetime(dates, format="%Y%m%d%H") | pandas.to_datetime |
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 6 11:40:40 2020
@author: hendrick
"""
# =============================================================================
# # # # import packages
# =============================================================================
import numpy as np
import pandas as pd
import matplotlib.tri as tri
import matplotlib.pyplot as plt
import bmi.wrapper
import os
from scipy.optimize import newton
from tqdm import tqdm
import datetime
from netCDF4 import Dataset
import faulthandler
faulthandler.enable()
# =============================================================================
# # # # specify directories of ddl- and input-files
# =============================================================================
model_folder = os.path.join('ModelReduction', 'mr004')
# Delft3D directories
D3D_HOME = os.path.join('p:\\11202744-008-vegetation-modelling', 'code_1709',
'windows', 'oss_artifacts_x64_63721', 'x64')
dflow_dir = os.path.join(D3D_HOME, 'dflowfm', 'bin', 'dflowfm.dll')
dimr_path = os.path.join(D3D_HOME, 'dimr', 'bin', 'dimr_dll.dll')
# work directory
workdir = os.path.join('p:\\11202744-008-vegetation-modelling', 'students',
'GijsHendrickx', 'models', model_folder)
inputdir = os.path.join(workdir, 'timeseries')
# input files (Delft3D)
config_file = os.path.join(workdir, 'dimr_config.xml')
mdufile = os.path.join(workdir, 'fm', 'FlowFM.mdu')
# print directories and input-files as check
print('Model : {0}\n'.format(workdir))
print('Delft3D home : {0}'.format(D3D_HOME))
print('DIMR-directory : {0}'.format(dimr_path))
print('Configuration file : {0}'.format(config_file))
# =============================================================================
# # # # prepare locations
# =============================================================================
# # print directories of input- and output-files
print('\nTime-series dir. : {0}'.format(inputdir))
# # intermediate figures
figfolder = os.path.join(workdir, 'figures')
# check existence and create if necessary
if not os.path.exists(figfolder):
os.mkdir(figfolder)
print('New folder created : {0}'.format(figfolder))
print('Figure directory : {0}'.format(figfolder))
# # output files
outputfolder = os.path.join(workdir, 'output')
# check existence and create if necessary
if not os.path.exists(outputfolder):
os.mkdir(outputfolder)
print('New folder created : {0}'.format(outputfolder))
print('Output directory : {0}'.format(outputfolder))
# =============================================================================
# # # # create correct environment
# =============================================================================
os.environ['PATH'] = (os.path.join(D3D_HOME, 'share', 'bin') + ';' +
os.path.join(D3D_HOME, 'dflowfm', 'bin') + ';' +
os.path.join(D3D_HOME, 'dimr', 'bin') + ';' +
os.path.join(D3D_HOME, 'dwaves', 'bin') + ';' +
os.path.join(D3D_HOME, 'esmf', 'scripts') + ';' +
os.path.join(D3D_HOME, 'swan', 'scripts'))
# print created environment as check
print('\nEnvironment : {0}\n'
.format(os.path.join(D3D_HOME, 'share', 'bin')) +
' {0}\n'
.format(os.path.join(D3D_HOME, 'dflowfm', 'bin')) +
' {0}\n'
.format(os.path.join(D3D_HOME, 'dimr', 'bin')) +
' {0}\n'
.format(os.path.join(D3D_HOME, 'dwaves', 'bin')) +
' {0}\n'
.format(os.path.join(D3D_HOME, 'esmf', 'scripts')) +
' {0}\n'
.format(os.path.join(D3D_HOME, 'swan', 'scripts')))
# =============================================================================
# # # # define and initialize wrappers
# =============================================================================
# define DFM wrapper
modelFM = bmi.wrapper.BMIWrapper(engine=dflow_dir, configfile=mdufile)
# define DIMR wrapper
modelDIMR = bmi.wrapper.BMIWrapper(engine=dimr_path, configfile=config_file)
# initialise model
modelDIMR.initialize()
print('Model initialized.\n')
# =============================================================================
# # # # set the pointers to important model variables of FlowFM
# =============================================================================
# number of boxes, including boundary boxes
ndx = modelFM.get_var('ndx')
# number of non-boundary boxes, i.e. within-domain boxes
ndxi = modelFM.get_var('ndxi')
# x-coord. of the center of gravity of the boxes
xzw = modelFM.get_var('xzw')
# y-coord. of the center of gravity of the boxes
yzw = modelFM.get_var('yzw')
# total number of links between boxes
lnx = modelFM.get_var('lnx')
# number of links between within-domain boxes
lnxi = modelFM.get_var('lnxi')
# link matrix between adjacent boxes [ln, 2] matrix
ln = modelFM.get_var('ln')
# distance between the centers of adjacent boxes
dx = modelFM.get_var('dx')
# width of the interface between adjacent boxes
wu = modelFM.get_var('wu')
# surface area of the boxes
ba = modelFM.get_var('ba')
# =============================================================================
# # # # set time parameters for coupled model
# =============================================================================
# # time-span
# start year
Ystart = 2000
# simulation time [yrs]
Y = 100
# year range
years = np.arange(Ystart, Ystart + Y)
# # model time per vegetation step [s]
mtpervt = 43200
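# 43200 s corresponds to 12 h of hydrodynamic model time per vegetation update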
# storm
mtpervt_storm = 86400
# =============================================================================
# # # # define output
# =============================================================================
# # # map > full spatial extent
# # data to output file
# mean flow > { uc }
U2mfile = True
# mean coral temperature > { Tc, Tlo, Thi }
T2mfile = True
# mean photosynthesis > { PS }
PS2mfile = True
# population states > { P } > { PH, PR, PP, PB }
P2mfile = True
# calcification > { G }
G2mfile = True
# morphology > { Lc } > { dc, hc, bc, tc, ac }
M2mfile = True
# # map-file
# map-file directory
mapfile = 'CoralModel_map.nc'
mapfilef = os.path.join(outputfolder, mapfile)
# time-interval > annually
# # # history > time-series
# # data to output file
# flow > { uc }
U2hfile = True
# temperature > { Tc, Tlo, Thi }
T2hfile = True
# photosynthesis > { PS }
PS2hfile = True
# population states > { P } > { PH, PR, PP, PB }
P2hfile = True
# calcification > { G }
G2hfile = True
# morphology > { Lc } > { dc, hc, bc, tc, ac }
M2hfile = True
# # his-file
# location(s)
xynfilef = os.path.join(workdir, 'fm', 'FlowFm_obs.xyn')
xyn = pd.read_csv(xynfilef, header=None, delim_whitespace=True)
xyn.columns = ['x', 'y', 'name']
# his-file directory
hisfile = 'CoralModel_his.nc'
hisfilef = os.path.join(outputfolder, hisfile)
# time-interval > daily
# =============================================================================
# # # # vegetation boundaries
# =============================================================================
xbndmin = min(xzw[range(ndxi)])
xbndmax = max(xzw[range(ndxi)])
ybndmin = min(yzw[range(ndxi)])
ybndmax = max(yzw[range(ndxi)])
xvbndmin = xbndmin
xvbndmax = xbndmax # (xbndmin + xbndmax) / 2
yvbndmin = ybndmin
yvbndmax = 700.
# =============================================================================
# # # # enabling/disabling processes
# =============================================================================
# in-canopy flow
cft = False
# thermal boundary layer
tbl = False
# # process dependencies
if not cft:
tbl = False
# # corfac
corfac = 1.
# =============================================================================
# # # # model constants
# =============================================================================
# species constant
Csp = 1.
# # light
# light-attenuation coefficient
Kd0 = .1
# # hydrodynamics
# Smagorinsky constant
Cs = .17
# inertia constant
Cm = 1.7
# friction coefficient
Cf = .01
# wave-current angle (degrees)
wcangle = 0.
# # temperature
# TBL coefficient
K0 = 80.
# thermal acclimation coefficient
Kvar = 2.45
# # acidity
# aragonite saturation state
omega0 = 5.
# # morphology
# overall form proportionality constant
Xf = .1
# flow plate proportionality constant
Xpu = .1
# light spacing proportionality constant
XsI = .1
# flow spacing proportionality constant
Xsu = .1
# # dislodgement
# tensile strength substratum [N m^-2]
sigmat = 1e6
# # recruitment
# probability of settlement [-]
ps = 1e-4
# larvae due to spawning [-]
Nl = 1e6
# larval diameter [m]
dl = 1e-3
# =============================================================================
# # # # initial conditions
# =============================================================================
# # initial morphology
dc0 = .05 # m
hc0 = .3 # m
bc0 = .5 * dc0
tc0 = .5 * hc0
ac0 = .2 # m
# # carrying capacity
K = np.zeros(ndxi)
K[np.logical_and.reduce((xzw >= xvbndmin,
xzw <= xvbndmax,
yzw >= yvbndmin,
yzw <= yvbndmax))] = 1.
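# carrying capacity is set to 1. only for cells whose centres fall inside the
# rectangular vegetation bounds [xvbndmin, xvbndmax] x [yvbndmin, yvbndmax];
# all other cells keep K = 0 and therefore cannot host coral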
# # coral cover
P0 = np.array([
K,
np.zeros(K.shape),
np.zeros(K.shape),
np.zeros(K.shape)
]).transpose()
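# P0 is an [ndxi, 4] array of initial population states; following the
# { PH, PR, PP, PB } convention used for the output above, the first (presumably
# healthy) state starts at carrying capacity and the other three start at zero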
# =============================================================================
# # # # read time-series
# =============================================================================
# # light conditions > { I0['par'] }
filef = os.path.join(inputdir, 'TS_PAR.txt')
I0 = pd.read_csv(filef, sep='\t')
"""
SparseArray data structure
"""
from __future__ import division
import numbers
import operator
import re
from typing import Any, Callable, Union
import warnings
import numpy as np
from pandas._libs import index as libindex, lib
import pandas._libs.sparse as splib
from pandas._libs.sparse import BlockIndex, IntIndex, SparseIndex
from pandas._libs.tslibs import NaT
import pandas.compat as compat
from pandas.compat.numpy import function as nv
from pandas.errors import PerformanceWarning
from pandas.core.dtypes.base import ExtensionDtype
from pandas.core.dtypes.cast import (
astype_nansafe, construct_1d_arraylike_from_scalar, find_common_type,
infer_dtype_from_scalar, maybe_convert_platform)
from pandas.core.dtypes.common import (
is_array_like, is_bool_dtype, is_datetime64_any_dtype, is_dtype_equal,
is_integer, is_list_like, is_object_dtype, is_scalar, is_string_dtype,
pandas_dtype)
from pandas.core.dtypes.dtypes import register_extension_dtype
from pandas.core.dtypes.generic import (
ABCIndexClass, ABCSeries, ABCSparseSeries)
from pandas.core.dtypes.missing import isna, na_value_for_dtype, notna
from pandas.core.accessor import PandasDelegate, delegate_names
import pandas.core.algorithms as algos
from pandas.core.arrays import ExtensionArray, ExtensionOpsMixin
from pandas.core.base import PandasObject
import pandas.core.common as com
from pandas.core.missing import interpolate_2d
import pandas.io.formats.printing as printing
# ----------------------------------------------------------------------------
# Dtype
@register_extension_dtype
class SparseDtype(ExtensionDtype):
"""
Dtype for data stored in :class:`SparseArray`.
This dtype implements the pandas ExtensionDtype interface.
.. versionadded:: 0.24.0
Parameters
----------
dtype : str, ExtensionDtype, numpy.dtype, type, default numpy.float64
The dtype of the underlying array storing the non-fill value values.
fill_value : scalar, optional
The scalar value not stored in the SparseArray. By default, this
depends on `dtype`.
=========== ==========
dtype na_value
=========== ==========
float ``np.nan``
int ``0``
bool ``False``
datetime64 ``pd.NaT``
timedelta64 ``pd.NaT``
=========== ==========
The default value may be overridden by specifying a `fill_value`.
"""
# We include `_is_na_fill_value` in the metadata to avoid hash collisions
# between SparseDtype(float, 0.0) and SparseDtype(float, nan).
# Without is_na_fill_value in the comparison, those would be equal since
# hash(nan) is (sometimes?) 0.
_metadata = ('_dtype', '_fill_value', '_is_na_fill_value')
def __init__(self, dtype=np.float64, fill_value=None):
# type: (Union[str, np.dtype, 'ExtensionDtype', type], Any) -> None
from pandas.core.dtypes.missing import na_value_for_dtype
from pandas.core.dtypes.common import (
pandas_dtype, is_string_dtype, is_scalar
)
if isinstance(dtype, type(self)):
if fill_value is None:
fill_value = dtype.fill_value
dtype = dtype.subtype
dtype = pandas_dtype(dtype)
if is_string_dtype(dtype):
dtype = np.dtype('object')
if fill_value is None:
fill_value = na_value_for_dtype(dtype)
if not is_scalar(fill_value):
raise ValueError("fill_value must be a scalar. Got {} "
"instead".format(fill_value))
self._dtype = dtype
self._fill_value = fill_value
def __hash__(self):
# Python3 doesn't inherit __hash__ when a base class overrides
# __eq__, so we explicitly do it here.
return super(SparseDtype, self).__hash__()
def __eq__(self, other):
# We have to override __eq__ to handle NA values in _metadata.
# The base class does simple == checks, which fail for NA.
if isinstance(other, compat.string_types):
try:
other = self.construct_from_string(other)
except TypeError:
return False
if isinstance(other, type(self)):
subtype = self.subtype == other.subtype
if self._is_na_fill_value:
# this case is complicated by two things:
# SparseDtype(float, float(nan)) == SparseDtype(float, np.nan)
# SparseDtype(float, np.nan) != SparseDtype(float, pd.NaT)
# i.e. we want to treat any floating-point NaN as equal, but
# not a floating-point NaN and a datetime NaT.
fill_value = (
other._is_na_fill_value and
isinstance(self.fill_value, type(other.fill_value)) or
isinstance(other.fill_value, type(self.fill_value))
)
else:
fill_value = self.fill_value == other.fill_value
return subtype and fill_value
return False
@property
def fill_value(self):
"""
The fill value of the array.
Converting the SparseArray to a dense ndarray will fill the
array with this value.
.. warning::
It's possible to end up with a SparseArray that has ``fill_value``
values in ``sp_values``. This can occur, for example, when setting
``SparseArray.fill_value`` directly.
"""
return self._fill_value
@property
def _is_na_fill_value(self):
from pandas.core.dtypes.missing import isna
return isna(self.fill_value)
@property
def _is_numeric(self):
from pandas.core.dtypes.common import is_object_dtype
return not is_object_dtype(self.subtype)
@property
def _is_boolean(self):
from pandas.core.dtypes.common import is_bool_dtype
return is_bool_dtype(self.subtype)
@property
def kind(self):
"""
The sparse kind. Either 'integer', or 'block'.
"""
return self.subtype.kind
@property
def type(self):
return self.subtype.type
@property
def subtype(self):
return self._dtype
@property
def name(self):
return 'Sparse[{}, {}]'.format(self.subtype.name, self.fill_value)
def __repr__(self):
return self.name
@classmethod
def construct_array_type(cls):
return SparseArray
@classmethod
def construct_from_string(cls, string):
"""
Construct a SparseDtype from a string form.
Parameters
----------
string : str
Can take the following forms.
string dtype
================ ============================
'int' SparseDtype[np.int64, 0]
'Sparse' SparseDtype[np.float64, nan]
'Sparse[int]' SparseDtype[np.int64, 0]
'Sparse[int, 0]' SparseDtype[np.int64, 0]
================ ============================
It is not possible to specify non-default fill values
with a string. An argument like ``'Sparse[int, 1]'``
will raise a ``TypeError`` because the default fill value
for integers is 0.
Returns
-------
SparseDtype
"""
msg = "Could not construct SparseDtype from '{}'".format(string)
if string.startswith("Sparse"):
try:
sub_type, has_fill_value = cls._parse_subtype(string)
result = SparseDtype(sub_type)
except Exception:
raise TypeError(msg)
else:
msg = ("Could not construct SparseDtype from '{}'.\n\nIt "
"looks like the fill_value in the string is not "
"the default for the dtype. Non-default fill_values "
"are not supported. Use the 'SparseDtype()' "
"constructor instead.")
if has_fill_value and str(result) != string:
raise TypeError(msg.format(string))
return result
else:
raise TypeError(msg)
@staticmethod
def _parse_subtype(dtype):
"""
Parse a string to get the subtype
Parameters
----------
dtype : str
A string like
* Sparse[subtype]
* Sparse[subtype, fill_value]
Returns
-------
subtype : str
Raises
------
ValueError
When the subtype cannot be extracted.
"""
xpr = re.compile(
r"Sparse\[(?P<subtype>[^,]*)(, )?(?P<fill_value>.*?)?\]$"
)
m = xpr.match(dtype)
has_fill_value = False
if m:
subtype = m.groupdict()['subtype']
has_fill_value = m.groupdict()['fill_value'] or has_fill_value
elif dtype == "Sparse":
subtype = 'float64'
else:
raise ValueError("Cannot parse {}".format(dtype))
return subtype, has_fill_value
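    # Illustrative parser behaviour (examples added for clarity; they follow directly
    # from the regular expression above):
    #   _parse_subtype("Sparse[int, 0]") -> ("int", "0")
    #   _parse_subtype("Sparse")         -> ("float64", False)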
@classmethod
def is_dtype(cls, dtype):
dtype = getattr(dtype, 'dtype', dtype)
if (isinstance(dtype, compat.string_types) and
dtype.startswith("Sparse")):
sub_type, _ = cls._parse_subtype(dtype)
dtype = np.dtype(sub_type)
elif isinstance(dtype, cls):
return True
return isinstance(dtype, np.dtype) or dtype == 'Sparse'
def update_dtype(self, dtype):
"""
Convert the SparseDtype to a new dtype.
This takes care of converting the ``fill_value``.
Parameters
----------
dtype : Union[str, numpy.dtype, SparseDtype]
The new dtype to use.
* For a SparseDtype, it is simply returned
* For a NumPy dtype (or str), the current fill value
is converted to the new dtype, and a SparseDtype
with `dtype` and the new fill value is returned.
Returns
-------
SparseDtype
        A new SparseDtype with the correct `dtype` and fill value
for that `dtype`.
Raises
------
ValueError
When the current fill value cannot be converted to the
new `dtype` (e.g. trying to convert ``np.nan`` to an
integer dtype).
Examples
--------
>>> SparseDtype(int, 0).update_dtype(float)
Sparse[float64, 0.0]
>>> SparseDtype(int, 1).update_dtype(SparseDtype(float, np.nan))
Sparse[float64, nan]
"""
cls = type(self)
dtype = pandas_dtype(dtype)
if not isinstance(dtype, cls):
fill_value = astype_nansafe(np.array(self.fill_value),
dtype).item()
dtype = cls(dtype, fill_value=fill_value)
return dtype
@property
def _subtype_with_str(self):
"""
Whether the SparseDtype's subtype should be considered ``str``.
Typically, pandas will store string data in an object-dtype array.
When converting values to a dtype, e.g. in ``.astype``, we need to
be more specific, we need the actual underlying type.
Returns
-------
>>> SparseDtype(int, 1)._subtype_with_str
dtype('int64')
>>> SparseDtype(object, 1)._subtype_with_str
dtype('O')
>>> dtype = SparseDtype(str, '')
>>> dtype.subtype
dtype('O')
>>> dtype._subtype_with_str
str
"""
if isinstance(self.fill_value, compat.string_types):
return type(self.fill_value)
return self.subtype
# ----------------------------------------------------------------------------
# Array
_sparray_doc_kwargs = dict(klass='SparseArray')
def _get_fill(arr):
# type: (SparseArray) -> np.ndarray
"""
Create a 0-dim ndarray containing the fill value
Parameters
----------
arr : SparseArray
Returns
-------
fill_value : ndarray
0-dim ndarray with just the fill value.
Notes
-----
coerce fill_value to arr dtype if possible
    int64 SparseArray can have NaN as fill_value if there is no missing value
"""
try:
return np.asarray(arr.fill_value, dtype=arr.dtype.subtype)
except ValueError:
return np.asarray(arr.fill_value)
def _sparse_array_op(left, right, op, name):
# type: (SparseArray, SparseArray, Callable, str) -> Any
"""
Perform a binary operation between two arrays.
Parameters
----------
left : Union[SparseArray, ndarray]
right : Union[SparseArray, ndarray]
op : Callable
The binary operation to perform
    name : str
Name of the callable.
Returns
-------
SparseArray
"""
if name.startswith('__'):
# For lookups in _libs.sparse we need non-dunder op name
name = name[2:-2]
# dtype used to find corresponding sparse method
ltype = left.dtype.subtype
rtype = right.dtype.subtype
if not is_dtype_equal(ltype, rtype):
subtype = find_common_type([ltype, rtype])
ltype = SparseDtype(subtype, left.fill_value)
rtype = SparseDtype(subtype, right.fill_value)
# TODO(GH-23092): pass copy=False. Need to fix astype_nansafe
left = left.astype(ltype)
right = right.astype(rtype)
dtype = ltype.subtype
else:
dtype = ltype
# dtype the result must have
result_dtype = None
if left.sp_index.ngaps == 0 or right.sp_index.ngaps == 0:
with np.errstate(all='ignore'):
result = op(left.get_values(), right.get_values())
fill = op(_get_fill(left), _get_fill(right))
if left.sp_index.ngaps == 0:
index = left.sp_index
else:
index = right.sp_index
elif left.sp_index.equals(right.sp_index):
with np.errstate(all='ignore'):
result = op(left.sp_values, right.sp_values)
fill = op(_get_fill(left), _get_fill(right))
index = left.sp_index
else:
if name[0] == 'r':
left, right = right, left
name = name[1:]
if name in ('and', 'or') and dtype == 'bool':
opname = 'sparse_{name}_uint8'.format(name=name)
# to make template simple, cast here
left_sp_values = left.sp_values.view(np.uint8)
right_sp_values = right.sp_values.view(np.uint8)
result_dtype = np.bool
else:
opname = 'sparse_{name}_{dtype}'.format(name=name, dtype=dtype)
left_sp_values = left.sp_values
right_sp_values = right.sp_values
sparse_op = getattr(splib, opname)
with np.errstate(all='ignore'):
result, index, fill = sparse_op(
left_sp_values, left.sp_index, left.fill_value,
right_sp_values, right.sp_index, right.fill_value)
if result_dtype is None:
result_dtype = result.dtype
return _wrap_result(name, result, index, fill, dtype=result_dtype)
def _wrap_result(name, data, sparse_index, fill_value, dtype=None):
"""
wrap op result to have correct dtype
"""
if name.startswith('__'):
# e.g. __eq__ --> eq
name = name[2:-2]
if name in ('eq', 'ne', 'lt', 'gt', 'le', 'ge'):
dtype = np.bool
fill_value = lib.item_from_zerodim(fill_value)
if is_bool_dtype(dtype):
# fill_value may be np.bool_
fill_value = bool(fill_value)
return SparseArray(data,
sparse_index=sparse_index,
fill_value=fill_value,
dtype=dtype)
class SparseArray(PandasObject, ExtensionArray, ExtensionOpsMixin):
"""
An ExtensionArray for storing sparse data.
.. versionchanged:: 0.24.0
Implements the ExtensionArray interface.
Parameters
----------
data : array-like
A dense array of values to store in the SparseArray. This may contain
`fill_value`.
sparse_index : SparseIndex, optional
index : Index
fill_value : scalar, optional
Elements in `data` that are `fill_value` are not stored in the
SparseArray. For memory savings, this should be the most common value
in `data`. By default, `fill_value` depends on the dtype of `data`:
=========== ==========
data.dtype na_value
=========== ==========
float ``np.nan``
int ``0``
bool False
datetime64 ``pd.NaT``
timedelta64 ``pd.NaT``
=========== ==========
    The fill value is potentially specified in three ways. In order of
precedence, these are
1. The `fill_value` argument
2. ``dtype.fill_value`` if `fill_value` is None and `dtype` is
a ``SparseDtype``
3. ``data.dtype.fill_value`` if `fill_value` is None and `dtype`
is not a ``SparseDtype`` and `data` is a ``SparseArray``.
kind : {'integer', 'block'}, default 'integer'
The type of storage for sparse locations.
* 'block': Stores a `block` and `block_length` for each
contiguous *span* of sparse values. This is best when
sparse data tends to be clumped together, with large
          regions of ``fill-value`` values between sparse values.
* 'integer': uses an integer to store the location of
each sparse value.
dtype : np.dtype or SparseDtype, optional
The dtype to use for the SparseArray. For numpy dtypes, this
determines the dtype of ``self.sp_values``. For SparseDtype,
this determines ``self.sp_values`` and ``self.fill_value``.
copy : bool, default False
Whether to explicitly copy the incoming `data` array.
"""
__array_priority__ = 15
_pandas_ftype = 'sparse'
_subtyp = 'sparse_array' # register ABCSparseArray
def __init__(self, data, sparse_index=None, index=None, fill_value=None,
kind='integer', dtype=None, copy=False):
from pandas.core.internals import SingleBlockManager
if isinstance(data, SingleBlockManager):
data = data.internal_values()
if fill_value is None and isinstance(dtype, SparseDtype):
fill_value = dtype.fill_value
if isinstance(data, (type(self), ABCSparseSeries)):
# disable normal inference on dtype, sparse_index, & fill_value
if sparse_index is None:
sparse_index = data.sp_index
if fill_value is None:
fill_value = data.fill_value
if dtype is None:
dtype = data.dtype
# TODO: make kind=None, and use data.kind?
data = data.sp_values
        # Handle user-provided dtype
if isinstance(dtype, compat.string_types):
# Two options: dtype='int', regular numpy dtype
# or dtype='Sparse[int]', a sparse dtype
try:
dtype = SparseDtype.construct_from_string(dtype)
except TypeError:
dtype = pandas_dtype(dtype)
if isinstance(dtype, SparseDtype):
if fill_value is None:
fill_value = dtype.fill_value
dtype = dtype.subtype
        if index is not None and not is_scalar(data):
            raise Exception("must only pass scalars with an index ")
import pandas as pd
import pytest
from pandera import Column, DataFrameSchema, Check
from pandera import dtypes
from pandera.errors import SchemaError
def test_numeric_dtypes():
for dtype in [
dtypes.Float,
dtypes.Float16,
dtypes.Float32,
dtypes.Float64]:
schema = DataFrameSchema({"col": Column(dtype, nullable=False)})
validated_df = schema.validate(
pd.DataFrame(
{"col": [-123.1, -7654.321, 1.0, 1.1, 1199.51, 5.1, 4.6]},
dtype=dtype.value))
assert isinstance(validated_df, pd.DataFrame)
for dtype in [
dtypes.Int,
dtypes.Int8,
dtypes.Int16,
dtypes.Int32,
dtypes.Int64]:
schema = DataFrameSchema({"col": Column(dtype, nullable=False)})
validated_df = schema.validate(
pd.DataFrame(
{"col": [-712, -4, -321, 0, 1, 777, 5, 123, 9000]},
dtype=dtype.value))
assert isinstance(validated_df, pd.DataFrame)
for dtype in [
dtypes.UInt8,
dtypes.UInt16,
dtypes.UInt32,
dtypes.UInt64]:
schema = DataFrameSchema({"col": Column(dtype, nullable=False)})
validated_df = schema.validate(
pd.DataFrame(
{"col": [1, 777, 5, 123, 9000]}, dtype=dtype.value))
assert isinstance(validated_df, pd.DataFrame)
def test_category_dtype():
schema = DataFrameSchema(
columns={
"col": Column(
dtypes.Category,
checks=[
Check(lambda s: set(s) == {"A", "B", "C"}),
Check(lambda s:
s.cat.categories.tolist() == ["A", "B", "C"]),
Check(lambda s: s.isin(["A", "B", "C"]))
],
nullable=False
),
},
coerce=False
)
validated_df = schema.validate(
pd.DataFrame(
{"col": pd.Series(["A", "B", "A", "B", "C"], dtype="category")}
)
)
assert isinstance(validated_df, pd.DataFrame)
def test_category_dtype_coerce():
columns = {
"col": Column(
dtypes.Category,
checks=Check(lambda s: set(s) == {"A", "B", "C"}),
nullable=False
),
}
with pytest.raises(SchemaError):
DataFrameSchema(columns=columns, coerce=False).validate(
pd.DataFrame(
                {"col": pd.Series(["A", "B", "A", "B", "C"], dtype="object")}
            )
        )
import scipy.io as sio
import matplotlib.pyplot as plt
import os
import numpy as np
import logging
import argparse
import pandas as pd
def find_normalized_errors(preds, y, ord):
diffs = preds - y
raw_errors = np.linalg.norm(diffs, ord=ord, axis=1)
raw_mean = raw_errors.mean()
norms = np.linalg.norm(y, ord=ord, axis=1)
normalized_errors = np.divide(raw_errors, norms)
return normalized_errors
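# find_normalized_errors returns the row-wise relative error ||pred_i - y_i|| / ||y_i||.
# Minimal sketch (shapes and values are illustrative, not from the original data):
#   preds = np.ones((100, 512)); y = np.full((100, 512), 2.0)
#   find_normalized_errors(preds, y, ord=2)   # -> array of 100 values, each 0.5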
def load_data(fp):
N_TEST = 100
SUB = 2**3
data = sio.loadmat(fp)
X = data['a'][:,::SUB]
y = data['u'][:,::SUB]
del data
X = X[-N_TEST:,:]
y = y[-N_TEST:,:]
return (X, y)
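# load_data keeps only the last N_TEST (=100) samples and subsamples the spatial axis
# by a factor of SUB (=2**3), i.e. every 8th grid point of 'a' (input) and 'u' (target)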
def load_preds(fp):
try:
x = sio.loadmat(fp)['pred']
return x
except KeyError:
print( sio.loadmat(fp).keys())
raise ValueError
def quick_hist(data, t, xlabel, fp):
plt.hist(data)
plt.ylabel("counts", size=13)
plt.xlabel(xlabel, size=13)
plt.title(t)
plt.savefig(fp)
plt.clf()
def plot_one_testcase(X, y, preds, i, t, fp=None):
err = y[i] - preds[i]
plt.plot(X[i], label='input')
plt.plot(y[i],'-', label='target')
plt.plot(preds[i], '--', label='predictions')
plt.plot(err, label='errors')
plt.legend()
plt.title(t)
if fp is not None:
plt.savefig(fp)
plt.clf()
else:
plt.show()
def keep_k_fourier_modes(preds, k):
fourier_coeffs = np.fft.fft(preds)
# We want the first k and last k entries of axis 1 for each row in axis 0
fourier_coeffs[:,k+1:-k] = 0+0j
out_arr = np.fft.ifft(fourier_coeffs)
# out_arr is real (up to numerical precision) but explicitly casting it avoids
# numpy warnings
return np.real(out_arr)
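# Usage sketch (illustrative): low-pass filter the predictions by keeping only the
# lowest-frequency Fourier modes (indices 0..k plus their negative-frequency mirror).
#   smooth_preds = keep_k_fourier_modes(preds, k=32)   # same shape as preds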
def plot_truncated_testcase(X, y, preds, trunc_preds, i, t, fp=None):
plt.plot(preds[i], '-', label='predictions')
plt.plot(trunc_preds[i], '--', label='truncated predictions')
plt.legend()
plt.title(t)
if fp is not None:
plt.savefig(fp)
plt.clf()
else:
plt.show()
def plot_experiment(key, X, y, preds_fp, plots_out):
logging.info("Working on experiment: {}".format(key))
preds = load_preds(preds_fp)
# Compute and report average errors
l2_normalized_errors = find_normalized_errors(preds, y, 2)
decreasing_l2_error_indices = np.flip(np.argsort(l2_normalized_errors))
for i in range(10):
idx = decreasing_l2_error_indices[i]
l2_err = l2_normalized_errors[idx]
t = "{} test sample {}: norm. l2 error {:.4f}".format(key, idx, l2_err)
fp = os.path.join(plots_out, '{}_worst_l2_errors_{}.png'.format(key, i))
plot_one_testcase(X, y, preds, idx, t, fp)
# Specially-chosen test cases for all of the frequencies
for i in [64]:
l2_err = l2_normalized_errors[i]
t = "{} test sample {}: norm. l2 error {:.4f}".format(key, i, l2_err)
fp = os.path.join(plots_out, '{}_test_sample_{}.png'.format(key, i))
plot_one_testcase(X, y, preds, i, t, fp)
# Truncate the Fourier modes and recompute errors. In the next section,
# we will be using 2**k Fourier modes.
fourier_results = pd.DataFrame(columns=['k', 'num_modes', 'l2_norm_error'])
for k in range(1, 11):
num_modes = int(2**k)
truncated_preds = keep_k_fourier_modes(preds, num_modes)
l2_norm_error = find_normalized_errors(truncated_preds, y, 2).mean()
fourier_results = fourier_results.append({'k':k, 'num_modes': num_modes, 'l2_norm_error': l2_norm_error}, ignore_index=True)
for i in [1, 64, 75, 86]:
t = "{} test sample {}: truncated to {} frequency modes".format(key, i, num_modes)
fp = os.path.join(plots_out, '{}_truncated_pred_{}_test_sample_{}.png'.format(key, num_modes, i))
plot_truncated_testcase(X, y, preds, truncated_preds, i, t, fp)
plt.plot(fourier_results.k.values, fourier_results.l2_norm_error.values, '.')
plt.xticks(fourier_results.k.values, labels=["%i" % 2**j for j in fourier_results.k.values])
plt.xlabel("Fourier modes kept", size=13)
plt.ylabel("$L_2$ Normalized Error", size=13)
t = "{}: Errors after truncation to {} modes".format(key, num_modes)
plt.title(t)
fp_2 = os.path.join(plots_out, '{}_errors_after_truncation.png'.format(key))
plt.savefig(fp_2)
plt.clf()
def main(args):
if not os.path.isdir(args.plots_dir):
logging.info("Making output directory at {}".format(args.plots_dir))
os.mkdir(args.plots_dir)
    results_df = pd.read_table(args.results_df)
#! /usr/bin/python3
# -*- coding: utf-8 -*-
import torch
import click
from tqdm import tqdm
import numpy as np
from sklearn.metrics import fbeta_score
import pandas as pd
from model.model import get_model
from util.util import make_output_dir
from config.config import load_config
from data.dataset import ImetDataset
import os
import glob
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
def valid_one_epoch(valid_loader, model, epoch="?", threshold=0.5, flip_tta=False, pickle_name=None):
model.eval()
img_size = valid_loader.dataset.img_size
valid_df_base = []
with torch.no_grad():
sigmoid = torch.nn.Sigmoid()
fbetas = []
valid_pbar = tqdm(valid_loader, total=len(valid_loader))
for sample in valid_pbar:
images, labels = sample['image'].type(torch.FloatTensor).to(DEVICE), \
sample['y'].type(torch.FloatTensor).to(DEVICE)
logits = sigmoid(model(images))
if flip_tta:
                # apply a horizontal flip (test-time augmentation)
flipped = images[:, :, :, torch.arange(img_size - 1, -1, -1)]
logits_tta = sigmoid(model(flipped))
logits = (logits + logits_tta) / 2.
fbeta = fbeta_score(y_pred=np.uint8((logits.data.cpu().numpy() > threshold)),
y_true=labels.data.cpu().numpy(),
beta=2,
average="samples")
fbetas.append(fbeta)
valid_pbar.set_postfix(fbeta=np.mean(fbetas), epoch=epoch)
if pickle_name:
logits_arr = logits.data.cpu().numpy()
labels_arr = labels.data.cpu().numpy()
for i in range(len(labels_arr)):
valid_df_base.append({"logit": logits_arr[i], "label": labels_arr[i]})
if pickle_name:
if not ".pickle" in pickle_name:
pickle_name = "{}.pickle".format(pickle_name)
        pd.DataFrame(valid_df_base).to_pickle(pickle_name)
import sys
import unittest
import pandas as pd
from src.preprocessing import format_ocean_proximity
class FormattingTestCase(unittest.TestCase):
def setUp(self):
        self.ref_df = pd.read_csv("housing.csv")
import numpy as np
import pandas as pd
from tqdm import tqdm
import datetime
from scipy import stats
pd.plotting.register_matplotlib_converters() # addresses complaints about Timestamp instead of float for plotting x-values
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import matplotlib.ticker as mtick
from matplotlib import collections as mplc
import joblib
from scipy.stats import norm as sp_norm
plt.style.use('seaborn-darkgrid')
matplotlib.use('Agg')
from time import time as get_time
from os import path
import os
from enum import Enum
from yattag import Doc
import logging
logging.basicConfig(level=logging.INFO)
class Region(Enum):
US_states = 'US_states'
countries = 'countries'
US_counties = 'US_counties'
provinces = 'provinces'
def __str__(self):
return str(self.value)
class ApproxType(Enum):
__order__ = 'BS LS MCMC SM PyMC3 Hess SM_acc SM_TS SP_CF NDT_Hess NDT_Jac SP_min SP_LS NUTS Emcee'
BS = ('BS', 'bootstrap')
LS = ('LS', 'likelihood_samples')
MCMC = ('MCMC', 'random_walk')
SM = ('SM', 'statsmodels')
PyMC3 = ('PyMC3', 'PyMC3')
Hess = ('Hess', 'hessian')
SM_acc = ('SM_acc', 'statsmodels_acc')
SM_TS = ('SM_TS', 'statsmodels_time_series')
SP_CF = ('SP_CF', 'curve_fit_covariance')
NDT_Hess = ('NDT_Hess', 'numdifftools_hessian')
NDT_Jac = ('NDT_Jac', 'numdifftools_jacobian')
SP_min = ('SP_min', 'scipy_minimize')
SP_LS = ('SP_LS', 'scipy_least_squares')
NUTS = ('NUTS', 'no_u_turn_random_walk')
Emcee = ('Emcee', 'emcee')
def __str__(self):
return str(self.value)
class Stopwatch:
def __init__(self, set_to_neg_inf=False):
if not set_to_neg_inf:
self.time0 = get_time()
else:
self.time0 = -1e12
def elapsed_time(self):
return get_time() - self.time0
def reset(self):
self.time0 = get_time()
def set_to_neg_inf(self):
self.time0 = -1e12
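# Minimal usage sketch for Stopwatch (illustrative, not part of the original module):
#   timer = Stopwatch()
#   ...do some work...
#   print('elapsed: {:.2f} s'.format(timer.elapsed_time()))
#   timer.reset()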
# def render_whisker_plot_split(state_report,
# plot_param_name='alpha_2',
# output_filename_format_str='test_boxplot_for_{}_{}.png',
# opt_log=False,
# boxwidth=0.7,
# approx_types=[('SM', 'statsmodels')]):
#
# state_split = state_report['']
# for page in range(n_pages):
# if plot_param_name
# render_whisker_plot_simplified(state_report,
# plot_param_name='alpha_2',
# output_filename_format_str=output_filename_format_str.replace('.png', f'_page_{page}_of_{n_pages}.png'),
# opt_log=opt_log,
# boxwidth=boxwidth,
# approx_types=approx_types)
def render_whisker_plot_simplified(state_report,
plot_param_name='alpha_2',
output_filename_format_str='test_boxplot_for_{}_{}.png',
opt_log=False,
boxwidth=0.7,
approx_types=[('SM', 'statsmodels')],
opt_convert_growth_rate_to_percent=False):
'''
Plot all-state box/whiskers for given apram_name
:param state_report: full state report as pandas dataframe
:param param_name: param name as string
:param output_filename_format_str: format string for the output filename, with two open slots
:param opt_log: boolean for log-transform x-axis
:return: None, it saves plots to files
'''
tmp_ind = [i for i, x in state_report.iterrows() if x['param'] == plot_param_name]
print('columns:')
print(state_report.columns)
tmp_ind = sorted(tmp_ind, key=lambda x: state_report.iloc[x][f'{approx_types[0].value[0]}_p50'])
small_state_report = state_report.iloc[tmp_ind]
small_state_report.to_csv('simplified_state_report_{}.csv'.format(plot_param_name))
if ApproxType.SM_TS in approx_types:
approx_types.remove(ApproxType.SM_TS)
for approx_type in approx_types:
try:
param_name_abbr, param_name = approx_type.value
latex_str = small_state_report[
[f'{param_name_abbr}_p5', f'{param_name_abbr}_p50', f'{param_name_abbr}_p95']].to_latex(index=False,
float_format="{:0.4f}".format)
print(param_name)
print(latex_str)
except:
pass
def convert_growth_rate_to_perc(x):
if opt_convert_growth_rate_to_percent:
return np.exp(x) - 1
else:
return x
map_approx_type_to_boxes = dict()
for approx_type in approx_types:
try:
param_name_abbr, param_name = approx_type.value
tmp_list = list()
for i in range(len(small_state_report)):
row = pd.DataFrame([small_state_report.iloc[i]])
new_box = \
{
'label': approx_type.value[1],
'whislo': convert_growth_rate_to_perc(row[f'{param_name_abbr}_p5'].values[0]), # Bottom whisker position
'q1': convert_growth_rate_to_perc(row[f'{param_name_abbr}_p25'].values[0]), # First quartile (25th percentile)
'med': convert_growth_rate_to_perc(row[f'{param_name_abbr}_p50'].values[0]), # Median (50th percentile)
'q3': convert_growth_rate_to_perc(row[f'{param_name_abbr}_p75'].values[0]), # Third quartile (75th percentile)
'whishi': convert_growth_rate_to_perc(row[f'{param_name_abbr}_p95'].values[0]), # Top whisker position
'fliers': [] # Outliers
}
tmp_list.append(new_box)
map_approx_type_to_boxes[param_name_abbr] = tmp_list
except:
pass
plt.close()
plt.clf()
fig, ax = plt.subplots()
fig.set_size_inches(8, 10.5)
# add vertical line at zero
plt.axvline(linestyle='--', x=0, color='black')
n_groups = len(map_approx_type_to_boxes)
# when just one group, there's extra space between each state we need to account for
if n_groups == 1:
boxwidth = 1.2
map_approx_type_to_ax = dict()
for ind, approx_type in enumerate(sorted(map_approx_type_to_boxes)):
try:
map_approx_type_to_ax[approx_type] = ax.bxp(map_approx_type_to_boxes[approx_type], showfliers=False,
positions=range(1 + ind,
len(map_approx_type_to_boxes[approx_type]) * (
n_groups + 1), (n_groups + 1)),
widths=boxwidth, patch_artist=True, vert=False)
except:
pass
setup_boxes = map_approx_type_to_boxes[list(map_approx_type_to_boxes.keys())[0]]
# plt.yticks([x + 0.5 for x in range(1, len(BS_boxes) * (n_groups + 1), (n_groups + 1))], small_state_report['state'])
plt.yticks(range(1, len(setup_boxes) * (n_groups + 1), (n_groups + 1)), small_state_report['state'])
if opt_convert_growth_rate_to_percent and not opt_log:
ax.xaxis.set_major_formatter(mtick.PercentFormatter(xmax=1, decimals=2))
# fill with colors
colors = ['blue', 'red', 'green', 'purple', 'orange', 'brown', 'navy', 'teal', 'orchid', 'tan']
for approx_type, color in zip(sorted(map_approx_type_to_ax), colors):
try:
ax = map_approx_type_to_ax[approx_type]
for item in ['boxes', 'whiskers', 'fliers', 'medians', 'caps']:
for patch in ax[item]:
try:
patch.set_facecolor(color)
except:
pass
patch.set_color(color)
except:
pass
# lines = [[(0, 1), (1, 1)], [(2, 3), (3, 3)], [(1, 2), (1, 3)]]
# line_collxn = mc.LineCollection(lines, '.-', color='black')
# ax.add_collection(line_collxn)
# add legend
if n_groups > 1:
custom_lines = [
Line2D([0], [0], color=color, lw=4) for approx_type, color in zip(sorted(map_approx_type_to_ax), colors)
]
plt.legend(custom_lines, sorted(map_approx_type_to_ax))
# increase left margin
output_filename = output_filename_format_str.format(plot_param_name,
'_'.join(approx_type.value[1] for approx_type in approx_types))
plt.subplots_adjust(left=0.2)
if opt_log:
plt.xscale('log')
if 'slope' in plot_param_name and ApproxType.SM_acc not in approx_types:
plt.xlabel('Daily Relative Growth Rate')
elif 'slope' in plot_param_name and ApproxType.SM_acc in approx_types:
plt.xlabel('Absolute Week-over-Week Change in Daily Relative Growth Rate')
plt.savefig(output_filename, dpi=300)
# plt.boxplot(small_state_report['state'], small_state_report[['BS_p5', 'BS_p95']])
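# Hypothetical call (argument values are illustrative, not taken from this module):
#   render_whisker_plot_simplified(state_report, plot_param_name='alpha_2',
#                                  approx_types=[ApproxType.SM, ApproxType.PyMC3])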
def generate_state_prediction(map_state_name_to_model,
override_max_date_str,
prediction_filename=None,
n_samples=1000):
max_date = datetime.datetime.strptime(override_max_date_str, '%Y-%m-%d')
all_predictions = list()
for state_ind, state in enumerate(map_state_name_to_model):
if state in map_state_name_to_model:
state_model = map_state_name_to_model[state]
else:
print(f'Skipping {state}!')
continue
print(f'Running predictions on {state} ({state_ind} of {len(map_state_name_to_model)})')
if state_model is not None:
for approx_type in state_model.model_approx_types:
if approx_type == ApproxType.SM or approx_type == ApproxType.SM_acc or approx_type == ApproxType.SM_TS:
params, _, _, log_probs = state_model.get_weighted_samples_via_statsmodels(n_samples=200)
elif approx_type == ApproxType.PyMC3:
params, _, _, log_probs = state_model.get_weighted_samples_via_PyMC3(n_samples=200)
print(approx_type)
param_inds_to_plot = list(range(len(params)))
param_inds_to_plot = np.random.choice(param_inds_to_plot, min(n_samples, len(param_inds_to_plot)),
replace=False)
sols_to_plot = [state_model.run_simulation(in_params=params[param_ind]) for param_ind in
tqdm(param_inds_to_plot)]
start_ind_sol = len(state_model.data_new_tested) + state_model.burn_in
start_ind_data = start_ind_sol - 1 - state_model.burn_in
sol_date_range = [
state_model.min_date - datetime.timedelta(days=state_model.burn_in) + datetime.timedelta(
days=1) * i for i in range(len(sols_to_plot[0][0]))]
sols_to_plot_new_tested = list()
sols_to_plot_new_dead = list()
sols_to_plot_tested = list()
sols_to_plot_dead = list()
for sol in sols_to_plot:
tested = [max(sol[1][i] - state_model.log_offset, 0) for i in range(len(sol[1]))]
tested_range = np.cumsum(tested[start_ind_sol:])
dead = [max(sol[1][i] - state_model.log_offset, 0) for i in range(len(sol[1]))]
dead_range = np.cumsum(dead[start_ind_sol:])
sols_to_plot_new_tested.append(tested)
sols_to_plot_new_dead.append(dead)
data_tested_at_start = np.cumsum(state_model.data_new_tested)[start_ind_data]
data_dead_at_start = np.cumsum(state_model.data_new_dead)[start_ind_data]
tested = [0] * start_ind_sol + [data_tested_at_start + tested_val for tested_val in tested_range]
dead = [0] * start_ind_sol + [data_dead_at_start + dead_val for dead_val in dead_range]
sols_to_plot_tested.append(tested)
sols_to_plot_dead.append(dead)
output_list_of_dicts = list()
for date_ind in range(start_ind_sol, len(sols_to_plot_tested[0])):
distro_new_tested = [tested[date_ind] for tested in sols_to_plot_new_tested]
distro_new_dead = [dead[date_ind] for dead in sols_to_plot_new_dead]
distro_tested = [tested[date_ind] for tested in sols_to_plot_tested]
distro_dead = [dead[date_ind] for dead in sols_to_plot_dead]
tmp_dict = {'model_type': approx_type.value[1],
'date': sol_date_range[date_ind],
'total_positive_mean': np.average(distro_tested),
'total_positive_std': np.std(distro_tested),
'total_positive_p5': np.percentile(distro_tested, 5),
'total_positive_p25': np.percentile(distro_tested, 25),
'total_positive_p50': np.percentile(distro_tested, 50),
'total_positive_p75': np.percentile(distro_tested, 75),
'total_positive_p95': np.percentile(distro_tested, 95),
'total_deceased_mean': np.average(distro_dead),
'total_deceased_std': np.std(distro_dead),
'total_deceased_p5': np.percentile(distro_dead, 5),
'total_deceased_p25': np.percentile(distro_dead, 25),
'total_deceased_p50': np.percentile(distro_dead, 50),
'total_deceased_p75': np.percentile(distro_dead, 75),
'total_deceased_p95': np.percentile(distro_dead, 95),
'new_positive_mean': np.average(distro_new_tested),
'new_positive_std': np.std(distro_new_tested),
'new_positive_p5': np.percentile(distro_new_tested, 5),
'new_positive_p25': np.percentile(distro_new_tested, 25),
'new_positive_p50': np.percentile(distro_new_tested, 50),
'new_positive_p75': np.percentile(distro_new_tested, 75),
'new_positive_p95': np.percentile(distro_new_tested, 95),
'new_deceased_mean': np.average(distro_new_dead),
'new_deceased_std': np.std(distro_new_dead),
'new_deceased_p5': np.percentile(distro_new_dead, 5),
'new_deceased_p25': np.percentile(distro_new_dead, 25),
'new_deceased_p50': np.percentile(distro_new_dead, 50),
'new_deceased_p75': np.percentile(distro_new_dead, 75),
'new_deceased_p95': np.percentile(distro_new_dead, 95),
}
output_list_of_dicts.append(tmp_dict.copy())
tmp_dict.update({'state': state})
all_predictions.append(tmp_dict.copy())
    all_predictions = pd.DataFrame(all_predictions)
#%%
import pymaid
from pymaid_creds import url, name, password, token
rm = pymaid.CatmaidInstance(url, token, name, password)
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib as mpl
from data_settings import data_date, pairs_path
from contools import Promat, Prograph, Celltype, Celltype_Analyzer
# allows text to be editable in Illustrator
plt.rcParams['pdf.fonttype'] = 42
plt.rcParams['ps.fonttype'] = 42
plt.rcParams['font.size'] = 5
plt.rcParams['font.family'] = 'arial'
adj = Promat.pull_adj('ad', data_date=data_date)
edges = Promat.pull_edges('ad', threshold=0.01, data_date=data_date, pairs_combined=False)
# ignore all outputs to A1 neurons; this will make sure dVNC feedback only goes to brain
# while also allowing ascending -> dVNC connections
#VNC_neurons = pymaid.get_skids_by_annotation('mw A1 neurons paired') + pymaid.get_skids_by_annotation('mw A00c')
#edges = edges[[x not in VNC_neurons for x in edges.downstream_skid]]
pairs = Promat.get_pairs(pairs_path=pairs_path)
dVNCs = pymaid.get_skids_by_annotation('mw dVNC')
dVNC_pairs = Promat.load_pairs_from_annotation('mw dVNC', pairs, return_type='all_pair_ids_bothsides', skids=dVNCs, use_skids=True)
# %%
# dVNC projectome data prep
import cmasher as cmr
projectome = pd.read_csv('data/projectome/projectome_adjacency.csv', index_col = 0, header = 0)
projectome.index = [str(x) for x in projectome.index]
# identify meshes
meshes = ['Brain Hemisphere left', 'Brain Hemisphere right', 'SEZ_left', 'SEZ_right', 'T1_left', 'T1_right', 'T2_left', 'T2_right', 'T3_left', 'T3_right', 'A1_left', 'A1_right', 'A2_left', 'A2_right', 'A3_left', 'A3_right', 'A4_left', 'A4_right', 'A5_left', 'A5_right', 'A6_left', 'A6_right', 'A7_left', 'A7_right', 'A8_left', 'A8_right']
pairOrder_dVNC = [x for sublist in zip(dVNC_pairs.leftid, dVNC_pairs.rightid) for x in sublist]
input_projectome = projectome.loc[meshes, [str(x) for x in pairOrder_dVNC]]
output_projectome = projectome.loc[[str(x) for x in pairOrder_dVNC], meshes]
dVNC_projectome_pairs_summed_output = []
indices = []
for i in np.arange(0, len(output_projectome.index), 2):
combined_pairs = (output_projectome.iloc[i, :] + output_projectome.iloc[i+1, :])
combined_hemisegs = []
for j in np.arange(0, len(combined_pairs), 2):
combined_hemisegs.append((combined_pairs[j] + combined_pairs[j+1]))
dVNC_projectome_pairs_summed_output.append(combined_hemisegs)
indices.append(output_projectome.index[i])
dVNC_projectome_pairs_summed_output = pd.DataFrame(dVNC_projectome_pairs_summed_output, index = indices, columns = ['brain','SEZ', 'T1', 'T2', 'T3', 'A1', 'A2', 'A3', 'A4', 'A5', 'A6', 'A7', 'A8'])
#dVNC_projectome_pairs_summed_output = dVNC_projectome_pairs_summed_output.iloc[:, 1:len(dVNC_projectome_pairs_summed_output)]
#normalize # of presynaptic sites
dVNC_projectome_pairs_summed_output_norm = dVNC_projectome_pairs_summed_output.copy()
for i in range(len(dVNC_projectome_pairs_summed_output)):
sum_row = sum(dVNC_projectome_pairs_summed_output_norm.iloc[i, :])
for j in range(len(dVNC_projectome_pairs_summed_output.columns)):
dVNC_projectome_pairs_summed_output_norm.iloc[i, j] = dVNC_projectome_pairs_summed_output_norm.iloc[i, j]/sum_row
# remove brain from columns
dVNC_projectome_pairs_summed_output_norm_no_brain = dVNC_projectome_pairs_summed_output_norm.iloc[:, 1:len(dVNC_projectome_pairs_summed_output)]
dVNC_projectome_pairs_summed_output_no_brain = dVNC_projectome_pairs_summed_output.iloc[:, 1:len(dVNC_projectome_pairs_summed_output)]
# %%
# ordering and plotting
# sorting with normalized data
sort_threshold = 0
dVNC_projectome_pairs_summed_output_sort_norm = dVNC_projectome_pairs_summed_output_norm_no_brain.copy()
dVNC_projectome_pairs_summed_output_sort_norm[dVNC_projectome_pairs_summed_output_sort_norm<sort_threshold]=0
order = ['SEZ', 'T1', 'T2', 'T3', 'A1', 'A2', 'A3', 'A4', 'A5', 'A6', 'A7', 'A8']
order.reverse()
dVNC_projectome_pairs_summed_output_sort_norm.sort_values(by=order, ascending=False, inplace=True)
sort = dVNC_projectome_pairs_summed_output_sort_norm.index
cmap = plt.cm.get_cmap('Blues') # modify 'Blues' cmap to have a white background
blue_cmap = cmap(np.linspace(0, 1, 20))
blue_cmap[0] = np.array([1, 1, 1, 1])
blue_cmap = mpl.colors.LinearSegmentedColormap.from_list(name='New_Blues', colors=blue_cmap)
cmap = blue_cmap
fig, ax = plt.subplots(1,1,figsize=(2,2))
sns.heatmap(dVNC_projectome_pairs_summed_output_norm.loc[sort, :], ax=ax, cmap=cmap)
plt.savefig(f'plots/projectome_A8-T1_sort_normalized_sortThres{sort_threshold}.pdf', bbox_inches='tight')
# sorting with raw data
sort_threshold = 0
dVNC_projectome_pairs_summed_output_sort = dVNC_projectome_pairs_summed_output_no_brain.copy()
dVNC_projectome_pairs_summed_output_sort[dVNC_projectome_pairs_summed_output_sort<sort_threshold]=0
order = ['SEZ', 'T1', 'T2', 'T3', 'A1', 'A2', 'A3', 'A4', 'A5', 'A6', 'A7', 'A8']
order.reverse()
dVNC_projectome_pairs_summed_output_sort.sort_values(by=order, ascending=False, inplace=True)
sort = dVNC_projectome_pairs_summed_output_sort.index
vmax = 70
cmap = blue_cmap
fig, ax = plt.subplots(1,1,figsize=(2,2))
sns.heatmap(dVNC_projectome_pairs_summed_output.loc[sort, :], ax=ax, cmap=cmap, vmax=vmax)
plt.savefig(f'plots/projectome_A8-T1_sort_projectome_sortThres{sort_threshold}.pdf', bbox_inches='tight')
dVNC_projectome_pairs_summed_output.to_csv('data/projectome/dVNC_projectome.csv')
# %%
# paths 2-hop upstream of each dVNC
from tqdm import tqdm
# sort dVNC pairs
sort = [int(x) for x in sort]
dVNC_pairs.set_index('leftid', drop=False, inplace=True)
dVNC_pairs = dVNC_pairs.loc[sort, :]
dVNC_pairs.reset_index(inplace=True, drop=True)
hops = 2
threshold = 0.01
ascendings = Celltype_Analyzer.get_skids_from_meta_annotation('mw A1 ascending') # exclude ascendings from downstream b/c only interested in downstream partners in the brain
dVNC_pair_paths_us = [Promat.upstream_multihop(edges=edges, sources=dVNC_pairs.loc[i].to_list(), hops=hops, pairs=pairs) for i in tqdm(range(0, len(dVNC_pairs)))]
dVNC_pair_paths_ds = [Promat.downstream_multihop(edges=edges, sources=dVNC_pairs.loc[i].to_list(), hops=hops, pairs=pairs, exclude=ascendings) for i in tqdm(range(0, len(dVNC_pairs)))]
# %%
# make bar plots for 1-hop and 2-hop
_, celltypes = Celltype_Analyzer.default_celltypes()
figsize = (2,0.5)
# UPSTREAM
us_1order = Celltype_Analyzer([Celltype(str(dVNC_pairs.loc[i].leftid) + '_us_1o', x[0]) for i, x in enumerate(dVNC_pair_paths_us)])
us_2order = Celltype_Analyzer([Celltype(str(dVNC_pairs.loc[i].leftid) + '_us_2o', x[1]) for i, x in enumerate(dVNC_pair_paths_us)])
us_1order.set_known_types(celltypes)
us_2order.set_known_types(celltypes)
path = 'plots/dVNC_partners_summary_plot_1st_order_upstream.pdf'
us_1order.plot_memberships(path = path, figsize=figsize)
path = 'plots/dVNC_partners_summary_plot_2nd_order_upstream.pdf'
us_2order.plot_memberships(path = path, figsize=figsize)
# DOWNSTREAM
ds_1order = Celltype_Analyzer([Celltype(str(dVNC_pairs.loc[i].leftid) + '_ds_1o', x[0]) for i, x in enumerate(dVNC_pair_paths_ds)])
ds_2order = Celltype_Analyzer([Celltype(str(dVNC_pairs.loc[i].leftid) + '_ds_2o', x[1]) for i, x in enumerate(dVNC_pair_paths_ds)])
ds_1order.set_known_types(celltypes)
ds_2order.set_known_types(celltypes)
path = 'plots/dVNC_partners_summary_plot_1st_order_downstream.pdf'
ds_1order.plot_memberships(path = path, figsize=figsize)
path = 'plots/dVNC_partners_summary_plot_2nd_order_downstream.pdf'
ds_2order.plot_memberships(path = path, figsize=figsize)
# %%
# categorizing dVNCs into candidate behaviors
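# Heuristics encoded below: a dVNC pair is a forward candidate if its VNC output peaks
# in A8, a speed-modulation candidate if it reaches A8 without peaking there, a turn
# candidate if it outputs to A2-A4 but nothing posterior of A4, a backup candidate if
# A1 is its only abdominal target, and a hunch/head-move candidate if its output is
# restricted to the thoracic segments (T1-T3)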
fwd = (dVNC_projectome_pairs_summed_output.iloc[:, 2:].A8 == dVNC_projectome_pairs_summed_output.iloc[:, 2:].max(axis=1)) & (dVNC_projectome_pairs_summed_output.A8>0)
speed = (dVNC_projectome_pairs_summed_output.iloc[:, 2:].A8 != dVNC_projectome_pairs_summed_output.iloc[:, 2:].max(axis=1)) & (dVNC_projectome_pairs_summed_output.A8>0)
turn = (dVNC_projectome_pairs_summed_output.loc[:, ['A5', 'A6', 'A7', 'A8']].sum(axis=1)==0) & (dVNC_projectome_pairs_summed_output.loc[:, ['A2', 'A3', 'A4']].sum(axis=1)>0)
backup = (dVNC_projectome_pairs_summed_output.loc[:, ['A2', 'A3', 'A4', 'A5', 'A6', 'A7', 'A8']].sum(axis=1)==0) & (dVNC_projectome_pairs_summed_output.loc[:, ['A1']].sum(axis=1)>0)
head_hunch = (dVNC_projectome_pairs_summed_output.loc[:, ['A1', 'A2', 'A3', 'A4', 'A5', 'A6', 'A7', 'A8']].sum(axis=1)==0) & (dVNC_projectome_pairs_summed_output.loc[:, ['T1', 'T2', 'T3']].sum(axis=1)>0)
fwd = dVNC_projectome_pairs_summed_output.index[fwd]
speed = dVNC_projectome_pairs_summed_output.index[speed]
turn = dVNC_projectome_pairs_summed_output.index[turn]
backup = dVNC_projectome_pairs_summed_output.index[backup]
head_hunch = dVNC_projectome_pairs_summed_output.index[head_hunch]
fwd_skids = Promat.get_paired_skids([int(x) for x in fwd], pairs)
speed_skids = Promat.get_paired_skids([int(x) for x in speed], pairs)
turn_skids = Promat.get_paired_skids([int(x) for x in turn], pairs)
backup_skids = Promat.get_paired_skids([int(x) for x in backup], pairs)
head_hunch_skids = Promat.get_paired_skids([int(x) for x in head_hunch], pairs)
pymaid.add_annotations([x for sublist in fwd_skids for x in sublist], 'mw candidate forward')
pymaid.add_annotations([x for sublist in speed_skids for x in sublist], 'mw candidate speed-modulation')
pymaid.add_annotations([x for sublist in turn_skids for x in sublist], 'mw candidate turn')
pymaid.add_annotations([x for sublist in backup_skids for x in sublist], 'mw candidate backup')
pymaid.add_annotations([x for sublist in head_hunch_skids for x in sublist], 'mw candidate hunch_head-move')
pymaid.add_meta_annotations(['mw candidate forward', 'mw candidate speed-modulation', 'mw candidate turn', 'mw candidate backup', 'mw candidate hunch_head-move'], 'mw dVNC candidate behaviors')
# %%
# which neurons talk to neurons of candidate behaviors
from tqdm import tqdm
def plot_12order(skids_list, edges, hops, pairs, figsize, behavior):
ascendings = Celltype_Analyzer.get_skids_from_meta_annotation('mw A1 ascending')
us = [Promat.upstream_multihop(edges=edges, sources=skids_list[i], hops=hops, pairs=pairs) for i in tqdm(range(0, len(skids_list)))]
ds = [Promat.downstream_multihop(edges=edges, sources=skids_list[i], hops=hops, pairs=pairs, exclude=ascendings) for i in tqdm(range(0, len(skids_list)))]
# UPSTREAM
us_1order = Celltype_Analyzer([Celltype(str(skids_list[i][0]) + '_us_1o', x[0]) for i, x in enumerate(us)])
us_2order = Celltype_Analyzer([Celltype(str(skids_list[i][0]) + '_us_2o', x[1]) for i, x in enumerate(us)])
us_1order.set_known_types(celltypes)
us_2order.set_known_types(celltypes)
# DOWNSTREAM
ds_1order = Celltype_Analyzer([Celltype(str(skids_list[i][0]) + '_ds_1o', x[0]) for i, x in enumerate(ds)])
ds_2order = Celltype_Analyzer([Celltype(str(skids_list[i][0]) + '_ds_2o', x[1]) for i, x in enumerate(ds)])
ds_1order.set_known_types(celltypes)
ds_2order.set_known_types(celltypes)
labels = us_1order.memberships().T.columns
errwidth = 0.5
fig, axs = plt.subplots(2,2, figsize=figsize)
fig.tight_layout(pad=3.0)
ax = axs[0,0]
sns.barplot(data=us_1order.memberships().T, ax=ax, errwidth=errwidth)
ax.set(ylim=(0, 1), title=f'{behavior}_upstream1')
ax.set_xticklabels(labels, rotation=45, ha='right')
ax = axs[0,1]
sns.barplot(data=us_2order.memberships().T, ax=ax, errwidth=errwidth)
ax.set(ylim=(0, 1), title=f'{behavior}_upstream2')
ax.set_xticklabels(labels, rotation=45, ha='right')
ax = axs[1,0]
sns.barplot(data=ds_1order.memberships().T, ax=ax, errwidth=errwidth)
ax.set(ylim=(0, 1), title=f'{behavior}_downstream1')
ax.set_xticklabels(labels, rotation=45, ha='right')
ax = axs[1,1]
sns.barplot(data=ds_2order.memberships().T, ax=ax, errwidth=errwidth)
ax.set(ylim=(0, 1), title=f'{behavior}_downstream2')
ax.set_xticklabels(labels, rotation=45, ha='right')
plt.savefig(f'plots/dVNC_{behavior}_partners.pdf', bbox_inches='tight')
return(us_1order.memberships(), us_2order.memberships(), ds_1order.memberships(), ds_2order.memberships())
hops = 2
figsize = (4,4)
fwd_us, _, _, _ = plot_12order(skids_list=fwd_skids, edges=edges, hops=hops, pairs=pairs, figsize=figsize, behavior='forward')
speed_us, _, _, _ = plot_12order(skids_list=speed_skids, edges=edges, hops=hops, pairs=pairs, figsize=figsize, behavior='speed')
turn_us, _, _, _ = plot_12order(skids_list=turn_skids, edges=edges, hops=hops, pairs=pairs, figsize=figsize, behavior='turn')
backup_us, _, _, _ = plot_12order(skids_list=backup_skids, edges=edges, hops=hops, pairs=pairs, figsize=figsize, behavior='backup')
head_us, _, _, _ = plot_12order(skids_list=head_hunch_skids, edges=edges, hops=hops, pairs=pairs, figsize=figsize, behavior='hunch_head-move')
all_us, _, _, _ = plot_12order(skids_list=[list(x) for x in list(dVNC_pairs.values)], edges=edges, hops=hops, pairs=pairs, figsize=figsize, behavior='all')
# %%
# comparison of LHN/MBON connectivity to dVNCs of candidate behaviors
def prep_df(df_source, behavior):
df = df_source.copy()
df['class'] = df.index
df['behavior'] = [behavior]*len(df.index)
return(df.melt(['class', 'behavior']))
fwd_df = prep_df(fwd_us, 'forward')
speed_df = prep_df(speed_us, 'speed')
turn_df = prep_df(turn_us, 'turn')
backup_df = prep_df(backup_us, 'backup')
head_df = prep_df(head_us, 'head')
all_df = prep_df(all_us, 'all')
df = pd.concat([fwd_df, turn_df, backup_df, speed_df, head_df, all_df], axis=0)
df = df.set_index('class', drop=False)
fig, ax = plt.subplots(1,1,figsize=(2,2))
sns.barplot(data = df.loc[['LHNs', 'MBONs']], x='behavior', y='value', hue='class', order=['turn', 'backup', 'forward'], capsize=0.1, ci=68, ax=ax)
ax.set(ylim=(0, 0.25), ylabel='Fraction upstream partners')
plt.savefig(f'plots/dVNC_turn-back-forward_upstream_LHN-MBON.pdf', bbox_inches='tight')
# %%
# combine all data types for dVNCs: us1o, us2o, ds1o, ds2o, projectome
fraction_cell_types_1o_us = pd.DataFrame([x.iloc[:, 0] for x in fraction_types], index = fraction_types_names).T
fraction_cell_types_1o_us.columns = [f'1o_us_{x}' for x in fraction_cell_types_1o_us.columns]
unk_col = 1-fraction_cell_types_1o_us.sum(axis=1)
unk_col[unk_col==11]=0
fraction_cell_types_1o_us['1o_us_unk']=unk_col
fraction_cell_types_2o_us = pd.DataFrame([x.iloc[:, 1] for x in fraction_types], index = fraction_types_names).T
fraction_cell_types_2o_us.columns = [f'2o_us_{x}' for x in fraction_cell_types_2o_us.columns]
unk_col = 1-fraction_cell_types_2o_us.sum(axis=1)
unk_col[unk_col==11]=0
fraction_cell_types_2o_us['2o_us_unk']=unk_col
fraction_cell_types_1o_ds = pd.DataFrame([x.iloc[:, 0] for x in fraction_types_ds], index = fraction_types_names).T
fraction_cell_types_1o_ds.columns = [f'1o_ds_{x}' for x in fraction_cell_types_1o_ds.columns]
unk_col = 1-fraction_cell_types_1o_ds.sum(axis=1)
unk_col[unk_col==11]=0
fraction_cell_types_1o_ds['1o_ds_unk']=unk_col
fraction_cell_types_1o_ds[fraction_cell_types_1o_ds==-1]=0
fraction_cell_types_2o_ds = pd.DataFrame([x.iloc[:, 1] for x in fraction_types_ds], index = fraction_types_names).T
fraction_cell_types_2o_ds.columns = [f'2o_ds_{x}' for x in fraction_cell_types_2o_ds.columns]
unk_col = 1-fraction_cell_types_2o_ds.sum(axis=1)
unk_col[unk_col==11]=0
fraction_cell_types_2o_ds['2o_ds_unk']=unk_col
fraction_cell_types_2o_ds[fraction_cell_types_2o_ds==-1]=0
all_data = dVNC_projectome_pairs_summed_output_norm.copy()
all_data.index = [int(x) for x in all_data.index]
all_data = pd.concat([fraction_cell_types_1o_us, fraction_cell_types_2o_us, all_data, fraction_cell_types_1o_ds, fraction_cell_types_2o_ds], axis=1)
all_data.fillna(0, inplace=True)
# clustered version of all_data combined
cluster = sns.clustermap(all_data, col_cluster = False, figsize=(30,30), rasterized=True)
plt.savefig(f'VNC_interaction/plots/projectome/clustered_projectome_all_data.pdf', bbox_inches='tight')
order = cluster.dendrogram_row.reordered_ind
fig,ax=plt.subplots(1,1,figsize=(6,4))
sns.heatmap(all_data.iloc[order, :].drop(list(fraction_cell_types_1o_us.columns) + list(fraction_cell_types_2o_us.columns) + list(fraction_cell_types_1o_ds.columns) + list(fraction_cell_types_2o_ds.columns), axis=1), ax=ax, rasterized=True)
plt.savefig(f'VNC_interaction/plots/projectome/clustered_projectome_all_data_same_size.pdf', bbox_inches='tight')
cluster = sns.clustermap(all_data.drop(['1o_us_pre-dVNC', '2o_us_pre-dVNC'], axis=1), col_cluster = False, figsize=(20,15), rasterized=True)
plt.savefig(f'VNC_interaction/plots/projectome/clustered_projectome_all_data_removed_us-pre-dVNCs.pdf', bbox_inches='tight')
# decreasing sort of all_data but with feedback and non-feedback dVNC clustered
for i in range(1, 50):
dVNCs_with_FB = all_data.loc[:, list(fraction_cell_types_1o_ds.columns) + list(fraction_cell_types_2o_ds.columns)].sum(axis=1)
dVNCs_FB_true_skids = dVNCs_with_FB[dVNCs_with_FB>0].index
dVNCs_FB_false_skids = dVNCs_with_FB[dVNCs_with_FB==0].index
dVNC_projectome_pairs_summed_output_sort = all_data.copy()
dVNC_projectome_pairs_summed_output_sort = dVNC_projectome_pairs_summed_output_sort.loc[:, ['T1', 'T2', 'T3', 'A1', 'A2', 'A3', 'A4', 'A5', 'A6', 'A7', 'A8']]
dVNC_projectome_pairs_summed_output_sort = dVNC_projectome_pairs_summed_output_sort.loc[dVNCs_FB_true_skids]
dVNC_projectome_pairs_summed_output_sort[dVNC_projectome_pairs_summed_output_sort<(i/100)]=0
dVNC_projectome_pairs_summed_output_sort.sort_values(by=['T1', 'T2', 'T3', 'A1', 'A2', 'A3', 'A4', 'A5', 'A6', 'A7', 'A8'], ascending=False, inplace=True)
row_order_FB_true = dVNC_projectome_pairs_summed_output_sort.index
second_sort = all_data.copy()
second_sort = second_sort.loc[:, ['T1', 'T2', 'T3', 'A1', 'A2', 'A3', 'A4', 'A5', 'A6', 'A7', 'A8']]
second_sort = second_sort.loc[dVNCs_FB_false_skids]
second_sort[second_sort<(i/100)]=0
second_sort.sort_values(by=['T1', 'T2', 'T3', 'A1', 'A2', 'A3', 'A4', 'A5', 'A6', 'A7', 'A8'], ascending=False, inplace=True)
row_order_FB_false = second_sort.index
row_order = list(row_order_FB_true) + list(row_order_FB_false)
fig, ax = plt.subplots(figsize=(20, 15))
sns.heatmap(all_data.loc[row_order, :], ax=ax, rasterized=True)
plt.savefig(f'VNC_interaction/plots/projectome/splitFB_projectome_0.{i}-sort-threshold.pdf', bbox_inches='tight')
fig, ax = plt.subplots(figsize=(6,4))
sns.heatmap(all_data.loc[row_order, ['T1', 'T2', 'T3', 'A1', 'A2', 'A3', 'A4', 'A5', 'A6', 'A7', 'A8']], ax=ax, rasterized=True)
plt.savefig(f'VNC_interaction/plots/projectome/splitFB_same-size_projectome_0.{i}-sort-threshold.pdf', bbox_inches='tight')
# %%
# what fraction of us and ds neurons are from different cell types per hop?
fraction_cell_types_1o_us = pd.DataFrame([x.iloc[:, 0] for x in fraction_types], index = fraction_types_names).T
#!/usr/bin/python
#
# Copyright (c) 2017, United States Government, as represented by the
# Administrator of the National Aeronautics and Space Administration.
#
# All rights reserved.
#
# The Astrobee platform is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import matplotlib
matplotlib.use('pdf')
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import pandas as pd
import os
import sys
def save_rmse_results_to_csv(rmses, prefix='', rmses_2=None, label_1=None, label_2=None):
mean_rmses_dataframe = pd.DataFrame()
labels = []
if label_1 and label_2 and rmses_2 is not None:
labels.append(label_1)
labels.append(label_2)
if labels:
mean_rmses_dataframe['Label'] = labels
mean_rmses_list = []
mean_rmses_list.append(rmses.mean())
relative_rmses = []
relative_change_in_rmses = []
if rmses_2 is not None:
mean_rmses_list.append(rmses_2.mean())
relative_rmses.append(mean_rmses_list[0] / mean_rmses_list[1])
relative_rmses.append(mean_rmses_list[1] / mean_rmses_list[0])
relative_change_in_rmses.append(mean_rmses_list[0] / mean_rmses_list[1] - 1.0)
relative_change_in_rmses.append(mean_rmses_list[1] / mean_rmses_list[0] - 1.0)
mean_rmses_dataframe['rel_' + prefix + 'rmse_%'] = relative_rmses
mean_rmses_dataframe['rel_' + prefix + 'rmse_delta_%'] = relative_change_in_rmses
mean_rmses_dataframe['mean_' + prefix + 'rmse'] = mean_rmses_list
mean_rmses_csv_file = 'mean_rmses.csv'
mean_rmses_dataframe.to_csv(mean_rmses_csv_file, index=False, mode='a')
return mean_rmses_list, labels, relative_rmses, relative_change_in_rmses
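# Usage sketch (names and values are illustrative): comparing two runs appends one row
# per label to mean_rmses.csv and returns the means plus relative differences, e.g.
#   means, labels, rel, rel_delta = save_rmse_results_to_csv(
#       rmses_a, prefix='pos_', rmses_2=rmses_b, label_1='run_a', label_2='run_b')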
def rmse_plot(pdf, x_axis_vals, shortened_bag_names, rmses, prefix='', label_1='', rmses_2=None, label_2=''):
plt.figure()
plt.plot(x_axis_vals, rmses, 'b', label=label_1, linestyle='None', marker='o', markeredgewidth=0.1, markersize=10.5)
if rmses_2 is not None:
plt.plot(x_axis_vals,
rmses_2,
'r',
label=label_2,
linestyle='None',
marker='o',
markeredgewidth=0.1,
markersize=10.5)
plt.legend(prop={'size': 8}, bbox_to_anchor=(1.05, 1))
plt.xticks(x_axis_vals, shortened_bag_names, fontsize=7, rotation=20)
plt.ylabel(prefix + ' RMSE')
plt.title(prefix + ' RMSE vs. Bag')
x_range = x_axis_vals[len(x_axis_vals) - 1] - x_axis_vals[0]
x_buffer = x_range * 0.1
# Extend x axis on either side to make data more visible
plt.xlim([x_axis_vals[0] - x_buffer, x_axis_vals[len(x_axis_vals) - 1] + x_buffer])
plt.tight_layout()
pdf.savefig()
plt.close()
def save_rmse_stats_to_plot(pdf, rmses, prefix='', label_1='', rmses_2=None, label_2=''):
# Plot mean rmses
mean_rmses, labels, relative_rmses, relative_change_in_rmses = save_rmse_results_to_csv(
rmses, prefix, rmses_2, label_1, label_2)
mean_rmses_1_string = prefix + 'rmse: ' + str(mean_rmses[0])
if labels:
mean_rmses_1_string += ', label: ' + labels[0]
plt.figure()
plt.axis('off')
plt.text(0.0, 1.0, mean_rmses_1_string)
if len(mean_rmses) > 1:
mean_rmses_2_string = prefix + 'rmse: ' + str(mean_rmses[1])
if labels:
mean_rmses_2_string += ', label: ' + labels[1]
plt.text(0.0, 0.85, mean_rmses_2_string)
relative_rmses_string = prefix + 'rel rmse %: ' + str(100 * relative_rmses[0])
plt.text(0.0, 0.7, relative_rmses_string)
relative_rmses_change_string = prefix + 'rel change in rmse %: ' + str(100 * relative_change_in_rmses[0])
plt.text(0.0, 0.55, relative_rmses_change_string)
pdf.savefig()
def rmse_plots(pdf,
x_axis_vals,
shortened_bag_names,
rmses,
integrated_rmses,
orientation_rmses,
prefix='',
label_1='',
rmses_2=None,
integrated_rmses_2=None,
orientation_rmses_2=None,
label_2=''):
rmse_plot(pdf, x_axis_vals, shortened_bag_names, rmses, prefix + ' ', label_1, rmses_2, label_2)
if integrated_rmses is not None:
rmse_plot(pdf, x_axis_vals, shortened_bag_names, integrated_rmses, prefix + ' Integrated ', label_1,
integrated_rmses_2, label_2)
rmse_plot(pdf, x_axis_vals, shortened_bag_names, orientation_rmses, prefix + ' Orientation ', label_1,
orientation_rmses_2, label_2)
save_rmse_stats_to_plot(pdf, rmses, prefix + ' ', label_1, rmses_2, label_2)
if integrated_rmses is not None:
save_rmse_stats_to_plot(pdf, integrated_rmses, prefix + ' Integrated ', label_1, integrated_rmses_2, label_2)
save_rmse_stats_to_plot(pdf, orientation_rmses, prefix + ' Orientation ', label_1, orientation_rmses_2, label_2)
def create_plot(output_file, csv_file, label_1='', csv_file_2=None, label_2='', imu_augmented_2=True):
dataframe = pd.read_csv(csv_file)
import os
import gc
import time
import datetime
import pickle as pk
from collections import OrderedDict
from concurrent.futures import ThreadPoolExecutor
import matplotlib.pyplot as plt
import numpy as np
import pandas_market_calendars as mcal
import pandas as pd
from tqdm import tqdm
import wrds
import wrds_utils as wu
FILEPATH = '/home/nate/Dropbox/data/wrds/compustat_north_america/'
hdf_settings = {'key': 'data',
'mode': 'w',
'complib': 'blosc',
'complevel': 9}
hdf_settings_table = {'key': 'data',
'mode': 'a',
'append': True,
'format': 'table',
'complib': 'blosc',
'complevel': 9}
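# hdf_settings_table is intended for incremental writes: 'append' together with the
# 'table' format adds rows to an existing HDF5 store instead of overwriting it.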
secd_cols_to_use = ['ajexdi', # Adjusted Price = (PRCCD / AJEXDI ); “Understanding the Data” on page 91 and on (chapter 6)
'cshoc', # shares outstanding
'cshtrd', # volume
'curcdd',
'datadate',
'eps',
'gvkey',
'iid',
'prccd', # close
'prchd', # high
'prcld', # low
'prcod'] # open
secd_cols = ','.join(secd_cols_to_use)
def make_db_connection():
"""
creates connection to WRDS database
need to enter credentials to log in
"""
wrds_uname = os.environ.get('wrds_username')
wrds_pass = os.environ.get('wrds_password')
# tries to use pgpass file; see here:
# https://wrds-www.wharton.upenn.edu/pages/support/accessing-wrds-remotely/troubleshooting-pgpass-file-remotely/
db = wrds.Connection(wrds_username=wrds_uname, wrds_password=wrds_pass)
# saves credentials, but not pgpass working
# db.create_pgpass_file()
return db
def check_if_up_to_date(db, df_filepath, table, library='comp'):
"""
checks if the current number of rows is less than the number of rows in the db on WRDS;
returns True if up to date, False if not
Keyword arguments:
db -- database connection (from make_db_connection)
df_filepath --
table -- string, name of table (e.g. names_ix)
library -- string, name of library (e.g. comp for compustat)
"""
if os.path.exists(df_filepath):
current_df = pd.read_hdf(df_filepath)
current_rows = current_df.shape[0]
else:
current_rows = 0
nrows = db.get_row_count(library=library, table=table)
if nrows == current_rows:
print('up to date')
return True, nrows
elif nrows < current_rows:
print('number of available rows is less than number in current db;')
print('something is wrong...')
return True, nrows
else:
if (nrows - 1) == current_rows and table == 'sec_shortint':
print('off by one row, but probably up to date on sec_shortint')
return True, nrows
print('db needs updating')
return False, nrows
def download_small_table(db, table, library='comp', return_table=False):
"""
downloads the table if it needs updating
This is intended for smaller tables, where the entire table can be
downloaded at once and can overwrite the old table.
table can be a tablename in the library; common ones for compustat (comp) are:
security
names_ix
idxcst_his
.h5 files have same name as table
Keyword arguments:
db -- database connection (from make_db_connection)
table -- string, name of table (e.g. names_ix)
library -- string, name of library (e.g. comp for compustat)
return_table -- boolean, if True, returns downloaded dataframe
"""
df_filepath = FILEPATH + 'hdf/{}.hdf'.format(table)
up_to_date, nrows = check_if_up_to_date(db, df_filepath, table=table, library=library)
last_updated_fp = FILEPATH + 'last_updated_dates.pk'
if os.path.exists(last_updated_fp):
updated_record = pk.load(open(last_updated_fp, 'rb'))
else:
updated_record = {}
updated_record[table] = pd.Timestamp.today(tz='US/Eastern')
if up_to_date:
pk.dump(updated_record, open(last_updated_fp, 'wb'), protocol=-1)
if return_table:
return None
return
df = db.get_table(library=library, table=table)
if table == 'idxcst_his':
# converts date columns to datetime
df['from'] = pd.to_datetime(df['from'], utc=True)
from __future__ import print_function
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from pandas import (Series, Index, Int64Index, Timestamp, Period,
DatetimeIndex, PeriodIndex, TimedeltaIndex,
Timedelta, timedelta_range, date_range, Float64Index,
_np_version_under1p10)
import pandas.tslib as tslib
import pandas.tseries.period as period
import pandas.util.testing as tm
from pandas.tests.test_base import Ops
class TestDatetimeIndexOps(Ops):
tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific']
def setUp(self):
super(TestDatetimeIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['date', 'time', 'microsecond', 'nanosecond',
'is_month_start', 'is_month_end',
'is_quarter_start',
'is_quarter_end', 'is_year_start',
'is_year_end', 'weekday_name'],
lambda x: isinstance(x, DatetimeIndex))
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH7206
for op in ['year', 'day', 'second', 'weekday']:
self.assertRaises(TypeError, lambda x: getattr(self.dt_series, op))
# attribute access should still work!
s = Series(dict(year=2000, month=1, day=10))
self.assertEqual(s.year, 2000)
self.assertEqual(s.month, 1)
self.assertEqual(s.day, 10)
self.assertRaises(AttributeError, lambda: s.weekday)
def test_asobject_tolist(self):
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [Timestamp('2013-01-31'),
Timestamp('2013-02-28'),
Timestamp('2013-03-31'),
Timestamp('2013-04-30')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx', tz='Asia/Tokyo')
expected_list = [Timestamp('2013-01-31', tz='Asia/Tokyo'),
Timestamp('2013-02-28', tz='Asia/Tokyo'),
Timestamp('2013-03-31', tz='Asia/Tokyo'),
Timestamp('2013-04-30', tz='Asia/Tokyo')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
pd.NaT, datetime(2013, 1, 4)], name='idx')
expected_list = [Timestamp('2013-01-01'),
Timestamp('2013-01-02'), pd.NaT,
Timestamp('2013-01-04')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
for tz in self.tz:
# monotonic
idx1 = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz=tz)
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], tz=tz)
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timestamp('2011-01-01', tz=tz))
self.assertEqual(idx.max(), Timestamp('2011-01-03', tz=tz))
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = DatetimeIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
self.assertEqual(np.min(dr),
Timestamp('2016-01-15 00:00:00', freq='D'))
self.assertEqual(np.max(dr),
Timestamp('2016-01-20 00:00:00', freq='D'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, dr, out=0)
self.assertEqual(np.argmin(dr), 0)
self.assertEqual(np.argmax(dr), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, dr, out=0)
def test_round(self):
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=5,
freq='30Min', tz=tz)
elt = rng[1]
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 01:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(rng.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with tm.assertRaisesRegexp(ValueError, msg):
rng.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, rng.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_repeat_range(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
for tz in self.tz:
index = pd.date_range('2001-01-01', periods=2, freq='D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-02', '2001-01-02'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.date_range('2001-01-01', periods=2, freq='2D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-03', '2001-01-03'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.DatetimeIndex(['2001-01-01', 'NaT', '2003-01-01'],
tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01', '2001-01-01',
'NaT', 'NaT', 'NaT',
'2003-01-01', '2003-01-01', '2003-01-01'],
tz=tz)
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_repeat(self):
reps = 2
msg = "the 'axis' parameter is not supported"
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=2,
freq='30Min', tz=tz)
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
])
res = rng.repeat(reps)
tm.assert_index_equal(res, expected_rng)
self.assertIsNone(res.freq)
tm.assert_index_equal(np.repeat(rng, reps), expected_rng)
tm.assertRaisesRegexp(ValueError, msg, np.repeat,
rng, reps, axis=1)
def test_representation(self):
idx = []
idx.append(DatetimeIndex([], freq='D'))
idx.append(DatetimeIndex(['2011-01-01'], freq='D'))
idx.append(DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'
], freq='H', tz='Asia/Tokyo'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='US/Eastern'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='UTC'))
exp = []
exp.append("""DatetimeIndex([], dtype='datetime64[ns]', freq='D')""")
exp.append("DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', "
"freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+09:00', "
"'2011-01-01 10:00:00+09:00', '2011-01-01 11:00:00+09:00']"
", dtype='datetime64[ns, Asia/Tokyo]', freq='H')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00-05:00', "
"'2011-01-01 10:00:00-05:00', 'NaT'], "
"dtype='datetime64[ns, US/Eastern]', freq=None)")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+00:00', "
"'2011-01-01 10:00:00+00:00', 'NaT'], "
"dtype='datetime64[ns, UTC]', freq=None)""")
with pd.option_context('display.width', 300):
for indx, expected in zip(idx, exp):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(indx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
idx7 = DatetimeIndex(['2011-01-01 09:00', '2011-01-02 10:15'])
exp1 = """Series([], dtype: datetime64[ns])"""
exp2 = """0 2011-01-01
dtype: datetime64[ns]"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: datetime64[ns]"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: datetime64[ns]"""
exp5 = """0 2011-01-01 09:00:00+09:00
1 2011-01-01 10:00:00+09:00
2 2011-01-01 11:00:00+09:00
dtype: datetime64[ns, Asia/Tokyo]"""
exp6 = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 NaT
dtype: datetime64[ns, US/Eastern]"""
exp7 = """0 2011-01-01 09:00:00
1 2011-01-02 10:15:00
dtype: datetime64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4,
idx5, idx6, idx7],
[exp1, exp2, exp3, exp4,
exp5, exp6, exp7]):
result = repr(Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'],
freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
exp1 = """DatetimeIndex: 0 entries
Freq: D"""
exp2 = """DatetimeIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """DatetimeIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """DatetimeIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = ("DatetimeIndex: 3 entries, 2011-01-01 09:00:00+09:00 "
"to 2011-01-01 11:00:00+09:00\n"
"Freq: H")
exp6 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00-05:00 to NaT"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6],
[exp1, exp2, exp3, exp4, exp5, exp6]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T',
'S', 'L', 'U'],
['day', 'day', 'day', 'day', 'hour',
'minute', 'second', 'millisecond',
'microsecond']):
for tz in self.tz:
idx = pd.date_range(start='2013-04-01', periods=30, freq=freq,
tz=tz)
self.assertEqual(idx.resolution, expected)
def test_union(self):
for tz in self.tz:
# union
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=10, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=8, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
def test_add_iadd(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng + delta
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng + 1
expected = pd.date_range('2000-01-01 10:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add a datelike to a DatetimeIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx + Timestamp('2011-01-01')
with tm.assertRaisesRegexp(TypeError, msg):
Timestamp('2011-01-01') + idx
def test_add_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now raises
# TypeError (GH14164)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
with tm.assertRaises(TypeError):
dti + dti
with tm.assertRaises(TypeError):
dti_tz + dti_tz
with tm.assertRaises(TypeError):
dti_tz + dti
with tm.assertRaises(TypeError):
dti + dti_tz
def test_difference(self):
for tz in self.tz:
# diff
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=3, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_diff = rng.difference(other)
tm.assert_index_equal(result_diff, expected)
def test_sub_isub(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
result = rng - delta
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng - 1
expected = pd.date_range('2000-01-01 08:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_sub_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now changed to
# return subtraction -> TimeDeltaIndex (GH ...)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
dti_tz2 = date_range('20130101', periods=3).tz_localize('UTC')
expected = TimedeltaIndex([0, 0, 0])
result = dti - dti
tm.assert_index_equal(result, expected)
result = dti_tz - dti_tz
tm.assert_index_equal(result, expected)
with tm.assertRaises(TypeError):
dti_tz - dti
with tm.assertRaises(TypeError):
dti - dti_tz
with tm.assertRaises(TypeError):
dti_tz - dti_tz2
# isub
dti -= dti
tm.assert_index_equal(dti, expected)
# different length raises ValueError
dti1 = date_range('20130101', periods=3)
dti2 = date_range('20130101', periods=4)
with tm.assertRaises(ValueError):
dti1 - dti2
# NaN propagation
dti1 = DatetimeIndex(['2012-01-01', np.nan, '2012-01-03'])
dti2 = DatetimeIndex(['2012-01-02', '2012-01-03', np.nan])
expected = TimedeltaIndex(['1 days', np.nan, np.nan])
result = dti2 - dti1
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'D']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_comp_nat(self):
left = pd.DatetimeIndex([pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')])
right = pd.DatetimeIndex([pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
for tz in self.tz:
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = DatetimeIndex(np.repeat(idx.values, range(1, len(idx) + 1)),
tz=tz)
exp_idx = pd.date_range('2011-01-01 18:00', freq='-1H', periods=10,
tz=tz)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = pd.date_range('2011-01-01 09:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(idx.unique(), expected)
idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 09:00',
'2013-01-01 09:00', '2013-01-01 08:00',
'2013-01-01 08:00', pd.NaT], tz=tz)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00'],
tz=tz)
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00',
pd.NaT], tz=tz)
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False),
expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(DatetimeIndex,
([0, 1, 0], [0, 0, -1], [0, -1, -1],
['2015', '2015', '2016'], ['2015', '2015', '2014'])):
tm.assertIn(idx[0], idx)
def test_order(self):
# with freq
idx1 = DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], freq='D', name='idx')
idx2 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H',
tz='Asia/Tokyo', name='tzidx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assert_numpy_array_equal(indexer,
np.array([2, 1, 0]),
check_dtype=False)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
# without freq
for tz in self.tz:
idx1 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx1')
exp1 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx1')
idx2 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx2')
exp2 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx2')
idx3 = DatetimeIndex([pd.NaT, '2011-01-03', '2011-01-05',
'2011-01-02', pd.NaT], tz=tz, name='idx3')
exp3 = DatetimeIndex([pd.NaT, pd.NaT, '2011-01-02', '2011-01-03',
'2011-01-05'], tz=tz, name='idx3')
for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx[0]
self.assertEqual(result, Timestamp('2011-01-01', tz=idx.tz))
result = idx[0:5]
expected = pd.date_range('2011-01-01', '2011-01-05', freq='D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.date_range('2011-01-01', '2011-01-09', freq='2D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.date_range('2011-01-12', '2011-01-24', freq='3D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = DatetimeIndex(['2011-01-05', '2011-01-04', '2011-01-03',
'2011-01-02', '2011-01-01'],
freq='-1D', tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx.take([0])
self.assertEqual(result, Timestamp('2011-01-01', tz=idx.tz))
result = idx.take([0, 1, 2])
expected = pd.date_range('2011-01-01', '2011-01-03', freq='D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.date_range('2011-01-01', '2011-01-05', freq='2D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.date_range('2011-01-08', '2011-01-02', freq='-3D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = DatetimeIndex(['2011-01-04', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = DatetimeIndex(['2011-01-29', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_take_invalid_kwargs(self):
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['A', '2A', '-2A', 'Q', '-1Q', 'M', '-1M', 'D', '3D',
'-3D', 'W', '-1W', 'H', '2H', '-2H', 'T', '2T', 'S',
'-3S']:
idx = pd.date_range('2011-01-01 09:00:00', freq=freq, periods=10)
result = pd.DatetimeIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
def test_nat_new(self):
idx = pd.date_range('2011-01-01', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.DatetimeIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
for tz in self.tz:
idx = pd.DatetimeIndex([], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.DatetimeIndex(['2011-01-01 13:00', '2011-01-01 14:00',
'2011-01-01 15:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = pd.DatetimeIndex(['2011-01-01 07:00', '2011-01-01 08:00',
'2011-01-01 09:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
def test_nat(self):
self.assertIs(pd.DatetimeIndex._na_value, pd.NaT)
self.assertIs(pd.DatetimeIndex([])._na_value, pd.NaT)
for tz in [None, 'US/Eastern', 'UTC']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], tz=tz)
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.DatetimeIndex(['2011-01-01', 'NaT'], tz=tz)
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
for tz in [None, 'UTC', 'US/Eastern', 'Asia/Tokyo']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'])
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'],
tz='US/Pacific')
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
# same internal, different tz
idx3 = pd.DatetimeIndex._simple_new(idx.asi8, tz='US/Pacific')
tm.assert_numpy_array_equal(idx.asi8, idx3.asi8)
self.assertFalse(idx.equals(idx3))
self.assertFalse(idx.equals(idx3.copy()))
self.assertFalse(idx.equals(idx3.asobject))
self.assertFalse(idx.asobject.equals(idx3))
self.assertFalse(idx.equals(list(idx3)))
self.assertFalse(idx.equals(pd.Series(idx3)))
class TestTimedeltaIndexOps(Ops):
def setUp(self):
super(TestTimedeltaIndexOps, self).setUp()
mask = lambda x: isinstance(x, TimedeltaIndex)
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = []
def test_ops_properties(self):
self.check_ops_properties(['days', 'hours', 'minutes', 'seconds',
'milliseconds'])
self.check_ops_properties(['microseconds', 'nanoseconds'])
def test_asobject_tolist(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'),
Timedelta('3 days'), Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), pd.NaT,
timedelta(days=4)], name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'), pd.NaT,
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
# monotonic
idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timedelta('1 days')),
self.assertEqual(idx.max(), Timedelta('3 days')),
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = TimedeltaIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
td = TimedeltaIndex(np.asarray(dr))
self.assertEqual(np.min(td), Timedelta('16815 days'))
self.assertEqual(np.max(td), Timedelta('16820 days'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, td, out=0)
self.assertEqual(np.argmin(td), 0)
self.assertEqual(np.argmax(td), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, td, out=0)
def test_round(self):
td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min')
elt = td[1]
expected_rng = TimedeltaIndex([
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 01:00:00'),
Timedelta('16801 days 02:00:00'),
Timedelta('16801 days 02:00:00'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(td.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with self.assertRaisesRegexp(ValueError, msg):
td.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, td.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_representation(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')"""
exp2 = ("TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', "
"freq='D')")
exp3 = ("TimedeltaIndex(['1 days', '2 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp4 = ("TimedeltaIndex(['1 days', '2 days', '3 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp5 = ("TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', "
"'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)")
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """Series([], dtype: timedelta64[ns])"""
exp2 = """0 1 days
dtype: timedelta64[ns]"""
exp3 = """0 1 days
1 2 days
dtype: timedelta64[ns]"""
exp4 = """0 1 days
1 2 days
2 3 days
dtype: timedelta64[ns]"""
exp5 = """0 1 days 00:00:01
1 2 days 00:00:00
2 3 days 00:00:00
dtype: timedelta64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex: 0 entries
Freq: D"""
exp2 = """TimedeltaIndex: 1 entries, 1 days to 1 days
Freq: D"""
exp3 = """TimedeltaIndex: 2 entries, 1 days to 2 days
Freq: D"""
exp4 = """TimedeltaIndex: 3 entries, 1 days to 3 days
Freq: D"""
exp5 = ("TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days "
"00:00:00")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = idx.summary()
self.assertEqual(result, expected)
def test_add_iadd(self):
# only test adding/sub offsets as + is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng + delta
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng + 1
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub_isub(self):
# only test adding/sub offsets as - is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng - delta
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng - 1
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
idx = TimedeltaIndex(['1 day', '2 day'])
msg = "cannot subtract a datelike from a TimedeltaIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx - Timestamp('2011-01-01')
result = Timestamp('2011-01-01') + idx
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
tm.assert_index_equal(result, expected)
def test_ops_compat(self):
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
rng = timedelta_range('1 days', '10 days', name='foo')
# multiply
for offset in offsets:
self.assertRaises(TypeError, lambda: rng * offset)
# divide
expected = Int64Index((np.arange(10) + 1) * 12, name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected, exact=False)
# divide with nats
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = Float64Index([12, np.nan, 24], name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected)
# don't allow division by NaT (maybe this could be allowed in the future)
self.assertRaises(TypeError, lambda: rng / pd.NaT)
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
#!/usr/bin/python3
"""
Last update: March 2021
Author: <NAME>, PhD - The Scripps Research Institute, La Jolla (CA)
Contact info: <EMAIL>
GitHub project repository: https://github.com/ldascenzo/pytheas
***DESCRIPTION***
Preliminary work-in-progress step towards the Pytheas support of discovery mode. In the present state, the user can
specify one or more modifications that will be added in every possible combination to the available sequences extracted
from the output.2 file.
If, for instance, a methylated C is requested, all instances of C are enumerated with every possible combination of
modified/unmodified positions. Note that the process can become computationally intensive if many "discovery"
modifications are added together for long sequences (it is advised to use only one for this version).
***OPTIONS***
--nts_light (REQUIRED): Excel spreadsheet with the standard nucleotides
and optional modification alphabet.
--mod_discovery (REQUIRED): text file with RNA modifications, must be in the same directory as the script. Details
on format and usage can be found in the Digest section of the Pytheas manual
--max_mods_per_fragment (OPTIONAL): Specify the maximum number of variable modifications allowed per RNA fragment
(Default = 2)
***OUTPUT***
1) output.2 -> same format as output.1, with added info on modified nucleotides.
"""
import argparse
import os
import sys
from itertools import permutations
from itertools import product
import pandas as pd
# Initialize and define launch options
parser = argparse.ArgumentParser(description='List of available options')
parser.add_argument('--mod_discovery', required=True,
help='File with molecule-specific RNA modifications '
'(Required, be sure to fill the Originating_base column!!!!)')
parser.add_argument('--nts_light', required=True,
help='Excel spreadsheet with the standard nucleotides and optional modification alphabet '
'(Required)')
parser.add_argument('--max_mods_per_fragment', default=2, type=int,
help='Specify the maximum number of variable modifications allowed per RNA fragment (Default = 2)')
args = parser.parse_args()
def read_excel_input(nts_alph=args.nts_light):
"""
Create three dictionaries mod_alphabet, mod_origin and mod_partial
* mod_alphabet contains all ID : ID_ext couples, one letter and extended IDs or all modified base
* mod_origin contains all the unmodified nucleotides and is used for modification positions validation
* mod_partial contains all the partial modified nucleotides for each modification (as from the alphabet file)
in case the option for partial modifications is selected
"""
mod_origin, mod_alphabet = {}, {}
if nts_alph:
# Checking that the nts_light file given in argument exists
if not os.path.exists(nts_alph):
print("ERROR! File {} does not exist. Execution terminated without output".format(nts_alph))
sys.exit(1)
# Create a dataframe with info from Excel spreadsheet
df = pd.read_excel(nts_alph, header=12)
# Drop rows with the 4 standard nucleobases
df = df[df.ID != 'C']
df = df[df.ID != 'G']
df = df[df.ID != 'A']
df = df[df.ID != 'U']
# Drop rows with NaN values
df = df[pd.notnull(df['ID'])]
# Transform all ID values in string (so numbers can be used as one letter code for bases)
df = df.astype({"ID": str})
# Create a dictionary with the pair ID : ID_ext as modification alphabet
mod_alphabet = dict(zip(df.ID, df.ID_ext))
# Create a dictionary with the ID : Originating_base for quality control purposes
mod_origin = dict(zip(df.ID, df.Originating_base))
return mod_origin, mod_alphabet
def parse_input(modfile):
"""
Create a list with all the modifications to consider for discovery mode
"""
# Checking that the nts_light file given in argument exists
if not os.path.exists(modfile):
print("ERROR! File {} does not exist. Execution terminated without output".format(modfile))
sys.exit(1)
with open(modfile, 'r') as f:
for line in f:
yield line.strip().split()
def mod_input(mods, header):
"""
Create a dataframe with all the input modifications to be included in discovery mode
"""
df = pd.DataFrame(mods)
# Package
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
from pandas import DataFrame
from plot_metric.functions import MultiClassClassification
import numpy as np
import matplotlib.pyplot as plt
# Load dataset
X, y = load_digits(n_class=4, return_X_y=True)
# Add noisy features to make the problem harder
random_state = np.random.RandomState(23)
n_samples, n_features = X.shape
X = np.c_[X, random_state.randn(n_samples, 1000 * n_features)]
# Split into train and test set
X_train, X_test, y_train, y_test = train_test_split(DataFrame(X), y, random_state=random_state)
# coding: utf-8
# # Notebook to generate a dataframe that captures data reliability
# Perform a series of tests/questions on each row and score the result based on 0 (missing), 1 (ambiguous), 2 (present)
# - is the plot number recorded? If not, this makes it very difficult to identify the plot as unique vs others (2 if different from 1.2)
# - is the type of property recorded? Very difficult to interpret the results if we don’t know this
# - does the plot have a zone? (Other means ambiguous)
# - does the plot have a zone section?
# - does the plot have toilets (sum should not include disused)
# - does the plot receive water?
# - who is the respondent (2 for landlord, 1 for caretaker and tenant, 0 for unknown)
# - was gps info captured?
# - does the number of users sum up to the initial value
# - do they know where they dispose of solid wastes?
# - do they know if the toilet has been upgraded - no, not reliable if they haven't been there 2 years
# - Do they know the age of the toilet?
# - Do they give age of toilet if more than 1
# - Do they know if the toilet has been emptied?
# - Do they know how much they spent?
# - Do they know how often they empty it?
# - Do they give a value for emptying it but have never actually emptied it
# - Is the toilet accessible but has never been emptied?
# - Is the property recorded as not residential while a tenant is answering the questions?
# - Is the toilet not feasible for emptying even though it has been emptied?
#
# ## List of possible inconsistencies that people have mentioned (excluding geospatial, which are being dealt with separately)
# - visit information
# - length of time of responder on plot - if units is not a number
# - weird time of visit
# - plot types
# - no Record plot number
# - record plot numbers are not equal
# - zone and gps don't correspond
# - number of families on the plot
# - number of people on plot
# - people living on the plot vs toilet users
# - toilet types
# - no toilets
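# The checks below all follow one pattern: score each plot 0 (missing), 1 (ambiguous)
# or 2 (present/consistent) with a row-wise DataFrame.apply. A minimal sketch of that
# pattern (the column names here are hypothetical):
# results['Some_check'] = data[['col_a', 'col_b']].apply(
# lambda x: 2 if pd.notnull(x[0]) else 1 if pd.notnull(x[1]) else 0, axis=1)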
# In[2]:
import pandas as pd
import numpy as np
# In[7]:
pd.options.display.max_rows = 300
pd.options.display.max_columns = 300
pd.options.display.max_colwidth = 300
# In[289]:
data_orig = pd.read_hdf('../data/wsup/tidy/data_tidied.h5', key='main')
# In[290]:
name_changes = dict([line.strip().split('\t') for line in open('../data/name_changes.txt')])
# In[291]:
data = data_orig.rename(columns=name_changes)
# In[292]:
drops = [line.strip() for line in open('../data/drop2.txt')]
drops = data.columns.intersection(drops)
data.drop(drops, 1, inplace=True)
# In[293]:
data.shape
# In[294]:
drops = [d for d in data.columns if not (d.startswith('bool') or d.startswith('cat') or d.startswith('str') or
d.startswith('num') or d.startswith('id') or d.startswith('date'))]
drops
data.drop(drops, 1, inplace=True)
# In[295]:
data.head(10).T
# In[85]:
results = pd.DataFrame(index = data.index)
# ## Plot id
# In[347]:
data[['id_new_plot_id', 'id_plot', 'str_plot_id', 'bool_plot_id']].head()
# In[87]:
results['Plot_id'] = data[['id_new_plot_id', 'id_plot', 'str_plot_id']].apply(
lambda x: 2 if x[0] != 'None' and x[1]==x[2] else 1 if (x[0]!= 'None') else 0, axis=1)
# In[88]:
results['Plot_id'].value_counts()
# In[346]:
data.loc[results.Plot_id == 2, ['id_new_plot_id', 'id_plot', 'str_plot_id']].head()
# ## Property type
# In[91]:
p = 'Property_type'
cols = ['cat_property','cat_property_other']
# In[96]:
results[p] = data[cols].apply(
lambda x: 2 if not ('Other' in str(x[0])) or pd.isnull(x[0]) else 1 if pd.notnull(x[1]) else 0, axis=1)
# In[97]:
results[p].value_counts()
# In[100]:
data.loc[results[p] == 2, cols]
# In[102]:
data.loc[results[p] == 2, cols[0]].value_counts()
# ## Property zone
# In[104]:
p = 'Property_zone'
cols = [ 'cat_zone', 'cat_zone_other', 'cat_zone_section',
'cat_zone_section_other', 'str_zone_name']
# In[105]:
data[cols]
# In[109]:
results[p] = data[cols].apply(
lambda x: 2 if not ('Other' in str(x[0])) or pd.isnull(x[0]) else 1 if x.notnull().sum()>1 else 0, axis=1)
# In[110]:
results[p].value_counts()
# In[111]:
data.loc[results[p] == 0, cols]
# In[112]:
data.loc[results[p] == 2, cols[0]].value_counts()
# In[ ]:
# ## Toilets
# - only relevant for residential
# - suspicious if more than 1 toilet per person or no toilets
# - 0 if info unknown
# In[296]:
data['num_toilets_per_person'] = data['num_toilets_all'] / data['num_ppl'].map(lambda x: np.NaN if x==0 else x)
# In[297]:
p = 'Toilets_total'
cols = [ 'num_toilets_all', 'num_toilets_per_person', 'cat_property']
# In[298]:
data.loc[data['num_toilets_per_person'].notnull(), 'num_toilets_per_person'].hist(bins=50)
# In[299]:
data[cols]
# In[300]:
results[p] = data[cols].apply(
lambda x: np.NaN if x[2] != 'Residential Plot' else
2 if x[0]>0 and x[1]>0 and x[1] <=1 else
1 if (x[0]==0) or (x[1]>1) else
0, axis=1)
# In[301]:
results[p].value_counts()
# In[302]:
data.loc[results[p] == 0, cols]
# In[303]:
data.loc[results[p] == 2, cols[0]].value_counts()
# In[ ]:
# ## Water
# In[119]:
p = 'Water_collection'
cols = [ 'cat_water', 'cat_water_other']
# In[128]:
data[cols]
# In[130]:
results[p] = data[cols].apply(
lambda x: 2 if 'Other' not in str(x[0]) and pd.notnull(x[0]) else 1 if pd.notnull(x[1]) else 0, axis=1)
# In[131]:
results[p].value_counts()
# In[132]:
data.loc[results[p] == 0, cols]
# In[133]:
data.loc[results[p] == 2, cols[0]].value_counts()
# ## Respondent
# In[146]:
p = 'Respondent_type'
cols = [ 'cat_responder_type']
# In[147]:
data[cols]
# In[152]:
results[p] = data[cols].apply(
lambda x: 2 if x[0]=='Landlord' else 1 if x[0] in ['Caretaker', 'Tenant'] else 0, axis=1)
# In[153]:
results[p].value_counts()
# In[154]:
data.loc[results[p] == 0, cols]
# In[155]:
data.loc[results[p] == 2, cols[0]].value_counts()
# # GPS information
# In[159]:
data.columns
# In[161]:
p = 'GPS_presence'
cols = ['str_gps_lat']
# In[162]:
data[cols]
# In[163]:
results[p] = data[cols].apply(
lambda x: 2 if pd.notnull(x[0]) else 0, axis=1)
# In[164]:
results[p].value_counts()
# In[165]:
data.loc[results[p] == 0, cols]
# In[166]:
data.loc[results[p] == 2, cols[0]].value_counts()
# # Number of users and num of ppl
# In[168]:
p = 'People_numbers_consistency'
cols = ['num_ppl', 'num_c_m', 'num_c_f', 'num_a_m',
'num_a_f']
# In[169]:
data[cols]
# In[170]:
results[p] = data[cols].apply(
lambda x: 2 if x[1:].sum()==x[0] else 1 if abs(x[1:].sum()-x[0]) < 2 else 0, axis=1)
# In[171]:
results[p].value_counts()
# In[172]:
data.loc[results[p] == 0, cols]
# In[173]:
data.loc[results[p] == 2, cols[0]].value_counts()
# ## People household - not relevant if not residential
# In[304]:
import numpy as np
# In[305]:
data['ppl_per_household'] = data['num_ppl'] / data['num_hhs']
# In[306]:
p = 'People_household'
cols = ['num_ppl', 'num_hhs', 'cat_property', 'ppl_per_household']
# In[307]:
ax = data['ppl_per_household'].hist(bins=100)
ax.set_yscale('log')
ax.set_xscale('log')
# In[308]:
data.loc[(data.cat_property == 'Residential Plot') & (data.ppl_per_household > 20),cols]
# In[309]:
results[p] = data[cols].apply(
lambda x: np.NaN if x[2] != 'Residential Plot' else
2 if x[0] > x[1] and x[3] <=20 else 1 if x.isnull().sum()==0 else 0, axis=1)
# In[310]:
data.columns
# In[311]:
data.loc[results[p] == 2, cols]
# In[312]:
results[p].value_counts()
# In[180]:
data.loc[results[p] == 2, cols]
# ## Solid wastes
# In[244]:
p = 'Solid waste'
cols = [ 'cat_waste', 'cat_waste_other']
# In[245]:
data[cols].head()
# In[246]:
results[p] = data[cols].apply(
lambda x: 2 if 'Other' not in str(x[0]) and pd.notnull(x[0]) else 1 if pd.notnull(x[1]) else 0, axis=1)
# In[247]:
results[p].value_counts()
# In[250]:
data.loc[results[p] == 0, cols]
# In[173]:
data.loc[results[p] == 2, cols[0]].value_counts()
# # Responder time vs age of toilet
# In[313]:
data['num_time_responder'] = np.NaN
for z in ['num_landlord_time','num_caretaker_time', 'num_tenant_time']:
data['num_time_responder'] = data[['num_time_responder', z]].apply(
lambda row:row[z] if pd.notnull(row[z]) and pd.isnull(row['num_time_responder']) else row['num_time_responder'], axis=1)
# In[314]:
for t in ['1','2', '3']:
data['num_toilet%s_age' % t] = data['num_toilet%s_age_m' % t].map(
lambda x: 0 if pd.isnull(x) else x)
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
from sklearn.model_selection import train_test_split
import torch
from torch.utils.data import Dataset
from transformers import DistilBertTokenizerFast,DistilBertForSequenceClassification
from transformers import Trainer,TrainingArguments
from transformers import DistilBertTokenizerFast, BertForMaskedLM
from transformers import AutoConfig
from transformers import AutoModel
from sklearn.metrics import accuracy_score, f1_score
from torch import nn
from transformers import Trainer
class SarcasmDataset(torch.utils.data.Dataset):
def __init__(self, encodings, labels):
self.encodings = encodings
self.labels = labels
def __getitem__(self, idx):
item = {key: torch.tensor(val[idx]) for key, val in self.encodings.items()}
item['labels'] = torch.tensor(self.labels[idx])
return item
def __len__(self):
return len(self.labels)
## Test Dataset
class SarcasmTestDataset(torch.utils.data.Dataset):
def __init__(self, encodings):
self.encodings = encodings
def __getitem__(self, idx):
item = {key: torch.tensor(val[idx]) for key, val in self.encodings.items()}
return item
def __len__(self):
return len(self.encodings)
class CustomTrainer(Trainer):
def compute_loss(self, model, inputs, return_outputs=False):
labels = inputs.get("labels")
# forward pass
outputs = model(**inputs)
logits = outputs.get('logits')
        # compute custom loss: weighted cross-entropy to counter class imbalance
loss_fct = nn.CrossEntropyLoss(weight=torch.tensor([0.1, 0.3]))
loss = loss_fct(logits.view(-1, self.model.config.num_labels), labels.view(-1))
return (loss, outputs) if return_outputs else loss
def compute_metrics(p):
pred, labels = p
pred = np.argmax(pred, axis=1)
accuracy = accuracy_score(y_true=labels, y_pred=pred)
f1 = f1_score(labels, pred)
return {"accuracy": accuracy,"f1_score":f1}
def labels(x):
if x == 0:
return 0
else:
return 1
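# Minimal wiring sketch (hypothetical helper, not part of the original script):
# tokenize raw text with DistilBertTokenizerFast and wrap the encodings in the
# SarcasmDataset defined above. The checkpoint name is an assumption.
def prepare_datasets(train_texts, train_labels, val_texts, val_labels,
                     checkpoint='distilbert-base-uncased'):
    tokenizer = DistilBertTokenizerFast.from_pretrained(checkpoint)
    train_encodings = tokenizer(list(train_texts), truncation=True, padding=True)
    val_encodings = tokenizer(list(val_texts), truncation=True, padding=True)
    return (SarcasmDataset(train_encodings, list(train_labels)),
            SarcasmDataset(val_encodings, list(val_labels)))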
if __name__ == '__main__':
path = '../../Data/Train_Dataset.csv'
path_test = '../../Data/Test_Dataset.csv'
df = pd.read_csv(path)
test = | pd.read_csv(path_test) | pandas.read_csv |
import os
import csv
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import random as rn
import warnings
from kneed import KneeLocator
class BuildDriftKnowledge():
"""
Description :
        Class to build the pareto knowledge from hyper-parameter configurations evaluated on different datasets for the drift detector tuning.
        The knowledge consists of the best configuration of hyper-parameters for each dataset.
        The datasets are characterised by meta-features, and a knowledge base can then be built to link these features to the best configurations.
Parameters :
results_directory: str
Path to the directory containing the knowledge files (results of the evaluation of the configurations on example streams)
names_detectors: list of str
List of the names of the detectors
names_streams: list of str
list of the names of the streams
n_meta_features: int, default = 15 ((severity, magnitude, interval) * (med, kurto, skew, per10, per90))
Number of meta-features extracted from the stream
NOT USED FOR THE MOMENT as we use theoritical meta-features and not measured ones
knowledge_type: str
String indicating what knowledge is being calculated (for arf tree tuning or drift detectors)
            NOT USED FOR THE MOMENT; needs further implementation to bring the two applications together
output: str
Directory path where to save output file
verbose: bool, default = False
Print pareto figures if True
Output:
Csv file containing the configurations selected for each example stream (each row = 1 stream)
Example:
>>> names_stm = ['BernouW1ME0010','BernouW1ME005095','BernouW1ME00509','BernouW1ME0109','BernouW1ME0108','BernouW1ME0208','BernouW1ME0207','BernouW1ME0307','BernouW1ME0306','BernouW1ME0406','BernouW1ME0506','BernouW1ME05506',
>>> 'BernouW100ME0010','BernouW100ME005095','BernouW100ME00509','BernouW100ME0109','BernouW100ME0108','BernouW100ME0208','BernouW100ME0207','BernouW100ME0307','BernouW100ME0306','BernouW100ME0406','BernouW100ME0506','BernouW100ME05506',
>>> 'BernouW500ME0010','BernouW500ME005095','BernouW500ME00509','BernouW500ME0109','BernouW500ME0108','BernouW500ME0208','BernouW500ME0207','BernouW500ME0307','BernouW500ME0306','BernouW500ME0406','BernouW500ME0506','BernouW500ME05506']
>>>
>>> names_detect = [['PH1','PH2','PH3','PH4','PH5','PH6','PH7','PH8','PH9','PH10','PH11','PH12','PH13','PH14','PH15','PH16'],
>>> ['ADWIN1','ADWIN2','ADWIN3','ADWIN4','ADWIN5','ADWIN6','ADWIN7','ADWIN8','ADWIN9'],
>>> ['DDM1','DDM2','DDM3','DDM4','DDM5','DDM6','DDM7','DDM8','DDM9','DDM10'],
>>> ['SeqDrift21','SeqDrift22','SeqDrift23','SeqDrift24','SeqDrift25','SeqDrift26','SeqDrift27','SeqDrift28','SeqDrift29','SeqDrift210',
>>> 'SeqDrift211','SeqDrift212','SeqDrift213','SeqDrift214','SeqDrift215','SeqDrift216','SeqDrift217','SeqDrift218']]
>>>
>>> output_dir = os.getcwd()
>>> directory_path_files = 'examples/pareto_knowledge/ExampleDriftKnowledge' # Available in hyper-param-tuning-examples repository
>>>
>>> pareto_build = BuildDriftKnowledge(results_directory=directory_path_files, names_detectors=names_detect, names_streams=names_stm, output=output_dir, verbose=True)
>>> pareto_build.load_drift_data()
>>> pareto_build.calculate_pareto()
>>> pareto_build.best_config
"""
def __init__(self,
results_directory,
names_detectors,
names_streams,
output,
# n_meta_features = 15,
# knowledge_type = 'Drift',
verbose = False):
if results_directory != None and names_detectors != None and names_streams != None and output != None:
self.results_directory = results_directory
self.names_detectors = names_detectors
self.names_streams = names_streams
self.output = output
else :
raise ValueError('Directory paths or list of detectors names or list of streams missing.')
self.verbose = verbose
# self.knowledge_type = knowledge_type
self.n_detectors = 4
# self.n_meta_features = n_meta_features
self.n_streams = len(self.names_streams)
self.best_config = [[] for ind_s in range(self.n_streams)]
warnings.filterwarnings("ignore")
@property
def best_config(self):
""" Retrieve the length of the stream.
Returns
-------
int
The length of the stream.
"""
return self._best_config
@best_config.setter
def best_config(self, best_config):
""" Set the length of the stream
Parameters
----------
length of the stream : int
"""
self._best_config = best_config
def load_drift_data(self) :
"""
Function to load the performance data from the csv files
"""
# Variables for performances of the detectors
self.scores_perf = []
self.list_name_detec_ok = []
for ind_s in range(self.n_streams) :
self.scores_perf.append([[] for ind_d in range(self.n_detectors)])
self.list_name_detec_ok.append([[] for ind_d in range(self.n_detectors)])
        # Variables for the meta-features
self.mean_meta_features = []
self.std_meta_features = []
for ind_s in range(self.n_streams) :
self.mean_meta_features.append([[] for ind_d in range(self.n_detectors)])
self.std_meta_features.append([[] for ind_d in range(self.n_detectors)])
ind_s = 0
# Loop through the streams
for stream_name in self.names_streams :
# Open the performance file for the given stream
            stream_perf_file = os.path.join(self.results_directory, stream_name + 'PerfDetectors.csv')
with open(stream_perf_file) as csv_data_file:
data_p = [row for row in csv.reader(csv_data_file)]
ind_d = 0
# Loop through the detectors
for name_detector in self.names_detectors :
# Loop through the detectors configurations
for name_ind_detector in name_detector :
# Get the row of performances for the given configuration
rowind_detector = next((x for x in data_p if x[0] == name_ind_detector), None)
# Only if no Nan value in the row (which mean that the detector detected no drifts at some point)
if 'nan' not in rowind_detector :
# Store the TP number and the FP number for the given detector
self.scores_perf[ind_s][ind_d].append([float(rowind_detector[2]),float(rowind_detector[3])])
self.list_name_detec_ok[ind_s][ind_d].append(name_ind_detector)
ind_d += 1
# # Open the meta-features file for the given stream
# stream_meta_feat_file = self.results_directory+'\\'+stream_name+'metaFeatDetectors.csv'
# with open(stream_meta_feat_file) as csv_data_file:
# data_mf = [row for row in csv.reader(csv_data_file)]
# ind_d = 0
# # Loop through the detectors
# for name_detector in self.names_detectors :
# list_meta_feat_values = [[] for i in range(self.n_meta_features)] # list to store values of each of the meta-features for each detector type
# # Loop through the detectors configurations
# for name_ind_detector in name_detector :
# ind_detec = [i for i in range(len(data_mf)) if data_mf[i][0] == name_ind_detector][0]
# # Loop for each meta-feature
# for ind_meta_feat in range(self.n_meta_features) :
# list_meta_feat_values[ind_meta_feat].append(float(data_mf[ind_detec+ind_meta_feat+1][1]))
# self.mean_meta_features[ind_s][ind_d] = np.nanmean(list_meta_feat_values, axis = 1)
# self.std_meta_features[ind_s][ind_d] = np.nanstd(list_meta_feat_values, axis = 1)
# ind_d += 1
ind_s += 1
# print('end')
def process_meta_features(self):
print('Start process meta-features')
# TODO : to come
def calculate_pareto(self) :
"""
Function to calculate the Pareto front and detect the knee point
"""
if self.verbose == True :
print('Start Pareto calculation')
for ind_s in range(self.n_streams) :
for ind_d in range(self.n_detectors) :
names = self.list_name_detec_ok[ind_s][ind_d]
score = np.array(self.scores_perf[ind_s][ind_d])
# Calculate pareto front
pareto = self.identify_pareto(score)
# print ('Pareto front index values')
# print ('Points on Pareto front: \n',pareto)
pareto_front = score[pareto]
# print ('\nPareto front scores')
# print (pareto_front)
pareto_front_df = | pd.DataFrame(pareto_front) | pandas.DataFrame |
import matplotlib
matplotlib.use("TKagg")
import matplotlib.pyplot as plt
import seaborn as sns; sns.set()
import numpy as np
#Simple Linear Regression
#y = ax + b
rng = np.random.RandomState(1)
x = 10 * rng.rand(50)
y = 2 * x - 5 + rng.randn(50)
plt.scatter(x, y);
plt.show()
plt.clf()
from sklearn.linear_model import LinearRegression
model = LinearRegression(fit_intercept=True)
model.fit(x[:, np.newaxis], y)
xfit = np.linspace(0,10,1000)
yfit = model.predict(xfit[:, np.newaxis])
plt.scatter(x,y)
plt.plot(xfit,yfit)
plt.show()
plt.clf()
#relevant info for the line
print("Model slope: ", model.coef_[0])
print("Model intercept:", model.intercept_)
#we can do much more
rng = np.random.RandomState(1)
X = 10 * rng.rand(100, 3)
y = 0.5 + np.dot(X, [1.5, -2., 1.])
model.fit(X,y)
print(model.intercept_)
print(model.coef_)
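#quick sanity check on the multivariate fit (a sketch; r2_score comes from
#scikit-learn, which is already a dependency of this script). Because y was
#generated exactly as 0.5 + X @ [1.5, -2., 1.], R^2 should be essentially 1.
from sklearn.metrics import r2_score
print("R^2 on the training data:", r2_score(y, model.predict(X)))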
#Basic Function Regression
##Polynomial basis functions
from sklearn.preprocessing import PolynomialFeatures
x = np.array([2,3,4])
poly = PolynomialFeatures(3, include_bias=False)
print(poly.fit_transform(x[:, None]))
from sklearn.pipeline import make_pipeline
poly_model = make_pipeline(PolynomialFeatures(7), LinearRegression())
#make a 7th-degree polynomial
#create a noisy sine wave
rng = np.random.RandomState(1)
x = 10 * rng.rand(50)
y = np.sin(x) + 0.1 * rng.randn(50)
poly_model.fit(x[:, np.newaxis], y)
yfit = poly_model.predict(xfit[:, np.newaxis])
plt.scatter(x, y)
plt.plot(xfit, yfit);
plt.show()
plt.clf()
#Example: predicting bicycle traffic
import pandas as pd
counts = pd.read_csv('data/FremontBridge.csv', index_col='Date', parse_dates=True)
weather = pd.read_csv('data/BicycleWeather.csv', index_col='DATE',
parse_dates=True)
#compute total daily traffic
daily = counts.resample('d').sum()
daily['Total'] = daily.sum(axis=1)
daily = daily[['Total']] # cut other columns
#add binary columns for days of week
days = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
for i in range(7):
daily[days[i]] = (daily.index.dayofweek == i).astype(float)
#account for holidays
from pandas.tseries.holiday import USFederalHolidayCalendar
cal = USFederalHolidayCalendar()
holidays = cal.holidays('2012', '2016')
daily = daily.join(pd.Series(1, index=holidays, name='holiday'))
daily['holiday'].fillna(0, inplace=True)
# add hours of daylight
def hours_of_daylight(date, axis=23.44, latitude=47.61):
"""Compute the hours of daylight for the given date"""
    days = (date - pd.Timestamp(2000, 12, 21)).days
m = (1. - np.tan(np.radians(latitude))
* np.tan(np.radians(axis) * np.cos(days * 2 * np.pi / 365.25)))
return 24. * np.degrees(np.arccos(1 - np.clip(m, 0, 2))) / 180.
daily['daylight_hrs'] = list(map(hours_of_daylight, daily.index))
daily[['daylight_hrs']].plot()
plt.ylim(8, 17)
plt.show()
plt.clf()
#add temp and precip
# temperatures are in 1/10 deg C; convert to C
weather['TMIN'] /= 10
weather['TMAX'] /= 10
weather['Temp (C)'] = 0.5 * (weather['TMIN'] + weather['TMAX'])
# precip is in 1/10 mm; convert to inches
weather['PRCP'] /= 254
weather['dry day'] = (weather['PRCP'] == 0).astype(int)
daily = daily.join(weather[['PRCP', 'Temp (C)', 'dry day']])
#add year counter
daily['annual'] = (daily.index - daily.index[0]).days / 365.
print(daily.head())
#drop null rows
daily.dropna(axis=0, how='any', inplace=True)
column_names = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun', 'holiday',
'daylight_hrs', 'PRCP', 'dry day', 'Temp (C)', 'annual']
X = daily[column_names]
y = daily['Total']
model = LinearRegression(fit_intercept=False)
model.fit(X,y)
daily['predicted'] = model.predict(X)
daily[['Total', 'predicted']].plot(alpha=0.5);
plt.show()
plt.clf()
params = | pd.Series(model.coef_, index=X.columns) | pandas.Series |
from django.shortcuts import render
from django.http import HttpResponse
from datetime import datetime
import psycopg2
import math
import pandas as pd
from openpyxl import Workbook
import csv
import random
def psql_pdc(query):
    #PostgreSQL production credentials
connP_P = {
'host' : '10.150.1.74',
'port' : '5432',
'user':'postgres',
'password':'<PASSWORD>',
'database' : 'postgres'}
    #connection to PostgreSQL production
conexionP_P = psycopg2.connect(**connP_P)
    #print('\nConnection to the PostgreSQL production server established!')
cursorP_P = conexionP_P.cursor ()
    #execute the phones query on PostgreSQL
cursorP_P.execute(query)
anwr = cursorP_P.fetchall()
cursorP_P.close()
conexionP_P.close()
return anwr
def to_horiz(anwr_P,name,_id):
    #pivot from long (vertical) to wide (horizontal) format
anwr_P1 = anwr_P.pivot(index=0,columns=1)
anwr_P1[_id] = anwr_P1.index
col1 = []
i=0
for i in range(anwr_P1.shape[1]-1):
col1.append(name+str(i+1))
col1.append(_id)
anwr_P1.columns = col1
return anwr_P1
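# Illustrative sketch of what to_horiz produces (hypothetical helper, never called):
# the SQL queries return long rows that, pivoted on their first two columns, become
# one wide row per id with columns name1..nameN plus the id column
# (e.g. phone1, phone2, ..., deudor_id). The 3-column input layout is an assumption.
def _to_horiz_example():
    long_df = pd.DataFrame([[10, 1, '3001112222'],
                            [10, 2, '3003334444'],
                            [11, 1, '3005556666']])
    return to_horiz(long_df, 'phone', 'deudor_id')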
def csv_o(fn,name):
response = HttpResponse(content_type = "text/csv")
content = "attachment; filename = %s"%name
response["Content-Disposition"] = content
# for j in range(fn.shape[1]):
# try:
# fn.iloc[:,j] = fn.iloc[:,j].str.decode(encoding='utf-8-sig')
# fn.iloc[:,j] = fn.iloc[:,j].str.encode(encoding='utf_16_le')
# except:
# pass
fn2 = [tuple(x) for x in fn.values]
writer = csv.writer(response,delimiter ='|')
writer.writerow(fn.columns)
writer.writerows(fn2)
return response
def excel(fn,name):
wb = Workbook()
ws = wb.active
k = 0
a = pd.DataFrame(fn.columns)
for k in range(a.shape[0]):
ws.cell(row = 1, column = k+1).value = a.iloc[k,0]
i=0
j=0
for i in range(fn.shape[0]):
for j in range(0,fn.shape[1]):
try:
ws.cell(row = i+2, column = j+1).value = fn.iloc[i,j]
except:
pass
response = HttpResponse(content_type = "application/ms-excel")
content = "attachment; filename = %s"%name
response["Content-Disposition"] = content
wb.save(response)
return response
def excel_CV_COL(request):
today = datetime.now()
tablename = "CV_Col"+today.strftime("%Y%m%d%H") + ".xlsx"
with open("./hello/Plantillas/Colp/QueryTel_COL.txt","r") as f1:
queryP_PT = f1.read()
with open("./hello/Plantillas/Colp/QueryCor_COL.txt","r") as f2:
queryP_PC = f2.read()
with open("./hello/Plantillas/Colp/QueryDir_COL.txt","r") as f3:
queryP_PD = f3.read()
with open("./hello/Plantillas/Colp/QueryCV_COL.txt","r") as f4:
queryP_cons = f4.read()
with open("./hello/Plantillas/Colp/QueryCiu_COL.txt","r") as f6:
queryP_Ciu = f6.read()
anwr = psql_pdc(queryP_PT)
anwrC = psql_pdc(queryP_PC)
anwrD = psql_pdc(queryP_PD)
anwrCi = psql_pdc(queryP_Ciu)
yanwr = psql_pdc(queryP_cons)
anwr_P = pd.DataFrame(anwr)
anwr_C = pd.DataFrame(anwrC)
anwr_D = pd.DataFrame(anwrD)
anwr_Ci = pd.DataFrame(anwrCi)
df = pd.DataFrame(yanwr)
inf = to_horiz(anwr_P,'phone',"deudor_id")
infC = to_horiz(anwr_C,'mail',"deudor_id")
infD = to_horiz(anwr_D,'address',"deudor_id")
infCi = to_horiz(anwr_Ci,'town',"deudor_id")
df = df.rename(columns={0:'rownumber',
1:'obligacion_id',
2:'deudor_id',
3:'unico',
4:'estado',
5:'tipo_cliente',
6:'nombre',
7:'producto',
8:'initial_bucket',
9:'ciudad',
10:'sucursal',
11:'tipo_prod',
12:'dias_mora_inicial',
13:'dias_mora_actual',
14:'rango_mora_inicial',
15:'rango_mora_final',
16:'rango',
17:'suma_pareto',
18:'rango_pareto',
19:'fcast',
20:'fdesem',
21:'vrdesem',
22:'saldo_total_inicial',
23:'saldo_total_actual',
24:'saldo_capital_inicial',
25:'saldo_capital_actual',
26:'saldo_vencido_inicial',
27:'saldo_vencido_actual',
28:'pagomin',
29:'fultpago',
30:'vrultpago',
31:'agencia',
32:'tasainter',
33:'feultref',
34:'ultcond',
35:'fasigna',
36:'eqasicampana',
37:'diferencia_pago',
38:'pago_preliminar',
39:'pago_cliente',
40:'min',
41:'tarifa',
42:'honorarios',
43:'perfil_mes_4',
44:'perfil_mes_3',
45:'perfil_mes_2',
46:'perfil_mes_1',
47:'fecha_primer_gestion',
48:'fecha_ultima_gestion',
49:'perfil_mes_actual',
50:'contactabilidad',
51:'ultimo_alo',
52:'descod1',
53:'descod2',
54:'asesor',
55:'fecha_gestion',
56:'telefono_mejor_gestion',
57:'mejorgestionhoy',
58:'asesor_indicador_hoy',
59:'repeticion',
60:'llamadas',
61:'sms',
62:'correos',
63:'gescall',
64:'visitas',
65:'whatsapp',
66:'no_contacto',
67:'total_gestiones',
68:'telefono_positivo',
69:'marcaciones_telefono_positivo',
70:'ultima_marcacion_telefono_positivo',
71:'fec_creacion_ult_compromiso',
72:'fec_pactada_ult_compromiso',
73:'valor_acordado_ult_compromiso',
74:'asesor_ult_compromiso',
75:'cantidad_acuerdos_mes',
76:'estado_acuerdo',})
fn = pd.merge(df,inf,on = ["deudor_id"]\
,how = "left",indicator = False)
fn = pd.merge(fn,infC,on = ["deudor_id"]\
,how = "left",indicator = False)
fn = pd.merge(fn,infD,on = ["deudor_id"]\
,how = "left",indicator = False)
fn = pd.merge(fn,infCi,on = ["deudor_id"]\
,how = "left",indicator = False)
return excel(fn,tablename)
def csv_CV_Claro(request):
today = datetime.now()
tablename = "CV_Claro" + today.strftime("%Y%m%d%H") + ".csv"
with open("./hello/Plantillas/Claro/QueryTel_Claro.txt","r") as f1:
queryP_PT = f1.read()
with open("./hello/Plantillas/Claro/QueryCor_Claro.txt","r") as f2:
queryP_PC = f2.read()
with open("./hello/Plantillas/Claro/QueryCV_Claro.txt","r") as f4:
queryP_cons = f4.read()
anwr = psql_pdc(queryP_PT)
anwrC = psql_pdc(queryP_PC)
yanwr = psql_pdc(queryP_cons)
#dataframes
anwr_P = pd.DataFrame(anwr)
anwr_C = pd.DataFrame(anwrC)
df = pd.DataFrame(yanwr)
anwr_P1 = to_horiz(anwr_P,'phone','deudor_id')
    #rename email fields
anwr_C = anwr_C.rename(columns={
0:'deudor_id',
1:'mail0',
2:'mail1'})
anwr_C1 = anwr_C.drop_duplicates(subset=['deudor_id'])
    #rename CV fields
df = df.rename(columns={0:'rownumber',
1:'deudor_id',
2:'obligacion_id',
3:'nombredelcliente',
4:'estado',
5:'tipo_cliente',
6:'unico',
7:'crmorigen',
8:'potencialmark',
9:'prepotencialmark',
10:'writeoffmark',
11:'dias_mora',
12:'segmento_bpo',
13:'rango_bpo',
14:'tipo',
15:'fecha_de_vencimiento',
16:'min_cliente',
17:'valorscoring',
18:'numeroreferenciadepago',
19:'monto_inicial',
20:'monto_ini_cuenta',
21:'porcentaje_descuento',
22:'valor_descuento',
23:'valor_a_pagar',
24:'deuda_real',
25:'valor_pago',
26:'saldo_pendiente',
27:'fecha_pago',
28:'fecha_compromiso',
29:'fecha_pago_compromiso',
30:'valor_compromiso',
31:'estado_acuerdo',
32:'ind_m4',
33:'ind_m3',
34:'ind_m2',
35:'ind_m1',
36:'fecha_primer_gestion',
37:'fecha_ultima_gestion',
38:'indicador',
39:'phone',
40:'asesor',
41:'fecha_gestion',
42:'contactabilidad',
43:'indicador_hoy',
44:'repeticion',
45:'llamadas',
46:'sms',
47:'correos',
48:'gescall',
49:'whatsapp',
50:'visitas',
51:'no_contacto',
52:'total_gestiones',
53:'telefono_positivo',
54:'fec_ultima_marcacion'})
#a = fn[fn.obligacion_id == '9876510000211227']
#i=0
#lin = ['no_contacto_mes_actual','gescall_mes_actual','tel_mes_actual','tel_positivo']
#for i in lin:
# df[i].fillna(0,inplace=True)
# df[i] = df[i].apply(lambda x: round(x))
# df[i] = df[i].astype('str')
fn = pd.merge(df,anwr_P1,on = ["deudor_id"]\
,how = "left",indicator = False)
fn = pd.merge(fn,anwr_C1,on = ["deudor_id"]\
,how = "left",indicator = False)
return csv_o(fn,tablename)
def csv_CV_CarP(request):
today = datetime.now()
tablename = "CV_CarP" + today.strftime("%Y%m%d%H") + ".csv"
with open("./hello/Plantillas/CarP/QueryTel_CarP.txt","r") as f1:
queryP_PT = f1.read()
with open("./hello/Plantillas/CarP/QueryCor_CarP.txt","r") as f2:
queryP_PC = f2.read()
with open("./hello/Plantillas/CarP/QueryDir_CarP.txt","r") as f3:
queryP_PD = f3.read()
with open("./hello/Plantillas/CarP/QueryCV_CarP.txt","r") as f4:
queryP_cons = f4.read()
with open("./hello/Plantillas/CarP/QueryCiu_CarP.txt","r") as f5:
queryP_Ciu = f5.read()
anwr = psql_pdc(queryP_PT)
anwrC = psql_pdc(queryP_PC)
anwrD = psql_pdc(queryP_PD)
anwrCi = psql_pdc(queryP_Ciu)
yanwr = psql_pdc(queryP_cons)
anwr_P = pd.DataFrame(anwr)
anwr_C = pd.DataFrame(anwrC)
anwr_D = pd.DataFrame(anwrD)
anwr_Ci = pd.DataFrame(anwrCi)
df = pd.DataFrame(yanwr)
inf = to_horiz(anwr_P,'phone',"deudor_id")
infC = to_horiz(anwr_C,'mail',"deudor_id")
infD = to_horiz(anwr_D,'address',"deudor_id")
infCi = to_horiz(anwr_Ci,'town',"deudor_id")
    #rename CV fields
df = df.rename(columns={0:'deudor_id',
1:'unico',
2:'nombre',
3:'obligacion',
4:'obligacion_17',
5:'tipo_cliente',
6:'sucursal_final',
7:'zona',
8:'ano_castigo',
9:'saldo_k_pareto_mes_vigente',
10:'intereses',
11:'honorarios_20',
12:'saldo_total_mes_vigente',
13:'saldo_total_pareto_mes_vigente_',
14:'saldokpareto',
15:'rango_k_pareto',
16:'interesespareto',
17:'honorariospareto',
18:'porcentaje_k_del_total',
19:'porcentaje_intereses_del_total',
20:'porcentaje_honorarios_del_total',
21:'rango_k_porcentaje',
22:'capital_20_porciento',
23:'dias_mora_acumulado',
24:'marca_juridica_cliente',
25:'focos',
26:'valor_pago',
27:'ultima_fecha_pago',
28:'estado_cliente_mes_anterior',
29:'valor_compromiso',
30:'fecha_compromiso',
31:'fecha_pactada_compromiso',
32:'asesor_compromiso',
33:'ind_m4',
34:'ind_m3',
35:'ind_m2',
36:'ind_m1',
37:'fecha_primer_gestion',
38:'fecha_ultima_gestion',
39:'indicador',
40:'telefono_mejor_gestion',
41:'asesor_mejor_gestion',
42:'fecha_gestion',
43:'contactabilidad',
44:'indicador_hoy',
45:'repeticion',
46:'llamadas',
47:'sms',
48:'correos',
49:'gescall',
50:'whatsapp',
51:'visitas',
52:'no_contacto',
53:'total_gestiones',
54:'telefono_positivo',
55:'fec_ultima_marcacion',
56:'investigacion_de_bienes'})
fn = pd.merge(df,inf,on = ["deudor_id"]\
,how = "left",indicator = False)
fn = pd.merge(fn,infC,on = ["deudor_id"]\
,how = "left",indicator = False)
fn = pd.merge(fn,infD,on = ["deudor_id"]\
,how = "left",indicator = False)
fn = pd.merge(fn,infCi,on = ["deudor_id"]\
,how = "left",indicator = False)
return csv_o(fn,tablename)
def csv_CV_FalaJ(request):
today = datetime.now()
tablename = "CV_FalJ"+today.strftime("%Y%m%d%H") + ".csv"
with open("./hello/Plantillas/Fala/QueryTel_Fal.txt","r") as f1:
queryP_PT = f1.read()
with open("./hello/Plantillas/Fala/QueryCor_Fal.txt","r") as f2:
queryP_PC = f2.read()
with open("./hello/Plantillas/Fala/QueryDir_Fal.txt","r") as f3:
queryP_PD = f3.read()
with open("./hello/Plantillas/Fala/QueryCV_FalJ.txt","r") as f4:
queryP_cons = f4.read()
with open("./hello/Plantillas/Fala/QueryPa_Fal.txt","r") as f5:
queryP_Pa = f5.read()
with open("./hello/Plantillas/Fala/QueryRe_Fal.txt","r") as f6:
queryP_PR = f6.read()
with open("./hello/Plantillas/Fala/QueryCiu_Fal.txt","r") as f7:
queryP_Ciu = f7.read()
anwr = psql_pdc(queryP_PT)
anwrC = psql_pdc(queryP_PC)
anwrD = psql_pdc(queryP_PD)
anwrPa = psql_pdc(queryP_Pa)
anwrR = psql_pdc(queryP_PR)
anwrCi = psql_pdc(queryP_Ciu)
yanwr = psql_pdc(queryP_cons)
anwr_P = pd.DataFrame(anwr)
anwr_C = pd.DataFrame(anwrC)
anwr_D = pd.DataFrame(anwrD)
anwr_Pa = pd.DataFrame(anwrPa)
anwr_R = pd.DataFrame(anwrR)
anwr_Ci = pd.DataFrame(anwrCi)
df = pd.DataFrame(yanwr)
inf = to_horiz(anwr_P,'phone',"deudor_id")
infC = to_horiz(anwr_C,'mail',"deudor_id")
infD = to_horiz(anwr_D,'address',"deudor_id")
infR = to_horiz(anwr_R,'referencia',"deudor_id")
infCi = to_horiz(anwr_Ci,'town',"deudor_id")
try:
infP = to_horiz(anwr_Pa,'pago','obligacion_id')
if infP.shape[1] > 4:
tipos = infP.dtypes.to_frame()
tipos['index'] = range(len(tipos))
tipos = tipos.set_index('index')
su = []
for n in range(len(tipos)):
if str(tipos.iloc[n,0]) == 'float64':
su.append(n)
else:
pass
infP1 = infP[['pago1','pago2']]
infP1['pago3'] = infP.iloc[:,2:max(su)+1].sum(axis = 1)
infP1['obligacion_id'] = infP.index
infP = infP1
i=0
lin = ['pago1','pago2','pago3']
for i in lin:
infP[i].fillna(0,inplace=True)
infP[i] = infP[i].apply(lambda x: round(x))
infP[i] = '$' + infP[i].astype('str')
except:
pass
    #rename CV fields
df = df.rename(columns={0:'idcbpo',
1:'tipo_producto_asignacion',
2:'grupo',
3:'cartera',
4:'tipo_cliente',
5:'unico',
6:'unico_pro',
7:'obligacion_id',
8:'deudor_id',
9:'nombre',
10:'producto',
11:'saldototal',
12:'saldo_pareto',
13:'segmentacion',
14:'peso',
15:'alturamora_hoy',
16:'rango',
17:'dias_mora',
18:'vencto',
19:'indicador_mejor_gestion',
20:'total_gestiones',
21:'fecha_ultima_gestion',
22:'asesor_mejor_gestion',
23:'fecha_compromiso',
24:'fecha_pago_compromiso',
25:'valor_compromiso',
26:'asesor',
27:'estado_acuerdo',
28:'dias_mora_pagos',
29:'valor_pago',
30:'fecha_pago',
31:'pendiente',
32:'pago_total',
33:'nvo_status',
34:'status_refresque',
35:'nvo_status_refresque',
36:'dias_mora_refresque',
37:'pendiente_mas_gastos',
38:'vencida_mas_gastos',
39:'gastos_mora',
40:'gastos_cv',
41:'porcentaje_gasto',
42:'valor_a_mantener_sin_gxc',
43:'cv8',
44:'cv9',
45:'cv10',
46:'cv11',
47:'cv12',
48:'restructuracion',
49:'valor_restruc',
50:'pagominimo_actual',
51:'pagominimo_anterior',
52:'periodo_actual',
53:'periodo_anterior',
54:'cuota36',
55:'cuota48',
56:'cuota60',
57:'cuota72',
58:'proyectada_cargue',
59:'aplica_ajuste',
60:'fecha',
61:'diferencia',
62:'porcentaje_saldo_total',
63:'x',
64:'valor',
65:'porcentaje_participacion',
66:'ind_m4',
67:'ind_m3',
68:'ind_m2',
69:'ind_m1',
70:'fecha_primer_gestion',
71:'telefono_mejor_gestion',
72:'fecha_gestion',
73:'contactabilidad',
74:'indicador_hoy',
75:'repeticion',
76:'llamadas',
77:'sms',
78:'correos',
79:'gescall',
80:'whatsapp',
81:'visitas',
82:'no_contacto',
83:'telefono_positivo',
84:'fec_ultima_marcacion',
85:'lista_robinson'})
fn = pd.merge(df,inf,on = ["deudor_id"]\
,how = "left",indicator = False)
fn = pd.merge(fn,infR,on = ["deudor_id"]\
,how = "left",indicator = False)
fn = pd.merge(fn,infC,on = ["deudor_id"]\
,how = "left",indicator = False)
fn = pd.merge(fn,infD,on = ["deudor_id"]\
,how = "left",indicator = False)
fn = pd.merge(fn,infCi,on = ["deudor_id"]\
,how = "left",indicator = False)
if 'infP' in locals():
        # merge payments
fn = pd.merge(fn,infP,on = ["obligacion_id"]\
,how = "left",indicator = False)
        # reorder columns
lt = fn.columns.tolist()
lt = lt[:29] + lt[(infP.shape[1]-1)*-1:] + lt[29:fn.shape[1]-(infP.shape[1]-1)]
fn = fn[lt]
return csv_o(fn,tablename)
def csv_CV_FalaC(request):
today = datetime.now()
tablename = "CV_FalC"+today.strftime("%Y%m%d%H") + ".csv"
with open("./hello/Plantillas/Fala/QueryTel_Fal.txt","r") as f1:
queryP_PT = f1.read()
with open("./hello/Plantillas/Fala/QueryCor_Fal.txt","r") as f2:
queryP_PC = f2.read()
with open("./hello/Plantillas/Fala/QueryDir_Fal.txt","r") as f3:
queryP_PD = f3.read()
with open("./hello/Plantillas/Fala/QueryCV_FalC.txt","r") as f4:
queryP_cons = f4.read()
with open("./hello/Plantillas/Fala/QueryPa_Fal.txt","r") as f5:
queryP_Pa = f5.read()
with open("./hello/Plantillas/Fala/QueryRe_Fal.txt","r") as f6:
queryP_PR = f6.read()
with open("./hello/Plantillas/Fala/QueryCiu_Fal.txt","r") as f7:
queryP_Ciu = f7.read()
anwr = psql_pdc(queryP_PT)
anwrC = psql_pdc(queryP_PC)
anwrD = psql_pdc(queryP_PD)
anwrPa = psql_pdc(queryP_Pa)
anwrR = psql_pdc(queryP_PR)
anwrCi = psql_pdc(queryP_Ciu)
yanwr = psql_pdc(queryP_cons)
anwr_P = pd.DataFrame(anwr)
anwr_C = pd.DataFrame(anwrC)
anwr_D = pd.DataFrame(anwrD)
anwr_Pa = pd.DataFrame(anwrPa)
anwr_R = pd.DataFrame(anwrR)
anwr_Ci = pd.DataFrame(anwrCi)
df = pd.DataFrame(yanwr)
inf = to_horiz(anwr_P,'phone',"deudor_id")
infC = to_horiz(anwr_C,'mail',"deudor_id")
infD = to_horiz(anwr_D,'address',"deudor_id")
infR = to_horiz(anwr_R,'referencia',"deudor_id")
infCi = to_horiz(anwr_Ci,'town',"deudor_id")
try:
infP = to_horiz(anwr_Pa,'pago','obligacion_id')
if infP.shape[1] > 4:
tipos = infP.dtypes.to_frame()
tipos['index'] = range(len(tipos))
tipos = tipos.set_index('index')
su = []
for n in range(len(tipos)):
if str(tipos.iloc[n,0]) == 'float64':
su.append(n)
else:
pass
infP1 = infP[['pago1','pago2']]
infP1['pago3'] = infP.iloc[:,2:max(su)+1].sum(axis = 1)
infP1['obligacion_id'] = infP.index
infP = infP1
i=0
lin = ['pago1','pago2','pago3']
for i in lin:
infP[i].fillna(0,inplace=True)
infP[i] = infP[i].apply(lambda x: round(x))
infP[i] = '$' + infP[i].astype('str')
except:
pass
    #rename CV fields
df = df.rename(columns={0:'idcbpo',
1:'tipo_producto_asignacion',
2:'grupo',
3:'cartera',
4:'tipo_cliente',
5:'unico',
6:'unico_pro',
7:'obligacion_id',
8:'deudor_id',
9:'nombre',
10:'producto',
11:'saldototal',
12:'saldo_pareto',
13:'segmentacion',
14:'peso',
15:'alturamora_hoy',
16:'rango',
17:'dias_mora',
18:'vencto',
19:'indicador_mejor_gestion',
20:'total_gestiones',
21:'fecha_ultima_gestion',
22:'asesor_mejor_gestion',
23:'fecha_compromiso',
24:'fecha_pago_compromiso',
25:'valor_compromiso',
26:'asesor',
27:'estado_acuerdo',
28:'dias_mora_pagos',
29:'valor_pago',
30:'fecha_pago',
31:'pendiente',
32:'pago_total',
33:'nvo_status',
34:'status_refresque',
35:'nvo_status_refresque',
36:'dias_mora_refresque',
37:'pendiente_mas_gastos',
38:'vencida_mas_gastos',
39:'gastos_mora',
40:'gastos_cv',
41:'porcentaje_gasto',
42:'valor_a_mantener_sin_gxc',
43:'cv1',
44:'cv2',
45:'cv3',
46:'cv4',
47:'cv5',
48:'cv6',
49:'cv7',
50:'cv8',
51:'cv9',
52:'cv10',
53:'cv11',
54:'cv12',
55:'restructuracion',
56:'valor_restruc',
57:'pagominimo_actual',
58:'pagominimo_anterior',
59:'periodo_actual',
60:'periodo_anterior',
61:'cuota36',
62:'cuota48',
63:'cuota60',
64:'cuota72',
65:'proyectada_cargue',
66:'aplica_ajuste',
67:'fecha',
68:'diferencia',
69:'porcentaje_saldo_total',
70:'x',
71:'valor',
72:'porcentaje_participacion',
73:'ind_m4',
74:'ind_m3',
75:'ind_m2',
76:'ind_m1',
77:'fecha_primer_gestion',
78:'telefono_mejor_gestion',
79:'fecha_gestion',
80:'contactabilidad',
81:'indicador_hoy',
82:'repeticion',
83:'llamadas',
84:'sms',
85:'correos',
86:'gescall',
87:'whatsapp',
88:'visitas',
89:'no_contacto',
90:'telefono_positivo',
91:'fec_ultima_marcacion',
92:'lista_robinson'})
fn = pd.merge(df,inf,on = ["deudor_id"]\
,how = "left",indicator = False)
fn = pd.merge(fn,infR,on = ["deudor_id"]\
,how = "left",indicator = False)
fn = pd.merge(fn,infC,on = ["deudor_id"]\
,how = "left",indicator = False)
fn = pd.merge(fn,infD,on = ["deudor_id"]\
,how = "left",indicator = False)
fn = pd.merge(fn,infCi,on = ["deudor_id"]\
,how = "left",indicator = False)
if 'infP' in locals():
        # merge payments
fn = pd.merge(fn,infP,on = ["obligacion_id"]\
,how = "left",indicator = False)
        # reorder columns
lt = fn.columns.tolist()
lt = lt[:27] + lt[(infP.shape[1]-1)*-1:] + lt[27:fn.shape[1]-(infP.shape[1]-1)]
fn = fn[lt]
return csv_o(fn,tablename)
def csv_CV_Sant(request):
today = datetime.now()
tablename = "CV_San"+today.strftime("%Y%m%d%H") + ".csv"
with open("./hello/Plantillas/Sant/QueryTel_San.txt","r") as f1:
queryP_PT = f1.read()
with open("./hello/Plantillas/Sant/QueryCor_San.txt","r") as f2:
queryP_PC = f2.read()
with open("./hello/Plantillas/Sant/QueryDir_San.txt","r") as f3:
queryP_PD = f3.read()
with open("./hello/Plantillas/Sant/QueryCV_San.txt","r") as f4:
queryP_cons = f4.read()
with open("./hello/Plantillas/Sant/QueryCiu_San.txt","r") as f5:
queryP_Ciu = f5.read()
anwr = psql_pdc(queryP_PT)
anwrC = psql_pdc(queryP_PC)
anwrD = psql_pdc(queryP_PD)
anwrCi = psql_pdc(queryP_Ciu)
yanwr = psql_pdc(queryP_cons)
anwr_P = | pd.DataFrame(anwr) | pandas.DataFrame |
import click
import os
from tqdm import tqdm
import pandas as pd
from PIL import Image
@click.command()
@click.option('--images', help='input directory')
@click.option('--output', help='output directory')
def main(images, output):
""" convert tiled images to abd format for building detection """
os.makedirs(output, exist_ok=True)
os.makedirs(os.path.join(output, 'images'), exist_ok=True)
cover = | pd.DataFrame() | pandas.DataFrame |
from . import pyheclib
import pandas as pd
import numpy as np
import os
import time
import warnings
# some static functions
def set_message_level(level):
"""
set the verbosity level of the HEC-DSS library
level ranges from "bort" only (level 0) to "internal" (level >10)
"""
pyheclib.hec_zset('MLEVEL','',level)
def set_program_name(program_name):
"""
    sets the name of the program (up to 6 chars long) to store with data
"""
name=program_name[:min(6,len(program_name))]
pyheclib.hec_zset('PROGRAM',name,0)
def get_version(fname):
"""
Get version of DSS File
returns a tuple of string version of 4 characters and integer version
"""
    return pyheclib.hec_zfver(fname)
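# Minimal usage sketch for the module-level helpers above (illustrative only; the
# file name is a placeholder and this function is not called anywhere in the package).
def _example_module_setup(dss_file='sample.dss'):
    set_message_level(2)          # anywhere between "bort" only (0) and "internal" (>10)
    set_program_name('PYDSS')     # stored with written data, truncated to 6 characters
    return get_version(dss_file)  # (4-character string version, integer version)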
class DSSFile:
#DSS missing conventions
MISSING_VALUE=-901.0
MISSING_RECORD=-902.0
FREQ_EPART_MAP = {
pd.tseries.offsets.Minute(n=1):"1MIN",
pd.tseries.offsets.Minute(n=2):"2MIN",
pd.tseries.offsets.Minute(n=3):"3MIN",
pd.tseries.offsets.Minute(n=4):"4MIN",
pd.tseries.offsets.Minute(n=5):"5MIN",
pd.tseries.offsets.Minute(n=10):"10MIN",
pd.tseries.offsets.Minute(n=15):"15MIN",
pd.tseries.offsets.Minute(n=20):"20MIN",
pd.tseries.offsets.Minute(n=30):"30MIN",
pd.tseries.offsets.Hour(n=1):"1HOUR",
| pd.tseries.offsets.Hour(n=2) | pandas.tseries.offsets.Hour |
# bchhun, {2020-03-22}
import csv
import natsort
import numpy as np
import os
import xmltodict
from xml.parsers.expat import ExpatError
import xml.etree.ElementTree as ET
import pandas as pd
import math
import array_analyzer.extract.constants as constants
"""
functions like "create_<extension>_dict" parse files of <extension> and return:
fiduc: list of dict describing fiducial positions
spots: list of dict, other spot info
repl: list of dict describing 'replicates' AKA antigens
params: dict containing hardware and array parameters
functions like "populate_array_<type>" take <type> from above (like fiduc, spots, repl, param) and
populate np.ndarrays indices correspond to array positions
The values of the arrays depend on the function call
- populate_array_id : Cell id like "spot-6-2", "spot-5-5-" etc..
- populate_array_spots_type : Type like "Diagnostic", "Positive Control"
- populate_array_antigen : Antigen
*** NOTE ***
populating antigens is more complicated for .xml parsing than for .csv or .xlsx
.xml files have keys:"antigen", values: multiple "spot_ID"
.csv or .xlsx can map (col, row) directly to "antigen"
"""
def create_xml_dict(path_):
"""
receives an .xml file generated by the Scienion sciReader software
and returns dictionaries containing info.
:param str path_: Full path of xml file
:return dict fiduc: Fiducials and control info
:return dict spots: Spot info
:return dict repl: Replicate info
:return dict params: Additional parameters
"""
try:
with open(path_) as fd:
doc = xmltodict.parse(fd.read())
except ExpatError:
tree = ET.parse(path_)
xml_data = tree.getroot()
# here you can change the encoding type to be able to set it to the one you need
xmlstr = ET.tostring(xml_data, encoding='utf-8', method='xml')
doc = xmltodict.parse(xmlstr)
try:
# layout of array
layout = doc['configuration']['well_configurations']['configuration']['array']['layout']
# fiducials
fiduc = layout['marker']
# spot IDs
spots = doc['configuration']['well_configurations']['configuration']['array']['spots']['spot']
# replicates
repl = doc['configuration']['well_configurations']['configuration']['array']['spots']['multiplet']
array_params = dict()
array_params['rows'] = int(layout['@rows'])
array_params['columns'] = int(layout['@cols'])
array_params['v_pitch'] = float(layout['@vspace'])
array_params['h_pitch'] = float(layout['@hspace'])
array_params['spot_width'] = float(layout['@expected_diameter'])
array_params['bg_offset'] = float(layout['@background_offset'])
array_params['bg_thickness'] = float(layout['@background_thickness'])
array_params['max_diam'] = float(layout['@max_diameter'])
array_params['min_diam'] = float(layout['@min_diameter'])
except Exception as ex:
raise AttributeError(f"exception while parsing .xml : {ex}")
return fiduc, spots, repl, array_params
def create_csv_dict(path_):
"""
Looks for three .csv files:
"array_parameters.csv" contains array printing parameters as well as hardware parameters
"array_format_type.csv" contains fiducial and control names, locations
"array_format_antigen.csv" contains specific antigen names for only diagnostic spots
Then, parses the .csvs and creates the four dictionaries:
    :param list path_: Paths to the .csv metadata files
    :return: fiduc, None (spot IDs are not needed for .csv), csv_antigens, array_params
"""
# assign names to each of the types == params, spot types, antigens
fiduc = list()
csv_antigens = list()
array_params = dict()
for meta_csv in path_:
with open(meta_csv, newline='') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
for row in csv_reader:
# header row
if "Parameter" in str(row[0]) or '' in str(row[0]):
continue
# parse params
elif "array_format_parameters" in meta_csv:
array_params[row[0]] = row[1]
# parse fiducials
elif "array_format_type" in meta_csv:
for col, value in enumerate(row[1:]):
pos = {'@row': str(row[0]),
'@col': str(col - 1),
'@spot_type': str(value)}
fiduc.append(pos)
# parse antigens
elif "array_format_antigen" in meta_csv:
for col, value in enumerate(row[1:]):
pos = {'@row': str(row[0]),
'@col': str(col - 1),
'@antigen': str(value)}
csv_antigens.append(pos)
return fiduc, None, csv_antigens, array_params
def create_xlsx_dict(xlsx):
"""
extracts fiducial, antigen, and array parameter metadata from .xlsx sheets
then populates dictionaries or lists with appropriate information
The output dictionaries and lists conform the .xml-style parsing. This is for consistency
:param dict xlsx: Opened xlsx sheets
:return list fiduc: Fiducials and control info
:return list spots: None. spots IDs not needed for .xlsx
:return list repl: Replicate (antigen)
:return dict params: Additional parameters about hardware and array
"""
fiduc = list()
xlsx_antigens = list()
array_params = dict()
# populate array parameters
for idx, value in enumerate(xlsx['imaging_and_array_parameters']['Parameter']):
array_params[value] = xlsx['imaging_and_array_parameters']['Value'][idx]
# Unless specified, run analysis with fiducials only
# Otherwise run with all non-negative spots
fiducials_only = True
if 'fiducials_only' in array_params:
if array_params['fiducials_only'] != 1:
fiducials_only = False
# populate fiduc list
for col in xlsx['antigen_type'].keys()[1:]:
for row, value in enumerate(xlsx['antigen_type'][col]):
if type(value) is float:
if math.isnan(value):
continue
else:
if not fiducials_only and "Negative" not in value:
pos = {'@row': row,
'@col': col,
'@spot_type': "Fiducial"}
fiduc.append(pos)
elif "Fiducial" in value or "xkappa-biotin" in value or "Fiducial, Diagnostic" in value:
pos = {'@row': row,
'@col': col,
'@spot_type': "Fiducial"}
fiduc.append(pos)
# find and populate antigen list
for col in xlsx['antigen_array'].keys()[1:]:
for row, value in enumerate(xlsx['antigen_array'][col]):
if type(value) is float:
if math.isnan(value):
continue
else:
pos = {'@row': row,
'@col': col,
'@antigen': str(value)}
xlsx_antigens.append(pos)
return fiduc, xlsx_antigens, array_params
def create_xlsx_array(path_):
"""
(unfinished attempt to convert arrays in xlsx to np.ndarrays directly, using pandas)
:param path_:
:return:
"""
array_params = dict()
# populate array parameters
params = pd.read_excel(path_, sheet_name='imaging_and_array_parameters')
for idx, value in enumerate(params['Parameter']):
array_params[value] = params['Value'][idx]
# populate fiducials array
fiduc_df = pd.read_excel(path_, sheet_name='antigen_type')
fiduc = fiduc_df.to_numpy(dtype='U100')
# populate xlsx
xlsx_antigens_df = pd.read_excel(path_, sheet_name='antigen_array')
xlsx_antigens = xlsx_antigens_df.to_numpy(dtype='U100')
return fiduc, None, xlsx_antigens, array_params
def create_array(rows_, cols_, dtype='U100'):
"""
creates an empty numpy array whose elements are long strings
:param rows_: int
provided by params
:param cols_: int
provided by params
:param dtype: str
type of element in this np array
:return: np.ndarray
"""
xml_numpy_array = np.empty(shape=(rows_, cols_), dtype=np.dtype(dtype))
return xml_numpy_array
def populate_array_id(arr, spots):
"""
receives an empty array
populates the array with values corresponding to spot ID:
ID like : "spot-1-2", "spot-2-4", "spot-3-3-", etc...
:param arr: np.ndarray
numpy array generated from "create_array"
:param spots: dict
dict from "create_xml_dict"
:return: np.ndarray
populated array
"""
for spot in spots:
r = int(spot['@row'])
c = int(spot['@col'])
ID = spot['@id']
arr[r, c] = ID
return arr
def populate_array_spots_type(arr, spots, fiduc):
"""
receives an empty array
populates the array with values corresponding to "spot_type":
spot_type like : "Diagnostic", "PositiveControl", "NegativeControl"
:param arr: np.ndarray
numpy array generated from "create_array"
:param spots: dict
list of dict from "create_xml_dict"
:param fiduc: list
list of dict from "create_xml_dict"
:return: np.ndarray
populated array
"""
for spot in spots:
r = int(spot['@row'])
c = int(spot['@col'])
v = spot['@spot_type']
arr[r, c] = v
for f in fiduc:
r = int(f['@row'])
c = int(f['@col'])
v = f['@spot_type']
arr[r, c] = v
return arr
def populate_array_fiduc(arr, fiduc):
"""
assigns only fiducials to the positions of the array
:param arr: np.ndarray
target array
:param fiduc: list
list of dict output of "create_xml_dict"
:return: np.ndarray
modified target array
"""
for f in fiduc:
r = int(f['@row'])
c = int(f['@col'])
v = f['@spot_type']
arr[r, c] = v
return arr
def populate_array_antigen_xml(arr, id_arr_, repl):
"""
populates an array with the antigen
scans through "replicate" in the .xml and assigns all spots the appropriate antigen
:param arr: np.ndarray
numpy array generated from "create_array"
:param id_arr_: np.ndarray
array from populate_array_id
:param repl: dict
dict from "create_xml_dict"
:return: np.ndarray
populated array
"""
for rep in repl:
antigen = rep['@id']
all_spots = rep['id'] # list of IDs
for spot in all_spots:
arr[np.where(id_arr_ == spot)] = antigen
return arr
def populate_array_antigen(arr, csv_antigens_):
"""
populates an array with antigen
used for metadata obtained through .csv files
:param arr:
:param csv_antigens_: list
list of dictionaries whose keys define array coordinates and values
:return:
"""
for antigen in csv_antigens_:
r = antigen['@row']
c = antigen['@col']
v = antigen['@antigen']
arr[r, c] = v
return arr
def rerun_xl_od(well_names, well_xlsx_path, rerun_names, xlsx_writer):
"""
Load stats_per_well excel file and copy over existing well sheets
before rerunning some of the wells.
:param list well_names: Well names (e.g. ['B12', 'C2'])
:param str well_xlsx_path: Full path to well stats xlsx sheet
:param list rerun_names: Names of wells to be rerun
:param pd.ExcelWriter xlsx_writer: Pandas excel writer
"""
rerun_set = set(rerun_names)
assert rerun_set.issubset(well_names), \
"All rerun wells can't be found in input directory"
assert os.path.isfile(well_xlsx_path),\
"Can't find stats_per_well excel: {}".format(well_xlsx_path)
ordered_dict = | pd.read_excel(well_xlsx_path, sheet_name=None) | pandas.read_excel |
from summit.utils.dataset import DataSet
from summit.domain import *
from summit.experiment import Experiment
from summit import get_summit_config_path
from summit.utils import jsonify_dict, unjsonify_dict
import torch
import torch.nn.functional as F
from skorch import NeuralNetRegressor
from skorch.utils import to_device
from sklearn.compose import ColumnTransformer, TransformedTargetRegressor
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, OneHotEncoder, FunctionTransformer
from sklearn.model_selection import (
train_test_split,
cross_validate,
GridSearchCV,
ParameterGrid,
)
from sklearn.model_selection._search import BaseSearchCV, _check_param_grid
from sklearn.base import (
BaseEstimator,
RegressorMixin,
is_classifier,
clone,
TransformerMixin,
)
from sklearn.model_selection._split import check_cv
from sklearn.model_selection._validation import (
_fit_and_score,
_score,
_aggregate_score_dicts,
)
from sklearn.metrics import r2_score
from sklearn.utils.validation import (
_deprecate_positional_args,
indexable,
check_is_fitted,
_check_fit_params,
)
from sklearn.utils import check_array, _safe_indexing
from sklearn.utils.fixes import delayed
from sklearn.metrics._scorer import _check_multimetric_scoring
from scipy.sparse import issparse
from tqdm.auto import tqdm
from joblib import Parallel
import pathlib
import numpy as np
from numpy.random import default_rng
import pandas as pd
from copy import deepcopy
from itertools import product
from collections import defaultdict
from copy import deepcopy
import pkg_resources
import time
import json
import types
import warnings
__all__ = [
"ExperimentalEmulator",
"ANNRegressor",
"get_bnn",
"RegressorRegistry",
"registry",
"get_pretrained_reizman_suzuki_emulator",
"get_pretrained_baumgartner_cc_emulator",
"ReizmanSuzukiEmulator",
"BaumgartnerCrossCouplingEmulator",
]
class ExperimentalEmulator(Experiment):
"""Experimental Emulator
Train a machine learning model based on experimental data.
    The model acts as a benchmark for testing optimisation strategies.
Parameters
----------
model_name : str
Name of the model, ideally with no spaces
domain : :class:`~summit.domain.Domain`
The domain of the emulator
dataset : :class:`~summit.dataset.Dataset`, optional
Dataset used for training/validation
regressor : :class:`torch.nn.Module`, optional
Pytorch LightningModule class. Defaults to the ANNRegressor
output_variable_names : str or list, optional
The names of the variables that should be trained by the predictor.
Defaults to all objectives in the domain.
descriptors_features : list, optional
A list of input categorical variable names that should be transformed
into their descriptors instead of using one-hot encoding.
clip : bool or list, optional
Whether to clip predictions to the limits of
the objectives in the domain. True (default) means
clipping is activated for all outputs and False means
it is not activated at all. A list of specific outputs to clip
can also be passed.
Notes
-----
By default, categorical features are pre-processed using one-hot encoding.
    If descriptors are available, they can be used on a feature-by-feature basis
by specifying names of categorical variables in the descriptors_features keyword
argument.
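    For instance (illustrative only; this assumes the domain has a categorical
    variable named ``catalyst`` with descriptors attached)::

        exp = ExperimentalEmulator("my_model", domain, dataset=ds,
                                   descriptors_features=["catalyst"])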
Examples
--------
>>> from summit.benchmarks import ExperimentalEmulator, ReizmanSuzukiEmulator
>>> from summit.utils.dataset import DataSet
>>> import matplotlib.pyplot as plt
>>> import pathlib
>>> import pkg_resources
>>> # Steal domain and data from Reizman example
>>> DATA_PATH = pathlib.Path(pkg_resources.resource_filename("summit", "benchmarks/data"))
>>> model_name = f"reizman_suzuki_case_1"
>>> domain = ReizmanSuzukiEmulator.setup_domain()
>>> ds = DataSet.read_csv(DATA_PATH / f"{model_name}.csv")
>>> # Create emulator and train (bump max_epochs to 1000 to get better training)
>>> exp = ExperimentalEmulator(model_name,domain,dataset=ds)
>>> res = exp.train(max_epochs=10, cv_folds=2, random_state=100, test_size=0.2)
>>> # Plot to show the quality of the fit
>>> fig, ax = exp.parity_plot(include_test=True)
>>> plt.show()
>>> # Get scores on the test set
>>> scores = exp.test() # doctest: +SKIP
"""
def __init__(self, model_name, domain, **kwargs):
super().__init__(domain, **kwargs)
self.model_name = model_name
# Data
self.ds = kwargs.get("dataset")
self.descriptors_features = kwargs.get("descriptors_features", [])
if self.ds is not None:
self.n_features = self._caclulate_input_dimensions(
self.domain, self.descriptors_features
)
self.n_examples = self.ds.shape[0]
self.output_variable_names = kwargs.get(
"output_variable_names",
[v.name for v in self.domain.output_variables],
)
# Create the regressor
self.regressor = kwargs.get("regressor", ANNRegressor)
self.predictors = kwargs.get("predictors")
self.clip = kwargs.get("clip", True)
def _run(self, conditions, **kwargs):
input_columns = [v.name for v in self.domain.input_variables]
X = conditions[input_columns].to_numpy()
if X.shape[0] == len(input_columns):
X = X[np.newaxis, :]
X = | pd.DataFrame(X, columns=input_columns) | pandas.DataFrame |
import itertools
import numpy as np
import cantera as ct
import pandas as pd
import re
import pickle
from .. import simulation as sim
from ...cti_core import cti_processor as ctp
class shockTube(sim.Simulation):
def __init__(self,pressure:float,temperature:float,observables:list,
kineticSens:int,physicalSens:int,conditions:dict,
initialTime,finalTime,thermalBoundary,mechanicalBoundary,
processor:ctp.Processor=None,cti_path="",save_timeHistories:int=0,
save_physSensHistories=0,moleFractionObservables:list=[],
absorbanceObservables:list=[],concentrationObservables:list=[],
fullParsedYamlFile:dict={},
time_shift_value:float = 0, atol:float=1e-15, rtol:float=1e-9,
rtol_sens:float=0.0001,
atol_sens:float=1e-6):
'''
Child class pertaining to shock tube simulations. Inherits all
attributes and methods from simulations class including __init__().
Also has its own internal init method due to additional data
requirements.
Parameters
----------
pressure : float
Pressure in [atm].
temperature : float
Temperature in [K].
observables : list
Species which sensitivity analysis is performed for.
kineticSens : int
0 for off, 1 for on.
physicalSens : int
0 for off, 1 for on.
conditions : dict
Initial mole fractions for species in simulation.
initialTime : float
Time to begin simulation from (s).
finalTime : float
Time to end simulation (s).
thermalBoundary : str
Thermal boundary condition inside the reactor. Shock tubes can
either be adiabatic or isothermal.
        mechanicalBoundary : str
            Mechanical boundary condition inside the reactor. Shock tubes can
either be constant pressure or constant volume.
processor : ctp.Processor, optional
Loaded cti file. The default is None.
cti_path : str, optional
Path of cti file for running. If processor is provided this is not
needed. The default is "".
save_timeHistories : int, optional
Boolean variable describing if time histories for simulation runs
are saved. 0 for not saved, 1 for saved. The default is 0.
save_physSensHistories : TYPE, optional
Boolean variable describing if physical sensitivity time histories
are saved. 0 for not saved, 1 for saved. The default is 0.
moleFractionObservables : list, optional
Species for which experimental data in the form of mole fraction
time histories will be provided for optimization.
Kinetic sensitivities are calculated for all these species.
The default is [].
absorbanceObservables : list, optional
Species for which experimental data in the form of summed
absorption time histories (from some or all of the species) will be
provided for optimization.
Kinetic sensitivities are calculated for all these species.
The default is [].
concentrationObservables : list, optional
Species for which experimental data in the form of concentration
time histories will be provided for optimization.
Kinetic sensitivities are calculated for all these species.
The default is [].
fullParsedYamlFile : dict, optional
Full dictionary from the parsed shock tube yaml file.
The default is {}.
time_shift_value : float, optional
The numerical value by which the time vector of the simulation
is shifted in seconds. The default is 0.
atol : float, optional
Get the absolute error tolerance. The default is 1e-15.
rtol : float, optional
Get the relative error tolerance. The default is 1e-9.
rtol_sens : float, optional
Scalar relative error tolerance for sensitivity. The default is 0.0001.
atol_sens : float, optional
Scalar absolute error tolerance for sensitivity. The default is 1e-6.
Returns
-------
None.
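        Examples
        --------
        Illustrative setup (not a doctest; assumes ``proc`` is a loaded
        ``ctp.Processor`` whose mechanism contains the species below)::

            st = shockTube(pressure=1.635, temperature=1283, observables=['OH'],
                           kineticSens=0, physicalSens=0,
                           conditions={'H2O2': 0.003, 'H2O': 0.001, 'Ar': 0.996},
                           initialTime=0, finalTime=0.0015,
                           thermalBoundary='adiabatic',
                           mechanicalBoundary='constant pressure',
                           processor=proc)
            time_history = st.run()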
'''
sim.Simulation.__init__(self,pressure,temperature,observables,kineticSens,physicalSens,
conditions,processor,cti_path)
self.initialTime = initialTime
self.finalTime = finalTime
self.thermalBoundary = thermalBoundary
self.mechanicalBoundary = mechanicalBoundary
self.kineticSensitivities= None
self.timeHistory = None
self.experimentalData = None
self.concentrationObservables = concentrationObservables
self.moleFractionObservables = moleFractionObservables
self.absorbanceObservables = absorbanceObservables
self.fullParsedYamlFile = fullParsedYamlFile
self.time_shift_value = time_shift_value
if save_timeHistories == 1:
self.timeHistories=[]
self.timeHistoryInterpToExperiment = None
self.pressureAndTemperatureToExperiment = None
else:
self.timeHistories=None
if save_physSensHistories == 1:
self.physSensHistories = []
self.setTPX()
self.dk = [0]
self.atol=atol
self.rtol=rtol
self.rtol_sensitivity=rtol_sens
self.atol_sensitivity=atol_sens
def printVars(self):
'''
Prints variables associated with the reactor initial conditions.
'''
print('initial time: {0}\nfinal time: {1}\n'.format(self.initialTime,self.finalTime),
'\nthermalBoundary: {0}\nmechanicalBoundary: {1}'.format(self.thermalBoundary,self.mechanicalBoundary),
'\npressure: {0}\ntemperature: {1}\nobservables: {2}'.format(self.pressure,self.temperature,self.observables),
'\nkineticSens: {0}\nphysicalSens: {1}'.format(self.kineticSens,self.physicalSens),
'\nTPX: {0}'.format(self.processor.solution.TPX)
)
#maybe unify paths with cti file?, also really fix the python styling
def write_time_histories(self, path=''):
if self.timeHistories == None:
print("Error: this simulation is not saving time histories, reinitialize with flag")
return -1
if path=='':
path = './time_histories.time'
pickle.dump(self.timeHistories,open(path,'wb'))
return 0
def write_physSensHistories(self, path=''):
if self.physSensHistories == None:
print("Error: this simulation is not saving time histories, reinitialize with flag")
return -1
if path=='':
path = './physSensHistories.sens'
pickle.dump(self.physSensHistories,open(path,'wb'))
return 0
def load_physSensHistories(self, path=''):
if self.physSensHistories == None:
print("Error: this simulation is not saving time histories, reinitialize with flag")
return -1
if path=='':
path = './physSensHistories.sens'
        self.physSensHistories = pickle.load(open(path,'rb'))
return 0
#note this is destructive, the original timeHistories are overwritten, run before any runs
#same write is destructive by default
def load_time_histories(self, path=''):
if self.timeHistories == None:
print("Error: this simulation is not saving time histories, reinitialize with flag")
return -1
if path=='':
path = './time_histories.time'
        self.timeHistories = pickle.load(open(path,'rb'))
return 0
def settingShockTubeConditions(self):
'''
Determine the mechanical and thermal boundary conditions for a
shock tube based on what was initialized.
'''
#assigning the thermal boundary variable
if re.match('[aA]diabatic',self.thermalBoundary):
energy = 'on'
elif re.match('[iI]sothermal',self.thermalBoundary):
energy = 'off'
else:
raise Exception('Please specify a thermal boundary condition, adiabatic or isothermal')
        #assigning the mechanical boundary variable
if re.match('[Cc]onstant [Pp]ressure',self.mechanicalBoundary):
mechBoundary = 'constant pressure'
elif re.match('[Cc]onstant [Vv]olume',self.mechanicalBoundary):
mechBoundary = 'constant volume'
else:
            raise Exception('Please specify a mechanical boundary condition, constant pressure or constant volume')
#return the thermal and mechanical boundary of the shock tube
return energy,mechBoundary
def sensitivity_adjustment(self,temp_del:float=0.0,
pres_del:float=0.0,
spec_pair:(str,float)=('',0.0)):
'''
        Appends the sensitivity adjustment to the perturbation list and calls the
        sensitivity adjustment function from the simulations class to adjust P, T,
        and X for the sensitivity calculation.
Parameters
----------
temp_del : float, optional
The decimal value of the percent by which temperature is perturbed.
The default is 0.0.
pres_del : float, optional
The decimal value of the percent by which pressure is perturbed.
The default is 0.0.
spec_pair : (str,float), optional
The string of a species and the decimal value of the percent by
which that species is perturbed . The default is ('',0.0).
Returns
-------
data : Pandas Data Frame
Time history of the perturbed simulation.
'''
if temp_del != 0.0:
self.dk.append(temp_del)
if pres_del != 0.0:
self.dk.append(pres_del)
if spec_pair[1] != 0.0:
self.dk.append(spec_pair[1])
kin_temp = self.kineticSens
self.kineticSens = 0
data = sim.Simulation.sensitivity_adjustment(self,temp_del,pres_del,spec_pair)
self.kineticSens = kin_temp
return data
def run(self,initialTime:float=-1.0, finalTime:float=-1.0):
'''
Run the shock tube simulation
Parameters
----------
initialTime : float, optional
The time at which the reactor simulation begins, in seconds. The default is -1.0.
finalTime : float, optional
The time at which the reactor simulation ends, in seconds. The default is -1.0.
Returns
-------
timeHistory: Pandas DataFrame
Time history of simulation containing temperature, pressure, and
species results.
kineticSensitivities: numpy array
three dimensional numpy array: (time x reaction x observable).
'''
if initialTime == -1.0:
initialTime = self.initialTime
if finalTime == -1.0:
finalTime = self.finalTime
self.timeHistory = None
self.kineticSensitivities= None #3D numpy array, columns are reactions with timehistories, depth gives the observable for those histories
conditions = self.settingShockTubeConditions()
mechanicalBoundary = conditions[1]
#same solution for both cp and cv sims
if mechanicalBoundary == 'constant pressure':
shockTube = ct.IdealGasConstPressureReactor(self.processor.solution,
name = 'R1',
energy = conditions[0])
else:
shockTube = ct.IdealGasReactor(self.processor.solution,
name = 'R1',
energy = conditions[0])
sim = ct.ReactorNet([shockTube])
sim.rtol=self.rtol
sim.atol=self.atol
#print(sim.rtol_sensitivity,sim.atol_sensitivity)
sim.rtol_sensitivity=self.rtol_sensitivity
sim.atol_sensitivity=self.atol_sensitivity
columnNames = [shockTube.component_name(item) for item in range(shockTube.n_vars)]
columnNames = ['time']+['pressure']+columnNames
self.timeHistory = pd.DataFrame(columns=columnNames)
if self.kineticSens == 1:
for i in range(self.processor.solution.n_reactions):
shockTube.add_sensitivity_reaction(i)
dfs = [pd.DataFrame() for x in range(len(self.observables))]
tempArray = [np.zeros(self.processor.solution.n_reactions) for x in range(len(self.observables))]
t = self.initialTime
counter = 0
#print(sim.rtol_sensitivity,sim.atol_sensitivity)
while t < self.finalTime:
t = sim.step()
if mechanicalBoundary =='constant volume':
state = np.hstack([t,shockTube.thermo.P,shockTube.mass,shockTube.volume,
shockTube.T, shockTube.thermo.X])
else:
state = np.hstack([t,shockTube.thermo.P, shockTube.mass,
shockTube.T, shockTube.thermo.X])
self.timeHistory.loc[counter] = state
if self.kineticSens == 1:
counter_1 = 0
for observable,reaction in itertools.product(self.observables, range(self.processor.solution.n_reactions)):
tempArray[self.observables.index(observable)][reaction] = sim.sensitivity(observable,
reaction)
counter_1 +=1
if counter_1 % self.processor.solution.n_reactions == 0:
dfs[self.observables.index(observable)] = dfs[self.observables.index(observable)].append(((
pd.DataFrame(tempArray[self.observables.index(observable)])).transpose()),
ignore_index=True)
counter+=1
if self.timeHistories is not None:
self.timeHistory.time = self.timeHistory.time + self.time_shift_value
#self.timeHistory.time = self.timeHistory.time + 0
self.timeHistories.append(self.timeHistory)
############################################################
if self.kineticSens == 1:
numpyMatrixsksens = [dfs[dataframe].values for dataframe in range(len(dfs))]
self.kineticSensitivities = np.dstack(numpyMatrixsksens)
return self.timeHistory,self.kineticSensitivities
else:
return self.timeHistory
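# Example (sketch, assumes `st` was constructed with kineticSens=1 and a valid processor):
#
#     time_history, ksens = st.run(initialTime=0.0, finalTime=1e-3)
#     # time_history: DataFrame with 'time', 'pressure', temperature and species columns
#     # ksens: numpy array shaped (time steps x reactions x observables)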
#interpolate the most recent time history against the oldest by default
#working_data used if have list not pandas frame
#return more data about what was interpolated in a tuple?
def interpolate_time(self,index:int=None,time_history=None):
'''
This function interpolates and returns the most recent time history
against the original time history by default, unless a specific time
history index or time history is passed in. If an index is passed in
then an interpolated time history associated with that index in the
list is returned. If a specific time_history is passed in then an
interpolated version of that time history is returned.
Parameters
----------
index : int, optional
The index value of the specific time history to be interpolated.
The default is None.
time_history : Pandas Data Frame, optional
Pandas dataframe containing the time history from a simulation.
The default is None.
Returns
-------
interpolatedTimeHistory: Pandas Data Frame
Interpolated time history.
'''
if self.timeHistories is None:
print("Error: this simulation is not saving time histories, reinitialize with flag")
return -1
else:
if index is not None and time_history is not None:
print("Error: can only specify one of index, time_history")
return -1
if index is None:
index = -1
if time_history is None:
return self.interpolation(self.timeHistories[0],self.timeHistories[index],["temperature","pressure"]+self.observables)
else:
return self.interpolation(self.timeHistories[0],time_history,["temperature","pressure"]+self.observables)
#assumes most recent time histories are the correct ones to interpolate on
#interpolates against the original time history
def interpolate_species_adjustment(self):
'''
This function interpolates the time history of a species adjustment run,
against the original time history.
'''
interpolated_data = []
species_to_loop = set(self.conditions.keys()).difference(['Ar','AR','HE','He','Kr','KR','Xe','XE','NE','Ne'])
for x in range(0,len(species_to_loop)):
interpolated_data.insert(0,self.interpolate_time(index=-1-x))
return interpolated_data
def interpolate_species_sensitivities(self):
'''
This function interpolates the time history of a species sensitivity
against the original time history.
'''
interpolated_data = self.interpolate_species_adjustment()
interpolated_sens = []
ind_off = len(self.timeHistories)-len(set(self.conditions.keys()).difference(['Ar','AR','HE','He','Kr','KR','Xe','XE','NE','Ne']))
for i,th in enumerate(interpolated_data):
ind = ind_off + i
interpolated_sens.append(self.interpolate_physical_sensitivities(index=ind,time_history=th))
return interpolated_sens
#interpolate a range of time histories against the original
#possibly add experimental flag to do range with exp data
#end is exclusive
#start here tomorrow
def interpolate_range(self,begin:int,end:int):
'''
Function that defines a time range for interpolation.
Parameters
----------
begin : int
Time to begin interpolation.
end : int
Time to end interpolation.
Returns
-------
interpolated_data : Pandas Data Frame
Interpolated time history.
'''
if begin<0 or end>len(self.timeHistories):
print("Error: invalid indices")
if self.timeHistories is None:
print("Error: simulation is not saving time histories")
interpolated_data = []
for x in range(begin,end):
interpolated_data.append(self.interpolate_time(index=x))
return interpolated_data
#interpolates against the original time history
def interpolate_physical_sensitivities(self, index:int=-1,
time_history=None):
'''
This function interpolates the time history of a physical sensitivity,
excluding species, against the original time history and returns a
numpy array.
Parameters
----------
index : int, optional
The index value of the specific time history to be interpolated.
The default is -1.
time_history : Pandas Data Frame, optional
Pandas dataframe containing the time history from a simulation.
The default is None.
Returns
-------
sensitivity: Pandas Data Frame.
Physical sensitivity Data Frame.
'''
interpolated_time = self.interpolate_time(index) if time_history is None else time_history
#print(interpolated_time)
#calculate which dk
dk = self.dk[index]
sensitivity = self.sensitivityCalculation(self.timeHistories[0][self.observables],
interpolated_time[self.observables],self.observables,dk)
if self.physSensHistories is not None:
self.physSensHistories.append(sensitivity)
# print('this is sensitivity')
# print(sensitivity)
return sensitivity
#returns a 3D array of interpolated time histories corresponding to physical sensitivities
def interpolate_experimental_kinetic(self, pre_interpolated = []):
'''
This function interpolates kinetic sensitivities to experimental data.
Parameters
----------
pre_interpolated : list, optional
List of kinetic sensitivities to be interpolated. If not provided
the function will look for kinetic sensitivities that were saved
as an attribute.
The default is [].
Returns
-------
flipped: numpy array
Interpolated 3d array containing sensitivities.
'''
if self.experimentalData is None:
print("Error: experimental data must be loaded")
return -1
if len(pre_interpolated) == 0 and not self.kineticSensitivities.any():
print("Error: must specify pre_interpolated or have kineticSensitivities run first")
return -1
elif len(pre_interpolated)>0:
array = pre_interpolated
else:
array = self.kineticSensitivities
exp_interp_array = []
#if len(self.experimentalData) < array.shape[2]:
# print("Error: mismatch between kineticSensitivities observables and given experimental data")
# return -1
#exp data and kineticSensitivities must match in size and order of observables
for i,frame in enumerate(self.experimentalData):
if i >= array.shape[2]:
break
sheet = array[:,:,i]
exp_interp_array.append([])
for time_history in sheet.T:
# new_history = np.interp(frame.ix[:,0],
# self.timeHistories[0]['time'],
# time_history)
new_history = np.interp(frame.iloc[:,0],
self.timeHistories[0]['time'],
time_history)
new_history = new_history.reshape((new_history.shape[0],
1))
exp_interp_array[-1].append(new_history)
flipped = []
for x in exp_interp_array:
flipped.append(np.hstack(x))
return flipped
def map_and_interp_ksens(self,time_history=None):
'''
This function maps kinetic sensitivity calculations returned from cantera
to the kinetics parameters A, n and Ea, as well as interpolates them to the
corresponding experimental data. It returns a dictionary containing the
interpolated kinetic sensitivities.
Parameters
----------
time_history : Pandas Data Frame, optional
The original time history is required for obtaining temperature
values in order to do the mapping. The default is None.
Returns
-------
dict
Dictionary containing the mapping for kinetic sensitivities.
'''
A = self.kineticSensitivities
N = np.zeros(A.shape)
Ea = np.zeros(A.shape)
for i in range(0,A.shape[2]):
sheetA = A[:,:,i] #sheet for specific observable
for x,column in enumerate(sheetA.T):
N[:,x,i]= np.multiply(column,np.log(self.timeHistories[0]['temperature'])) if time_history is None else np.multiply(column,np.log(time_history['temperature']))
#not sure if this mapping is correct, check with burke and also update absorption mapping
#to_mult_ea = np.divide(-1,np.multiply(1/ct.gas_constant,self.timeHistories[0]['temperature'])) if time_history is None else np.divide(-1,np.multiply(ct.gas_constant,time_history['temperature']))
to_mult_ea = np.divide(-1,np.multiply(1,self.timeHistories[0]['temperature'])) if time_history is None else np.divide(-1,np.multiply(1,time_history['temperature']))
Ea[:,x,i]= np.multiply(column,to_mult_ea)
return {'A':self.interpolate_experimental_kinetic(A),
'N':self.interpolate_experimental_kinetic(N),
'Ea':self.interpolate_experimental_kinetic(Ea)}
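# Mapping sketch (explanatory note, not original code): for an Arrhenius rate
# k = A*T^n*exp(-Ea/(R*T)), the chain rule gives
#     dln(obs)/dn  = ln(T) * dln(obs)/dln(A)
#     dln(obs)/dEa = (-1/(R*T)) * dln(obs)/dln(A)
# which is what the loops above compute column-by-column. Note the gas-constant factor is
# currently commented out above, so the Ea map here carries only a -1/T scaling.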
#assumes pre_interpolated has been interpolated against the original time history
#assumes pre_interpolated is a list of dataframes where each dataframe is a time history
#single is a single dataframe representing one time history/run of the simulation
def interpolate_experimental(self,pre_interpolated = [], single = None):
'''
This function interpolates the time history of a physical sensitivity
against the corresponding experimental data and returns a pandas
dataframe.
Parameters
----------
pre_interpolated : list, optional
has been interpolated against the original time history,
assumes pre_interpolated is a list of dataframes where each dataframe
is a time history. The default is [].
single : Pandas Data Frame, optional
Single Pandas Data Frame to be interpolated against experimental
data. The default is None.
Returns
-------
int_exp: Pandas Data Frame
Interpolated data frame.
'''
if self.timeHistories is None:
print("Error: can't interpolate without time histories")
return -1
if self.experimentalData is None:
print("Error: must have experimental data before interpolation")
return -1
if len(pre_interpolated)!=0 and single is not None:
print("Error: can only specify one of pre_interpolated, single")
if single is not None:
pre_interpolated = [single]
int_exp = []
#This portion of the function removes the pressure and the temperature from the pre_interpolated frames
#so that when it gets interpolated against experimental data the temperature and pressure are not there
if isinstance(pre_interpolated[0],pd.DataFrame):
if 'pressure' in pre_interpolated[0].columns.tolist() and 'temperature' in pre_interpolated[0].columns.tolist():
pre_interpolated = [df.drop(columns=['temperature','pressure']) for df in pre_interpolated]
#make sure you put the observables list in the correct order
#check what order the experimental list is parsed in
#making new observables list
if single is not None:
mole_fraction_and_concentration_observables= self.moleFractionObservables + self.concentrationObservables
mole_fraction_and_concentration_observables = [x for x in mole_fraction_and_concentration_observables if x is not None]
for time_history in pre_interpolated:
array_list = []
max_size = 0
for i, observable in enumerate(mole_fraction_and_concentration_observables):
interpolated_column = np.interp(self.experimentalData[i]['Time'].values,
self.timeHistories[0]['time'].values,
time_history[observable].values)
interpolated_column = np.reshape(interpolated_column,((interpolated_column.shape[0],1)))
array_list.append(interpolated_column)
max_size = max(interpolated_column.shape[0],max_size)
padded_arrays = []
for arr in array_list:
if arr.shape[0] < max_size:
padded_arrays.append(np.pad(arr,
((0,max_size-arr.shape[0]),(0,0)),
'constant',constant_values = np.nan))
else:
padded_arrays.append(arr)
np_array = np.hstack((padded_arrays))
new_frame = pd.DataFrame(np_array)
int_exp.append(new_frame)
for x in int_exp:
x.columns = mole_fraction_and_concentration_observables
return int_exp[0]
#check and make sure this part actually works for what we want for interpolating the pre interpolated time histories
#make sure we are getting the correct columns
else:
for time_history in pre_interpolated:
array_list = []
max_size = 0
for i,frame in enumerate(self.experimentalData): #each frame is data for one observable
if i>=len(self.observables):
break
#change these bboth to use observable
# interpolated_column= np.interp(frame.ix[:,0],
# self.timeHistories[0]['time'],
# time_history.ix[:,i])
interpolated_column= np.interp(frame.iloc[:,0],
self.timeHistories[0]['time'],
time_history.iloc[:,i])
interpolated_column= np.reshape(interpolated_column,
((interpolated_column.shape[0],1)))
array_list.append(interpolated_column)
max_size = max(interpolated_column.shape[0],max_size)
padded_arrays= []
for arr in array_list:
if arr.shape[0] < max_size:
padded_arrays.append(np.pad(arr,
((0,max_size - arr.shape[0]),(0,0)),
'constant',constant_values=np.nan))
else:
padded_arrays.append(arr)
np_array = np.hstack((padded_arrays))
new_frame = pd.DataFrame(np_array)
int_exp.append(new_frame)
for x in int_exp:
x.columns = self.observables[0:len(self.experimentalData)]
if single is not None:
return int_exp[0]
else:
return int_exp
def interpolation(self,originalValues,newValues, thingBeingInterpolated):
'''
This function is the base interpolation function, interpolating one set
of data to another on a per time basis and returns a pandas dataframe.
Parameters
----------
originalValues : Pandas Data Frame
Original dataframe of time history.
newValues : Pandas Data Frame
New dataframe of time history.
thingBeingInterpolated : list
List of observable names.
Returns
-------
interpolatedData: Pandas Data Frame
Interpolated Data Frame.
'''
#interpolating time histories to original time history
if isinstance(originalValues,pd.DataFrame) and isinstance(newValues,pd.DataFrame):
tempDfForInterpolation = newValues[thingBeingInterpolated]
#tempListForInterpolation = [tempDfForInterpolation.ix[:,x].values for x in range(tempDfForInterpolation.shape[1])]
tempListForInterpolation = [tempDfForInterpolation.iloc[:,x].values for x in range(tempDfForInterpolation.shape[1])]
interpolatedData = [np.interp(originalValues['time'].values,newValues['time'].values,tempListForInterpolation[x]) for x in range(len(tempListForInterpolation))]
interpolatedData = [pd.DataFrame(interpolatedData[x]) for x in range(len(interpolatedData))]
interpolatedData = pd.concat(interpolatedData, axis=1,ignore_index=True)
interpolatedData.columns = thingBeingInterpolated
else:
print("Error: values must be pandas dataframes")
return -1
return interpolatedData
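# Example (sketch with hypothetical frames): interpolate a perturbed run back onto the
# nominal time grid so the two histories can be compared point-by-point.
#
#     interp = st.interpolation(st.timeHistories[0], st.timeHistories[-1],
#                               ['temperature', 'pressure'] + st.observables)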
def sensitivityCalculation(self,originalValues,newValues,thingToFindSensitivtyOf,dk=.01):
'''
This function calculates log/log sensitivity and returns a
pandas dataframe.
Parameters
----------
originalValues : Pandas Data Frame
Original dataframe of time history.
newValues : Pandas Data Frame
New dataframe of time history.
thingToFindSensitivtyOf : list
List of observable names.
dk : float, optional
The decimal value of the percentage by which the original value
was perturbed. The default is .01.
Returns
-------
sensitivity: Pandas Data Frame
Sensitivity of observables.
'''
if isinstance(originalValues,pd.DataFrame) and isinstance(newValues,pd.DataFrame):
newValues.columns = thingToFindSensitivtyOf
newValues = newValues.applymap(np.log)
originalValues = originalValues.applymap(np.log)
#tab
sensitivity = (newValues.subtract(originalValues)/dk)
return sensitivity
else:
print("Error: wrong datatype, both must be pandas data frames")
return -1
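# Worked example of the log/log formula above (values are illustrative only):
# if an observable changes from 1.0e-6 to 1.1e-6 under a dk = 0.01 perturbation,
# sensitivity = (ln(1.1e-6) - ln(1.0e-6)) / 0.01 ≈ 9.53.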
def importExperimentalData(self,csvFileList):
'''
This function imports experimental data in csv format and returns a
list of pandas dataframes.
Parameters
----------
csvFileList : list
List of csv file directories.
Returns
-------
experimentalData : list
Experimental data from csv files as pandas data frames
stored in a list.
'''
print('Importing shock tube data from the following csv files...')
print(csvFileList)
experimentalData = [pd.read_csv(csv) for csv in csvFileList]
experimentalData = [experimentalData[x].dropna(how='any') for x in range(len(experimentalData))]
experimentalData = [experimentalData[x].apply(pd.to_numeric, errors = 'coerce').dropna() for x in range(len(experimentalData))]
for x in range(len(experimentalData)):
experimentalData[x] = experimentalData[x][~(experimentalData[x][experimentalData[x].columns[1]] < 0)]
self.experimentalData = experimentalData
return experimentalData
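# Example (sketch, file names are hypothetical):
#
#     exp_frames = st.importExperimentalData(['oh_profile.csv', 'h2o_profile.csv'])
#     # each frame is dropped of NaNs, coerced to numeric, and rows with a negative
#     # value in the second column are removed before being stored on the object.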
def savingInterpTimeHistoryAgainstExp(self,timeHistory):
'''
This function writes the time history which is interpolated against
experimental data as a class object.
Parameters
----------
timeHistory : Pandas Data Frame
Time history interpolated against experimental data .
Returns
-------
timeHistoryInterpToExperiment: Pandas Data Frame
Time history that is being saved.
'''
self.timeHistoryInterpToExperiment = timeHistory
def interpolatePressureandTempToExperiment(self,simulation,experimental_data):
'''
This function interpolates the pressure and temperature time history
from a simulation against the corresponding experimental data.
Parameters
----------
simulation : class variable
The simulation associated with the time history to be
interpolated.
experimental_data : list
List of corresponding experimental data dataframes.
Returns
-------
list_of_df : list
List of interpolated data frames for pressure and temperature.
'''
p_and_t = ['pressure','temperature']
list_of_df = []
for df in experimental_data:
temp = []
for variable in p_and_t:
interpolated_data = np.interp(df['Time'],simulation.timeHistories[0]['time'],simulation.timeHistories[0][variable])
interpolated_data = interpolated_data.reshape((interpolated_data.shape[0],1))
temp.append(interpolated_data)
temp = np.hstack(temp)
temp = pd.DataFrame(temp)
temp.columns = p_and_t
list_of_df.append(temp)
self.pressureAndTemperatureToExperiment = list_of_df
return list_of_df
def calculate_time_shift_sensitivity(self,simulation,experimental_data,dk=1e-8):
'''
This function calculates the sensitivity of the observables to a small
shift in time (dk), using the original time history of the simulation.
Parameters
----------
simulation : class variable
The simulation associated with the time history to
be interpolated.
experimental_data : list
List of corresponding experimental data dataframes.
dk : float, optional
Decimal percentage by which time values are perturbed. Default is
1e-8.
Returns
-------
time_shift_sensitivity : Pandas Data Frame
List of Pandas Data Frames containing time shift sensitivity
values.
'''
lst_obs = simulation.moleFractionObservables + simulation.concentrationObservables
lst_obs = [i for i in lst_obs if i]
one_percent_of_average = dk
original_time = simulation.timeHistories[0]['time']
new_time = original_time + one_percent_of_average
#interpolate to the original time
interpolated_against_original_time = []
for i,obs in enumerate(lst_obs):
interpolated_original_observable_against_original_time = np.interp(original_time,new_time,simulation.timeHistories[0][lst_obs[i]])
s1 = pd.Series(interpolated_original_observable_against_original_time,name=lst_obs[i])
interpolated_against_original_time.append(s1)
observables_interpolated_against_original_time_df = | pd.concat(interpolated_against_original_time,axis=1) | pandas.concat |
from examples.residential_mg_with_pv_and_dewhs.modelling.micro_grid_models import (DewhModel, GridModel, PvModel,
ResDemandModel)
from models.agents import ControlledAgent
from typing import MutableMapping, AnyStr, Tuple as Tuple_T
from utils.func_utils import ParNotSet
import cvxpy as cvx
import cvxpy.expressions.expression as cvx_e
from abc import abstractmethod, ABC
import pandas as pd
from datetime import datetime as DateTime
import numpy as np
from utils.matrix_utils import atleast_2d_col
from controllers.mpc_controller import MpcController
from controllers.controller_base import (ControllerBase, ControllerBuildRequiredError, ConstraintSolvedController,
MldSimLog, ControllerSolverError)
from structdict import StructDict, named_struct_dict
import itertools
import warnings
class MicroGridAgentBase(ControlledAgent, ABC):
@abstractmethod
def __init__(self, device_type=None, device_id=None,
sim_model=ParNotSet, control_model=ParNotSet,
omega_profile=None, profile_t0=DateTime(2018, 12, 3),
omega_scenarios_profile=None, scenarios_t0=pd.Timedelta(0), forecast_lag='1D'):
super().__init__(device_type=device_type, device_id=device_id,
sim_model=sim_model, control_model=control_model)
self.omega_profile = None
self.profile_t0 = profile_t0
self.set_omega_profile(omega_profile=omega_profile, profile_t0=profile_t0)
self.omega_scenarios = None
self._omega_scenario_values = None
self.intervals_per_day = None
self.num_scenarios = None
self.scenarios_t0 = scenarios_t0
self.set_omega_scenarios(omega_scenarios_profile=omega_scenarios_profile, scenarios_t0=scenarios_t0)
self.forecast_lag = forecast_lag
self._variables_k_act = None
@property
def N_tilde(self):
def N_tilde_getter(controller: ControllerBase):
return controller.N_tilde
return self._get_controllers_attribute(getter_func_or_name=N_tilde_getter)
@property
def x_k(self):
def x_getter(controller: ControllerBase):
return controller.x_k
return self._get_controllers_attribute(getter_func_or_name=x_getter)
@x_k.setter
def x_k(self, value):
self.set_x_k(x_k_or_struct=value)
@property
def omega_tilde_k(self):
def omega_getter(controller: ControllerBase):
return controller.omega_tilde_k
return self._get_controllers_attribute(getter_func_or_name=omega_getter)
@omega_tilde_k.setter
def omega_tilde_k(self, value):
self.set_omega_tilde_k(omega_tilde_k_or_struct=value)
def set_x_k(self, controller_name=ParNotSet, x_k_or_struct=None):
def set_controller_x(controller: ControllerBase, x_k):
controller.x_k = x_k
self._set_controllers_attribute(setter_func_or_name=set_controller_x, controller_name=controller_name,
attribute_or_struct=x_k_or_struct)
def set_omega_tilde_k(self, controller_name=ParNotSet, omega_tilde_k_or_struct=None):
def set_controller_omega(controller: ControllerBase, omega_tilde_k):
controller.omega_tilde_k = omega_tilde_k
self._set_controllers_attribute(setter_func_or_name=set_controller_omega, controller_name=controller_name,
attribute_or_struct=omega_tilde_k_or_struct)
def _set_controllers_attribute(self, setter_func_or_name, controller_name=ParNotSet, attribute_or_struct=None):
if isinstance(setter_func_or_name, str):
attribute_name = setter_func_or_name
def setter_func(controller: ControllerBase, attribute):
setattr(controller, attribute_name, attribute)
else:
setter_func = setter_func_or_name
if not isinstance(attribute_or_struct, dict):
attribute_struct = dict.fromkeys(self.controllers, attribute_or_struct)
else:
attribute_struct = attribute_or_struct
if controller_name is not ParNotSet:
setter_func(self.controllers[controller_name], attribute_struct[controller_name])
else:
for cname, controller in self.controllers.items():
setter_func(controller, attribute_struct[cname])
ControllersAttrStruct = named_struct_dict('ControllersAttrStruct')
def _get_controllers_attribute(self, getter_func_or_name, controller_name=ParNotSet):
if isinstance(getter_func_or_name, str):
attribute_name = getter_func_or_name
def getter_func(controller: ControllerBase):
return getattr(controller, attribute_name, ParNotSet)
else:
getter_func = getter_func_or_name
struct = self.ControllersAttrStruct()
if controller_name is not ParNotSet:
struct[controller_name] = getter_func(self.controllers[controller_name])
else:
struct.update({cname: getter_func(controller) for cname, controller in self.controllers.items()})
return struct
@property
def sim_logs(self) -> MutableMapping[AnyStr, MldSimLog]:
return StructDict(
{controller_name: controller.sim_log for controller_name, controller in self._controllers.items()})
@property
def sim_dataframe(self):
dfs = {}
for cname, controller in self.controllers.items():
df = pd.concat([controller.sim_log.get_concat_log()], keys=[(self.device_type, self.device_id, cname)],
names=['device_type', 'device_id', 'controller'], axis=1, copy=False)
dfs[cname] = df
return pd.concat(dfs.values(), axis=1) if dfs else pd.DataFrame()
def get_variables_k_act(self, k):
return StructDict(
{controller_name: controller.sim_log.get(k) for controller_name, controller in self._controllers.items()})
def set_omega_scenarios(self, omega_scenarios_profile=ParNotSet, scenarios_t0=None):
scenarios_t0 = scenarios_t0 or self.scenarios_t0
omega_scenarios = omega_scenarios_profile
if omega_scenarios_profile is not ParNotSet and omega_scenarios_profile is not None:
omega_scenarios_profile = pd.DataFrame(omega_scenarios_profile)
n, m = omega_scenarios_profile.shape
if m != self.mld_info.nomega:
raise ValueError(
f"omega_scenarios_profile must have column dimension of 'nomega':{self.mld_info.nomega} not {m}.")
self.intervals_per_day = intervals_per_day = pd.Timedelta('1D') // pd.Timedelta(seconds=self.mld_info.ts)
self.num_scenarios = num_scenarios = n // intervals_per_day
omega_scenarios_profile.index = pd.timedelta_range(scenarios_t0, periods=n,
freq=pd.Timedelta(seconds=self.mld_info.ts))
omega_scenarios_profile.reindex(pd.timedelta_range(scenarios_t0, periods=num_scenarios * intervals_per_day,
freq=pd.Timedelta(seconds=self.mld_info.ts)))
omega_scenarios_profile.index.name = 'TimeDelta'
omega_scenarios = omega_scenarios_profile.stack().values.reshape(intervals_per_day * m, -1, order='F')
multi_index = pd.MultiIndex.from_product(
[pd.timedelta_range(scenarios_t0, periods=intervals_per_day,
freq=pd.Timedelta(seconds=self.mld_info.ts)),
[f'omega_{index}' for index in range(m)]])
omega_scenarios = pd.DataFrame(omega_scenarios, index=multi_index)
self.omega_scenarios = (omega_scenarios if omega_scenarios is not ParNotSet else
self.omega_scenarios)
self._omega_scenario_values = self.omega_scenarios.values if isinstance(omega_scenarios, pd.DataFrame) else None
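# Shape sketch (hypothetical numbers): with ts = 900 s, intervals_per_day = 96; a profile of
# 9600 rows and nomega = 1 then gives num_scenarios = 100 and an omega_scenarios frame of
# shape (96 * 1, 100), i.e. one column per daily scenario.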
def set_omega_profile(self, omega_profile=ParNotSet, profile_t0=None):
profile_t0 = profile_t0 or self.profile_t0
if omega_profile is not None and omega_profile is not ParNotSet:
omega_profile = pd.DataFrame(omega_profile)
n, m = omega_profile.shape
if m != self.mld_info.nomega:
raise ValueError(
f"Omega profile must have column dimension of 'nomega':{self.mld_info.nomega} not {m}.")
omega_profile.columns = [f'omega_{index}' for index in range(m)]
omega_profile.index = pd.date_range(profile_t0, periods=n, freq= | pd.Timedelta(seconds=self.mld_info.ts) | pandas.Timedelta |
import numpy as np
import os
import pandas as pd
import pickle
import unittest
from predictor import Predictor
from scipy.signal import savgol_filter
class PredictorTests(unittest.TestCase):
def setUp(self) -> None:
self.predictor = Predictor()
def test_create_predictor(self):
"""
Test if predictor class exists.
"""
self.assertIsNotNone(self.predictor)
def test_predict_method(self):
"""
Test if predict method of predictor class exists.
"""
self.predictor.predict()
def test_predict_method_with_model_type(self):
"""
Test if predict method has a parameter which is responsible for the model type.
"""
self.predictor.predict(linear=True)
def test_predict_method_with_features_type(self):
"""
Test if predict method has a parameter which is responsible for the features type.
"""
self.predictor.predict(features_type="simple")
def test_predict_method_with_model(self):
"""
Test if predict method has a parameter which is responsible for the file with the model.
"""
self.predictor.predict(model_filename="non-linear-accelerometer.pcl")
def test_predict_method_return_value(self):
"""
Test if predict method returns the same predictions as the underlying model.
"""
data = pd.read_csv(os.path.join("data", "train_filtered_accelerometer.csv"))
data['acceleration'] = np.sqrt(
data['x_accelerometer'] ** 2 + data['y_accelerometer'] ** 2 + data['z_accelerometer'] ** 2)
data = data.drop(['event'], axis=1)
with open(os.path.join("models", "non-linear-accelerometer.pcl"), "rb") as file:
model = pickle.load(file)
y_true = model.predict(data).tolist()[:10]
y_pred = self.predictor.predict(data, linear=False, model_filename="non-linear-accelerometer.pcl",
features="simple")[:10]
self.assertListEqual(y_true, y_pred)
def test_preprocess_feature_method(self):
"""
Test if preprocess_feature method of predictor class exists.
"""
self.predictor.preprocess_feature()
def test_preprocess_feature_method_with_filtering_and_parameters(self):
"""
Test if preprocess_feature method has parameters
which are responsible for the filtering and filtering parameters.
"""
self.predictor.preprocess_feature(filtering=savgol_filter, window_length=51, polyorder=5)
def test_preprocess_feature_method_with_filtering_and_parameters_returned_value(self):
"""
Test if preprocess_feature method with passed filtering and filtering parameters work correctly.
"""
window_length = 51
polyorder = 5
x = np.random.uniform(-1, 0, 100)
filtered = savgol_filter(x, window_length=window_length, polyorder=polyorder).tolist()
filtered_with_predictor = self.predictor.preprocess_feature(feature=x, filtering=savgol_filter,
window_length=window_length,
polyorder=polyorder).tolist()
self.assertListEqual(filtered, filtered_with_predictor)
def test_preprocess_feature_method_with_model_type(self):
"""
Test if preprocess_feature method has a parameter which is responsible for the model type.
"""
self.predictor.preprocess_feature(linear=True)
def test_preprocess_feature_method_with_checking_model_type(self):
"""
Test if preprocess_feature method with passed model type works correctly.
"""
path_to_the_scaler = os.path.join("models", "x_accelerometer.pcl")
with open(path_to_the_scaler, "rb") as file:
scaler = pickle.load(file)
window_length = 51
polyorder = 5
x = np.random.uniform(-1, 0, 100)
filtered = savgol_filter(x, window_length=window_length, polyorder=polyorder)
scaled = scaler.transform(filtered.reshape(-1, 1)).tolist()
scaled_with_predictor = self.predictor.preprocess_feature(feature=x,
path_to_the_scaler=path_to_the_scaler,
filtering=savgol_filter,
window_length=window_length,
polyorder=polyorder).tolist()
self.assertListEqual(scaled, scaled_with_predictor)
def test_preprocess_method(self):
"""
Test if preprocess method of predictor class exists.
"""
self.predictor.preprocess()
def test_preprocess_method_with_parameters(self):
"""
Test if preprocess method with passed parameters works correctly.
"""
data = pd.read_csv(os.path.join("data", "train_accelerometer.csv"))
data = data.drop(['event'], axis=1)
window_length = 51
polyorder = 5
preprocessed_data = self.predictor.preprocess(data=data, filtering=savgol_filter, window_length=window_length,
polyorder=polyorder)
result = preprocessed_data.loc[0].values.tolist()
data_new = pd.DataFrame()
for column in data.columns:
data_new[column] = savgol_filter(data[column], window_length=window_length, polyorder=polyorder)
true_values = data_new.loc[0].values.tolist()
self.assertListEqual(true_values, result)
def test_preprocess_method_with_filtering_and_scaling(self):
"""
Test if preprocess method makes scaling of features.
"""
data = pd.read_csv(os.path.join("data", "train_accelerometer.csv"))
data = data.drop(['event'], axis=1)
window_length = 51
polyorder = 5
preprocessed_data = self.predictor.preprocess(data=data, linear=True, filtering=savgol_filter,
window_length=window_length, polyorder=polyorder)
result = preprocessed_data.loc[0].values.tolist()
data_new = pd.DataFrame()
for column in sorted(data.columns):
data_new[column] = savgol_filter(data[column], window_length=window_length, polyorder=polyorder)
with open(os.path.join("models", f"{column}.pcl"), "rb") as file:
scaler = pickle.load(file)
data_new[column] = scaler.transform(data_new[column].values.reshape(-1, 1))
true_values = data_new.loc[0].values.tolist()
self.assertListEqual(true_values, result)
def test_preprocess_method_with_gyroscope(self):
"""
Test if preprocess method works with gyroscope features.
"""
data = pd.read_csv(os.path.join("data", "train_accelerometer_gyroscope.csv"))
data = data.drop(['event'], axis=1)
window_length = 51
polyorder = 5
preprocessed_data = self.predictor.preprocess(data=data, filtering=savgol_filter, window_length=window_length,
polyorder=polyorder)
result = preprocessed_data.loc[0].values.tolist()
data_new = pd.DataFrame()
for column in data.columns:
data_new[column] = savgol_filter(data[column], window_length=window_length, polyorder=polyorder)
true_values = data_new.loc[0].values.tolist()
self.assertListEqual(true_values, result)
def test_predict_method_with_preprocess_filtering(self):
"""
Test if predict method makes preprocessing.
"""
data = pd.read_csv(os.path.join("data", "train_accelerometer.csv"))
data = data.drop(['event'], axis=1)
window_length = 51
polyorder = 5
with open(os.path.join("models", "non-linear-accelerometer.pcl"), "rb") as file:
model = pickle.load(file)
data_new = | pd.DataFrame() | pandas.DataFrame |
import glob
import pandas as pd
from configparser import ConfigParser
import os
from simba.drop_bp_cords import *
def create_emty_df(shape_type):
if shape_type == 'rectangle':
col_list = ['Video', 'Shape_type', 'Name', 'Color name', 'Color BGR', 'Thickness', 'topLeftX',
'topLeftY', 'Bottom_right_X', 'Bottom_right_Y', 'width', 'height', 'Tags', 'Ear_tag_size']
if shape_type == 'circle':
col_list = ['Video', 'Shape_type', 'Name', 'Color name', 'Color BGR', 'Thickness', 'centerX', 'centerY',
'radius', 'Tags', 'Ear_tag_size']
if shape_type == 'polygon':
col_list = ['Video', 'Shape_type', 'Name', 'Color name', 'Color BGR', 'Thickness', 'Center_X',
'Center_Y', 'vertices', 'Tags', 'Ear_tag_size']
return | pd.DataFrame(columns=col_list) | pandas.DataFrame |
import pandas as pd
from pkg_resources import resource_filename
from sklearn.model_selection import ShuffleSplit
from .data_simulator import SimulatedData
from .base import survival_stats
from .base import survival_df
from .base import survival_dmat
__ALL__ = [
"survival_stats",
"survival_df",
"survival_dmat",
"load_simulated_data",
"load_metabric",
"load_metabric_train",
"load_metabric_test",
"load_whas",
"load_whas_train",
"load_whas_test",
"load_data",
"SimulatedData"
]
def _load_dataset(filename, **kwargs):
"""
Load a dataset from libsurv.datasets
Parameters
----------
filename : str
File path of dataset, for example "whas_train.csv".
usecols : list
list of columns in file to use.
Returns
-------
DataFrame
dataset.
"""
return pd.read_csv(resource_filename("libsurv", "datasets/src/" + filename), engine="python", **kwargs)
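# Example (sketch): the public loaders below simply delegate to this helper, e.g.
#
#     df = _load_dataset("whas_train.csv", usecols=["e", "t"])  # column names are hypothetical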
def load_metabric_train(**kwargs):
"""
Load a training dataset of METABRIC
Notes
-----
See `load_metabric` for more details.
"""
return _load_dataset("metabric_train.csv", **kwargs)
def load_metabric_test(**kwargs):
"""
Load a test dataset of METABRIC
Notes
-----
See `load_metabric` for more details.
"""
return _load_dataset("metabric_test.csv", **kwargs)
def load_metabric(**kwargs):
"""
Load a dataset of METABRIC.
Notes
-----
The Molecular Taxonomy of Breast Cancer International Consortium (METABRIC) investigates
the effect of gene and protein expression profiles on breast cancer survival, and helps
physicians design better treatment recommendations. Statistics are below:
# Rows: 1903
# Columns: 9 + Event + Time
# Event Ratio: 57.96%
# Min Time: 1 (months)
# Max Time: 356 (months)
"""
data_train = load_metabric_train(**kwargs)
data_test = load_metabric_test(**kwargs)
# merge into a dataframe
data = pd.concat([data_train, data_test], axis=0)
return data.reset_index(drop=True)
def load_whas_train(**kwargs):
"""
Load a training dataset of WHAS
Notes
-----
See `load_whas` for more details.
"""
return _load_dataset("whas_train.csv", **kwargs)
def load_whas_test(**kwargs):
"""
Load a test dataset of WHAS
Notes
-----
See `load_whas` for more details.
"""
return _load_dataset("whas_test.csv", **kwargs)
def load_whas(**kwargs):
"""
Load a dataset of WHAS.
Notes
-----
WHAS (the Worcester Heart Attack Study) [18] studies the survival of acute myocardial
infarction (MI). Statistics are below:
# Rows: 1638
# Columns: 5 + Event + Time
# Event Ratio: 42.12%
# Min Time: 1 (months)
# Max Time: 67 (months)
"""
data_train = load_whas_train(**kwargs)
data_test = load_whas_test(**kwargs)
# merge into a dataframe
data = pd.concat([data_train, data_test], axis=0)
return data.reset_index(drop=True)
def load_simulated_data(hr_ratio,
N=1000, num_features=10, num_var=2,
average_death=5, end_time=15,
method="gaussian",
gaussian_config={},
seed=42):
"""
Load simulated data generated by the exponentional distribution.
Parameters
----------
hr_ratio: int or float
`lambda_max` hazard ratio.
N: int
The number of observations.
average_death: int or float
Average death time that is the mean of the Exponentional distribution.
end_time: int or float
Censoring time that represents an 'end of study'. Any death
time greater than end_time will be censored.
num_features: int
Size of observation vector. Default: 10.
num_var: int
Number of varaibles simulated data depends on. Default: 2.
method: string
The type of simulated data. 'linear' or 'gaussian'.
gaussian_config: dict
Dictionary of additional parameters for gaussian simulation.
seed: int
Random state.
Returns
-------
DataFrame
A simulated survival dataset following the given args.
Notes
-----
<NAME>. Generating survival times to simulate cox proportional
hazards models with time-varying covariates. Statistics in medicine,
31(29):3946-3958, 2012.
"""
generator = SimulatedData(hr_ratio, average_death=average_death, end_time=end_time,
num_features=num_features, num_var=num_var)
raw_data = generator.generate_data(N, method=method, gaussian_config=gaussian_config, seed=seed)
# To DataFrame
df = pd.DataFrame(raw_data['x'], columns=['x_' + str(i) for i in range(num_features)])
df['e'] = raw_data['e']
df['t'] = raw_data['t']
return df
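# Example (sketch): generate a small linear-risk dataset for quick experiments.
#
#     df = load_simulated_data(hr_ratio=2000, N=500, num_features=10, num_var=2,
#                              method="linear", seed=42)
#     # df has columns x_0 ... x_9 plus 'e' (event indicator) and 't' (observed time)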
def load_data(file_path, t_col='t', e_col='e', excluded_cols=[],
split_ratio=1.0, normalize=False, seed=42):
"""
load csv file and return standard survival data for traning or testing.
Parameters
----------
file_path: str
File path. It only supports csv file.
t_col: str
Colname of observed time in your data.
e_col: str
Colname of observed status in your data.
excluded_cols: list
Columns will not be included in the final data.
split_ratio: float
If `split_ratio` is set to 1.0, then full data will be obtained. Otherwise, the
splitted data will be returned.
normalize: bool
If True, then data will be scaled as x = (x - mean) / (max - min).
seed: int
Random seed for splitting data.
Returns
------
pandas.DataFrame
Or tuple of two DataFrames if split_ratio is less than 1.0.
"""
# Read csv data
data_all = pd.read_csv(file_path)
# list columns out
Y_cols = [t_col, e_col]
_not_int_x_cols = Y_cols + excluded_cols
X_cols = [x for x in data_all.columns if x not in _not_int_x_cols]
X = data_all[X_cols]
y = data_all[Y_cols]
# Normalized data
if normalize:
for col in X_cols:
X.loc[:, col] = (X.loc[:, col] - X.loc[:, col].mean()) / (X.loc[:, col].max() - X.loc[:, col].min())
# Split data
if split_ratio == 1.0:
train_X, train_y = X, y
else:
sss = ShuffleSplit(n_splits=1, test_size=1 - split_ratio, random_state=seed)
for train_index, test_index in sss.split(X, y):
train_X, test_X = X.loc[train_index, :], X.loc[test_index, :]
train_y, test_y = y.loc[train_index, :], y.loc[test_index, :]
# print infos of training data
print("# rows: ", len(train_X))
print("# x_cols: ", len(train_X.columns))
print("# y_cols: ", len(train_y.columns))
print("X columns:", train_X.columns)
print("Y columns:", train_y.columns)
if split_ratio == 1.0:
return | pd.concat([train_X, train_y], axis=1) | pandas.concat |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 8 16:49:18 2020
@author: kwanale1
"""
import pandas as pd
from shapely.geometry import MultiPolygon, Polygon, Point
#voting_shp = gpd.read_file("VOTING_SUBDIVISION_2014_WGS84.shp")
#wards_shp = gpd.read_file("icitw_wgs84.shp")
#print(wards_shp)
#shapefile.plot()
#print(wards_shp.columns)
#print(wards_shp.head)
#Spatial Dataset Generator
#Two multi-polgyons being two wards in Toronto
#wards_shp_two = wards_shp.loc[0:1]
#print(wards_shp_two.columns)
#reference cell: first row, geometry column
poly0a = Polygon([(0,0),(1,0),(1,1.5),(0,1.5)])
poly0b = Polygon([(0,1.5),(1,1.5),(1,3),(0,3)])
poly1a = Polygon([(0,0),(1,0),(1,1),(0,1)])
poly1b = Polygon([(0,1),(1,1),(1,2),(0,2)])
poly1c = Polygon([(0,2),(1,2),(1,3),(0,3)])
#1. Generate points within a multipolygon for second dist
#Generate 2 spatial csv files
# First csv is reference distribution with location column using Regions, 80/20 split
# Second csv is secondary distribution that uses points, 50/50 split
poly_ref = []
A_ref = []
A_ref += [0]*20
A_ref += [1]*50
A_ref += [0]*30
poly_sec = []
A_sec = []
A_sec += [0]*30
A_sec += [1]*60
A_sec += [0]*10
i = 0
while i < 50:
poly_ref.append(poly0a)
poly_sec.append(poly1a)
##polypoints.append(poly0.representative_point()) #generate a point within poly0
i += 1
while i < 80:
poly_ref.append(poly0a)
poly_sec.append(poly1b)
i += 1
while i < 100:
poly_ref.append(poly0b)
poly_sec.append(poly1c)
i += 1
df_ref = pd.DataFrame(data=poly_ref,columns=['Region'])
df_ref['A']= | pd.Series(A_ref) | pandas.Series |
# -*- coding: utf-8 -*-
# @author: Elie
#%% ==========================================================
# Import libraries set library params
# ============================================================
import pandas as pd
import numpy as np
import os
pd.options.mode.chained_assignment = None #Pandas warnings off
#plotting
import seaborn as sns
from matplotlib import pyplot as plt
from matplotlib.ticker import Locator
import matplotlib as mpl
# stats
from scipy import stats
#set matplotlib rcparams
mpl.rcParams['savefig.transparent'] = "False"
mpl.rcParams['axes.facecolor'] = "white"
mpl.rcParams['figure.facecolor'] = "white"
mpl.rcParams['font.size'] = "5"
plt.rcParams['savefig.transparent'] = "False"
plt.rcParams['axes.facecolor'] = "white"
plt.rcParams['figure.facecolor'] = "white"
plt.rcParams['font.size'] = "5"
#%% ==========================================================
# define these feature/headers here in case the headers
# are out of order in input files (often the case)
# ============================================================
snv_categories = ["sample",
"A[C>A]A", "A[C>A]C", "A[C>A]G", "A[C>A]T",
"C[C>A]A", "C[C>A]C", "C[C>A]G", "C[C>A]T",
"G[C>A]A", "G[C>A]C", "G[C>A]G", "G[C>A]T",
"T[C>A]A", "T[C>A]C", "T[C>A]G", "T[C>A]T",
"A[C>G]A", "A[C>G]C", "A[C>G]G", "A[C>G]T",
"C[C>G]A", "C[C>G]C", "C[C>G]G", "C[C>G]T",
"G[C>G]A", "G[C>G]C", "G[C>G]G", "G[C>G]T",
"T[C>G]A", "T[C>G]C", "T[C>G]G", "T[C>G]T",
"A[C>T]A", "A[C>T]C", "A[C>T]G", "A[C>T]T",
"C[C>T]A", "C[C>T]C", "C[C>T]G", "C[C>T]T",
"G[C>T]A", "G[C>T]C", "G[C>T]G", "G[C>T]T",
"T[C>T]A", "T[C>T]C", "T[C>T]G", "T[C>T]T",
"A[T>A]A", "A[T>A]C", "A[T>A]G", "A[T>A]T",
"C[T>A]A", "C[T>A]C", "C[T>A]G", "C[T>A]T",
"G[T>A]A", "G[T>A]C", "G[T>A]G", "G[T>A]T",
"T[T>A]A", "T[T>A]C", "T[T>A]G", "T[T>A]T",
"A[T>C]A", "A[T>C]C", "A[T>C]G", "A[T>C]T",
"C[T>C]A", "C[T>C]C", "C[T>C]G", "C[T>C]T",
"G[T>C]A", "G[T>C]C", "G[T>C]G", "G[T>C]T",
"T[T>C]A", "T[T>C]C", "T[T>C]G", "T[T>C]T",
"A[T>G]A", "A[T>G]C", "A[T>G]G", "A[T>G]T",
"C[T>G]A", "C[T>G]C", "C[T>G]G", "C[T>G]T",
"G[T>G]A", "G[T>G]C", "G[T>G]G", "G[T>G]T",
"T[T>G]A", "T[T>G]C", "T[T>G]G", "T[T>G]T"]
indel_categories = ["sample",
"1:Del:C:0", "1:Del:C:1", "1:Del:C:2", "1:Del:C:3", "1:Del:C:4", "1:Del:C:5",
"1:Del:T:0", "1:Del:T:1", "1:Del:T:2", "1:Del:T:3", "1:Del:T:4", "1:Del:T:5",
"1:Ins:C:0", "1:Ins:C:1", "1:Ins:C:2", "1:Ins:C:3", "1:Ins:C:4", "1:Ins:C:5",
"1:Ins:T:0", "1:Ins:T:1", "1:Ins:T:2", "1:Ins:T:3", "1:Ins:T:4", "1:Ins:T:5",
"2:Del:R:0", "2:Del:R:1", "2:Del:R:2", "2:Del:R:3", "2:Del:R:4", "2:Del:R:5",
"3:Del:R:0", "3:Del:R:1", "3:Del:R:2", "3:Del:R:3", "3:Del:R:4", "3:Del:R:5",
"4:Del:R:0", "4:Del:R:1", "4:Del:R:2", "4:Del:R:3", "4:Del:R:4", "4:Del:R:5",
"5:Del:R:0", "5:Del:R:1", "5:Del:R:2", "5:Del:R:3", "5:Del:R:4", "5:Del:R:5",
"2:Ins:R:0", "2:Ins:R:1", "2:Ins:R:2", "2:Ins:R:3", "2:Ins:R:4", "2:Ins:R:5",
"3:Ins:R:0", "3:Ins:R:1", "3:Ins:R:2", "3:Ins:R:3", "3:Ins:R:4", "3:Ins:R:5",
"4:Ins:R:0", "4:Ins:R:1", "4:Ins:R:2", "4:Ins:R:3", "4:Ins:R:4", "4:Ins:R:5",
"5:Ins:R:0", "5:Ins:R:1", "5:Ins:R:2", "5:Ins:R:3", "5:Ins:R:4", "5:Ins:R:5",
"2:Del:M:1", "3:Del:M:1", "3:Del:M:2", "4:Del:M:1", "4:Del:M:2", "4:Del:M:3",
"5:Del:M:1", "5:Del:M:2", "5:Del:M:3", "5:Del:M:4", "5:Del:M:5"]
cnv_categories = ["sample",
"BCper10mb_0", "BCper10mb_1", "BCper10mb_2", "BCper10mb_3",
"CN_0", "CN_1", "CN_2", "CN_3", "CN_4", "CN_5", "CN_6", "CN_7", "CN_8",
"CNCP_0", "CNCP_1", "CNCP_2", "CNCP_3", "CNCP_4", "CNCP_5", "CNCP_6", "CNCP_7",
"BCperCA_0", "BCperCA_1", "BCperCA_2", "BCperCA_3", "BCperCA_4", "BCperCA_5",
"SegSize_0", "SegSize_1", "SegSize_2", "SegSize_3", "SegSize_4", "SegSize_5",
"SegSize_6", "SegSize_7", "SegSize_8", "SegSize_9", "SegSize_10",
"CopyFraction_0", "CopyFraction_1", "CopyFraction_2", "CopyFraction_3", "CopyFraction_4",
"CopyFraction_5", "CopyFraction_6"]
#%% ==========================================================
# make concat sig dataframe
# ============================================================
def load_data(snv_counts_path, indel_counts_path, cnv_counts_path):
df_snv = pd.read_csv(snv_counts_path, sep='\t', low_memory=False)
df_snv = df_snv[snv_categories]
df_snv["sample"] = df_snv["sample"].astype(str)
df_indel = | pd.read_csv(indel_counts_path, sep='\t', low_memory=False) | pandas.read_csv |
"""Materials discovery using Earth Mover's Distance, DensMAP embeddings, and HDBSCAN*.
Create distance matrix, apply densMAP, and create clusters via HDBSCAN* to search for
interesting materials. For example, materials with high-target/low-density (density
proxy) or high-target surrounded by materials with low targets (peak proxy).
"""
# saving class objects: https://stackoverflow.com/a/37076668/13697228
from copy import copy
import dill as pickle
from pathlib import Path
from os.path import join
import gc
from torch.cuda import empty_cache
from warnings import warn
from operator import attrgetter
from chem_wasserstein.utils.Timer import Timer, NoTimer
import matplotlib.pyplot as plt
# from ml_matrics import density_scatter
import numpy as np
import pandas as pd
from scipy.stats import multivariate_normal, wasserstein_distance
from sklearn.preprocessing import MinMaxScaler, StandardScaler, RobustScaler
from sklearn.model_selection import LeaveOneGroupOut
from sklearn.manifold import MDS
from sklearn.decomposition import PCA
from sklearn.metrics import mean_squared_error
# from sklearn.decomposition import PCA
import umap
import hdbscan
from ElMD import ElMD
from chem_wasserstein.ElM2D_ import ElM2D
# from crabnet.utils.composition import generate_features
from composition_based_feature_vector.composition import generate_features
from crabnet.train_crabnet import get_model
from mat_discover.utils.data import data
from mat_discover.utils.nearest_neigh import nearest_neigh_props
from mat_discover.utils.pareto import pareto_plot # , get_pareto_ind
from mat_discover.utils.plotting import (
umap_cluster_scatter,
cluster_count_hist,
target_scatter,
dens_scatter,
dens_targ_scatter,
)
plt.rcParams.update(
{
"figure.autolayout": True,
"figure.figsize": [3.5, 3.5],
"figure.dpi": 300,
"xtick.direction": "in",
}
)
def my_mvn(mu_x, mu_y, r):
"""Calculate multivariate normal at (mu_x, mu_y) with constant radius, r."""
return multivariate_normal([mu_x, mu_y], [[r, 0], [0, r]])
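# Example (sketch): evaluate a radially symmetric kernel centred on one embedded point.
#
#     mvn = my_mvn(0.0, 0.0, r=0.5)
#     density = mvn.pdf([[0.1, -0.2], [1.0, 1.0]])  # densities at two query points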
def cdf_sorting_error(y_true, y_pred, y_dummy=None):
"""Cumulative distribution function sorting error via Wasserstein distance.
Parameters
----------
y_true, y_pred : list of (float or int or str)
True and predicted values to use for sorting error, respectively.
y_dummy : list of (float or int or str), optional
Dummy values to use to generate a scaled error, by default None
Returns
-------
error, dummy_error, scaled_error : float
The unscaled, dummy, and scaled errors that describes the mismatch in sorting
between the CDFs of two lists. The scaled error represents the improvement
relative to the dummy error, such that `scaled_error = error / dummy_error`. If
`scaled_error > 1`, the sorting error is worse than if you took the average of
the `y_true` values as the `y_pred` values. If `scaled_error < 1`, it is better
than this "dummy" regressor. Scaled errors closer to 0 are better.
"""
# Sorting "Error" Setup
n = len(y_true)
u = np.flip(np.cumsum(np.linspace(0, 1, n)))
v = u.copy()
if type(y_true[0]) is str:
lookup = {label: i for (i, label) in enumerate(y_true)}
df = | pd.DataFrame({"y_true": y_true, "y_pred": y_pred}) | pandas.DataFrame |
import root_pandas
import basf2_mva
import b2luigi
import pandas as pd
from sklearn.model_selection import train_test_split
def split_sample(
ntuple_file,
train_size,
test_size,
random_seed=42):
"""Split rootfile and return dataframes. Select 0th candidate."""
df = root_pandas.read_root(ntuple_file)
try:
train, test = train_test_split(
df,
test_size=test_size,
train_size=train_size,
random_state=random_seed)
except ValueError:
print(
"The dataframe was too small, the test and train sample are empty")
empty = {col: [] for col in df.columns}
test = | pd.DataFrame(empty) | pandas.DataFrame |
from typing import Any, Dict
import pandas as pd
from urllib.parse import urlparse, parse_qsl, urlunparse, urlencode
def convert_file_to_list(filelocation):
urls = None
if filelocation.endswith('.csv'):
df = pd.read_csv(filelocation)
urls = df['products'].to_list()
elif filelocation.endswith('.xlsx'):
df = pd.read_excel(filelocation)
urls = df['products'].to_list()
else:
print('Currently only csv and excel file format are supported')
return urls
def save_as(data, data_type, extension):
df = None
if isinstance(data, dict):
df = | pd.DataFrame(data, index=[0]) | pandas.DataFrame |
import random, os, copy, torch, torch.nn as nn, numpy as np, pandas as pd
from sklearn.utils import resample
from collections import defaultdict, Counter
import matplotlib.pyplot as plt
def upsampling(data, target_col_name):
np.random.seed(10)
data_copy = copy.deepcopy(data)
classes_up = np.unique(data_copy[target_col_name].values)
descending_counts_classes = list(data[target_col_name].value_counts().index)
majority = descending_counts_classes.pop(0)
data_majority = data_copy[data_copy[target_col_name]==majority]
new_data_upsampled = copy.deepcopy(data_majority)
for min_class in descending_counts_classes:
data_minority = data_copy[data_copy[target_col_name]==min_class]
# Upsample minority class
data_minority_upsampled = resample(data_minority,
replace=True, # sample with replacement
n_samples=np.sum(data_copy[target_col_name]==majority),
random_state=123) # reproducible results
new_data_upsampled = pd.concat([new_data_upsampled, data_minority_upsampled])
rand = list(range(new_data_upsampled.shape[0]))
np.random.shuffle(rand) #shuffle the indices
new_data_upsampled = new_data_upsampled.iloc[rand] #Re-index the data
return new_data_upsampled
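# Example (sketch with a toy frame; the column name is hypothetical):
#
#     toy = pd.DataFrame({"label": [0]*8 + [1]*2, "x": range(10)})
#     balanced = upsampling(toy, target_col_name="label")
#     balanced["label"].value_counts()  # both classes now have 8 rows, order shuffled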
def conceptsInstances_to_numeric(data, Embeddings, with_stats=True, only_stats=False, alpha=1):
random.seed(1)
datapoints = []
targets = []
if with_stats and not only_stats:
for index in range(data.shape[0]):
no_positive_example = True
no_negative_example = True
stats = pd.Series(data.iloc[index]['pos ex stats'][1:-1].split(",")).astype(float).values
stats = stats/max(1,stats[-1])
stats = stats[stats>0]
if len(stats) >= Embeddings.shape[1]:
stats = list(stats[:Embeddings.shape[1]])
else:
stats = list(stats)+[0]*(Embeddings.shape[1]-len(stats))
pos = list(filter(lambda x: not x in {', ', ''}, data.iloc[index]['positive examples'][2:-2].split("'")))
if pos:
no_positive_example = False
pos = pd.Series(pos).apply(lambda x: pd.Series(list(Embeddings.loc[x]))).values
neg = list(filter(lambda x: not x in {', ', ''}, data.iloc[index]['negative examples'][2:-2].split("'")))
#ipdb.set_trace()
if neg:
no_negative_example = False
neg = pd.Series(neg).apply(lambda x: pd.Series(list(random.random()**alpha * Embeddings.loc[x]))).values
#ipdb.set_trace()
if no_negative_example: pos = np.vstack([pos, np.array(stats)])
else: neg = np.vstack([neg, np.array(stats)])
datapoint = np.vstack([pos,neg]) if not no_positive_example and not no_negative_example else pos if no_negative_example else neg
datapoints.append(datapoint[None,:])
targets.append(data.iloc[index]["concept length"])
elif only_stats:
for index in range(data.shape[0]):
stats = pd.Series(data.iloc[index]['pos ex stats'][1:-1].split(",")).astype(float).values
stats = stats/max(1,stats[-1])
datapoints.append(stats[None,:])
targets.append(data.iloc[index]['concept length'])
elif not with_stats:
for index in range(data.shape[0]):
no_positive_example = True
no_negative_example = True
pos = list(filter(lambda x: not x in {', ', ''}, data.iloc[index]['positive examples'][2:-2].split("'")))
if pos:
no_positive_example = False
pos = | pd.Series(pos) | pandas.Series |
try:
from datetime import timedelta,datetime
from airflow import DAG
from airflow.operators.python_operator import PythonOperator
from datetime import datetime
import pandas as pd
print("All Dag modules are ok ......")
except Exception as e:
print("Error {} ".format(e))
def first_function_execute(**context):
print("first_function_execute ")
context['ti'].xcom_push(key='mykey', value="first_function_execute says Hello ")
def second_function_execute(**context):
instance = context.get("ti").xcom_pull(key="mykey")
data = [{"name":"Soumil","title":"Full Stack Software Engineer"}, { "name":"Nitin","title":"Full Stack Software Engineer"},]
df = | pd.DataFrame(data=data) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 20 10:54:48 2019
@author: raahul46
"""
####DEPENDENCIES####
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelBinarizer
from sklearn.preprocessing import Imputer
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import normalize
####PRE PROCESSING FUNCTION####
def preprocessing():
####READING DATASETS####
result = pd.read_csv("gender_submission.csv")
train = pd.read_csv("train.csv")
test = | pd.read_csv("test.csv") | pandas.read_csv |
import pickle
import numpy as np
import pandas as pd
## plot conf
import matplotlib.pyplot as plt
plt.rcParams.update({'font.size': 7})
width = 8.5/2.54
height = width*(3/4)
###
import os
script_dir = os.path.dirname(os.path.abspath(__file__))
plot_path = './'
## Load true fits
real_fit_male = | pd.read_csv(script_dir+'/plot_pickles/real_male_fit.csv') | pandas.read_csv |
"""Miscellaneous internal PyJanitor helper functions."""
import fnmatch
import functools
import os
import re
import socket
import sys
import warnings
from collections.abc import Callable as dispatch_callable
from itertools import chain, combinations
from typing import (
Callable,
Dict,
Iterable,
List,
NamedTuple,
Optional,
Pattern,
Tuple,
Union,
)
import numpy as np
import pandas as pd
from pandas.api.types import (
CategoricalDtype,
is_extension_array_dtype,
is_list_like,
is_scalar,
)
from pandas.core.common import apply_if_callable
from .errors import JanitorError
def check(varname: str, value, expected_types: list):
"""
One-liner syntactic sugar for checking types.
It can also check callables.
Should be used like this::
check('x', x, [int, float])
:param varname: The name of the variable (for diagnostic error message).
:param value: The value of the varname.
:param expected_types: The types we expect the item to be.
:raises TypeError: if data is not the expected type.
"""
is_expected_type: bool = False
for t in expected_types:
if t is callable:
is_expected_type = t(value)
else:
is_expected_type = isinstance(value, t)
if is_expected_type:
break
if not is_expected_type:
raise TypeError(
"{varname} should be one of {expected_types}".format(
varname=varname, expected_types=expected_types
)
)
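# Illustrative usage of `check` (minimal sketch; values are hypothetical):
#
#     check("n_iterations", 10, [int])            # passes silently
#     check("func", len, [callable])              # passes: callable(len) is True
#     check("n_iterations", "10", [int, float])   # raises TypeError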
def _clean_accounting_column(x: str) -> float:
"""
Perform the logic for the `cleaning_style == "accounting"` attribute.
This is a private function, not intended to be used outside of
``currency_column_to_numeric``.
It is intended to be used in a pandas `apply` method.
:returns: An object with a cleaned column.
"""
y = x.strip()
y = y.replace(",", "")
y = y.replace(")", "")
y = y.replace("(", "-")
if y == "-":
return 0.00
return float(y)
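# Behaviour sketch for the accounting-style cleaning above:
#
#     _clean_accounting_column("(1,500.25)")   # -> -1500.25 (parentheses mean negative)
#     _clean_accounting_column("2,000.00")     # -> 2000.0
#     _clean_accounting_column(" - ")          # -> 0.0 (a bare dash is treated as zero)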
def _currency_column_to_numeric(x, cast_non_numeric=None) -> str:
"""
Perform logic for changing cell values.
This is a private function intended to be used only in
``currency_column_to_numeric``.
It is intended to be used in a pandas `apply` method, after being passed
through `partial`.
"""
acceptable_currency_characters = {
"-",
".",
"1",
"2",
"3",
"4",
"5",
"6",
"7",
"8",
"9",
"0",
}
if len(x) == 0:
return "ORIGINAL_NA"
if cast_non_numeric:
if x in cast_non_numeric.keys():
check(
"{%r: %r}" % (x, str(cast_non_numeric[x])),
cast_non_numeric[x],
[int, float],
)
return cast_non_numeric[x]
return "".join(i for i in x if i in acceptable_currency_characters)
return "".join(i for i in x if i in acceptable_currency_characters)
def _replace_empty_string_with_none(column_series):
column_series.loc[column_series == ""] = None
return column_series
def _replace_original_empty_string_with_none(column_series):
column_series.loc[column_series == "ORIGINAL_NA"] = None
return column_series
def _strip_underscores(
df: pd.DataFrame, strip_underscores: Union[str, bool] = None
) -> pd.DataFrame:
"""
Strip underscores from DataFrames column names.
Underscores can be stripped from the beginning, end or both.
.. code-block:: python
df = _strip_underscores(df, strip_underscores='left')
:param df: The pandas DataFrame object.
:param strip_underscores: (optional) Removes the outer underscores from all
column names. Default None keeps outer underscores. Values can be
either 'left', 'right' or 'both' or the respective shorthand 'l', 'r'
and True.
:returns: A pandas DataFrame with underscores removed.
"""
df = df.rename(
columns=lambda x: _strip_underscores_func(x, strip_underscores)
)
return df
def _strip_underscores_func(
col: str, strip_underscores: Union[str, bool] = None
) -> str:
"""Strip underscores from a string."""
underscore_options = [None, "left", "right", "both", "l", "r", True]
if strip_underscores not in underscore_options:
raise JanitorError(
f"strip_underscores must be one of: {underscore_options}"
)
if strip_underscores in ["left", "l"]:
col = col.lstrip("_")
elif strip_underscores in ["right", "r"]:
col = col.rstrip("_")
elif strip_underscores == "both" or strip_underscores is True:
col = col.strip("_")
return col
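# Quick sketch of the stripping variants handled above:
#
#     _strip_underscores_func("_cell_type_", "left")   # -> "cell_type_"
#     _strip_underscores_func("_cell_type_", "r")      # -> "_cell_type"
#     _strip_underscores_func("_cell_type_", True)     # -> "cell_type"
#     _strip_underscores_func("_cell_type_")           # -> "_cell_type_" (unchanged)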
def import_message(
submodule: str,
package: str,
conda_channel: str = None,
pip_install: bool = False,
):
"""
Return warning if package is not found.
Generic message for indicating to the user when a function relies on an
optional module / package that is not currently installed. Includes
installation instructions. Used in `chemistry.py` and `biology.py`.
:param submodule: pyjanitor submodule that needs an external dependency.
:param package: External package this submodule relies on.
:param conda_channel: Conda channel package can be installed from,
if at all.
:param pip_install: Whether package can be installed via pip.
"""
is_conda = os.path.exists(os.path.join(sys.prefix, "conda-meta"))
installable = True
if is_conda:
if conda_channel is None:
installable = False
installation = f"{package} cannot be installed via conda"
else:
installation = f"conda install -c {conda_channel} {package}"
else:
if pip_install:
installation = f"pip install {package}"
else:
installable = False
installation = f"{package} cannot be installed via pip"
print(
f"To use the janitor submodule {submodule}, you need to install "
f"{package}."
)
print()
if installable:
print("To do so, use the following command:")
print()
print(f" {installation}")
else:
print(f"{installation}")
def idempotent(func: Callable, df: pd.DataFrame, *args, **kwargs):
"""
Raises error if a function operating on a `DataFrame` is not idempotent,
that is, `func(func(df)) = func(df)` is not true for all `df`.
:param func: A python method.
:param df: A pandas `DataFrame`.
:param args: Positional arguments supplied to the method.
:param kwargs: Keyword arguments supplied to the method.
:raises ValueError: If `func` is found to not be idempotent for the given
`DataFrame` `df`.
"""
    if not func(df, *args, **kwargs).equals(
        func(func(df, *args, **kwargs), *args, **kwargs)
    ):
        raise ValueError(
            "Supplied function is not idempotent for the given DataFrame."
        )
def deprecated_alias(**aliases) -> Callable:
"""
Used as a decorator when deprecating old function argument names, while
keeping backwards compatibility.
Implementation is inspired from `StackOverflow`_.
.. _StackOverflow: https://stackoverflow.com/questions/49802412/how-to-implement-deprecation-in-python-with-argument-alias
Functional usage example:
.. code-block:: python
@deprecated_alias(a='alpha', b='beta')
def simple_sum(alpha, beta):
return alpha + beta
:param aliases: Dictionary of aliases for a function's arguments.
:return: Your original function wrapped with the kwarg redirection
function.
""" # noqa: E501
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
rename_kwargs(func.__name__, kwargs, aliases)
return func(*args, **kwargs)
return wrapper
return decorator
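# Behaviour sketch for the decorator above, using the `simple_sum` example from
# its docstring:
#
#     simple_sum(alpha=1, beta=2)       # -> 3, no warning
#     simple_sum(a=1, beta=2)           # -> 3, emits a DeprecationWarning for `a`
#     simple_sum(a=1, alpha=1, beta=2)  # raises TypeError (alias and new name given)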
def refactored_function(message: str) -> Callable:
"""Used as a decorator when refactoring functions
Implementation is inspired from `Hacker Noon`_.
    .. _Hacker Noon: https://hackernoon.com/why-refactoring-how-to-restructure-python-package-51b89aa91987
Functional usage example:
.. code-block:: python
@refactored_function(
message="simple_sum() has been refactored. Use hard_sum() instead."
)
def simple_sum(alpha, beta):
return alpha + beta
:param message: Message to use in warning user about refactoring.
:return: Your original function wrapped with the kwarg redirection
function.
""" # noqa: E501
def decorator(func):
def emit_warning(*args, **kwargs):
warnings.warn(message, FutureWarning)
return func(*args, **kwargs)
return emit_warning
return decorator
def rename_kwargs(func_name: str, kwargs: Dict, aliases: Dict):
"""
Used to update deprecated argument names with new names. Throws a
TypeError if both arguments are provided, and warns if old alias is used.
Nothing is returned as the passed ``kwargs`` are modified directly.
Implementation is inspired from `StackOverflow`_.
.. _StackOverflow: https://stackoverflow.com/questions/49802412/how-to-implement-deprecation-in-python-with-argument-alias
:param func_name: name of decorated function.
:param kwargs: Arguments supplied to the method.
:param aliases: Dictionary of aliases for a function's arguments.
:raises TypeError: if both arguments are provided.
""" # noqa: E501
for old_alias, new_alias in aliases.items():
if old_alias in kwargs:
if new_alias in kwargs:
raise TypeError(
f"{func_name} received both {old_alias} and {new_alias}"
)
warnings.warn(
f"{old_alias} is deprecated; use {new_alias}",
DeprecationWarning,
)
kwargs[new_alias] = kwargs.pop(old_alias)
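# Minimal sketch of the in-place kwarg redirection performed above:
#
#     kwargs = {"a": 1, "beta": 2}
#     rename_kwargs("simple_sum", kwargs, {"a": "alpha", "b": "beta"})
#     # emits a DeprecationWarning for `a`; kwargs now == {"beta": 2, "alpha": 1}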
def check_column(
df: pd.DataFrame, column_names: Union[Iterable, str], present: bool = True
):
"""
One-liner syntactic sugar for checking the presence or absence of columns.
Should be used like this::
        check_column(df, ['a', 'b'], present=True)
This will check whether columns "a" and "b" are present in df's columns.
One can also guarantee that "a" and "b" are not present
by switching to ``present = False``.
:param df: The name of the variable.
:param column_names: A list of column names we want to check to see if
present (or absent) in df.
:param present: If True (default), checks to see if all of column_names
are in df.columns. If False, checks that none of column_names are
in df.columns.
:raises ValueError: if data is not the expected type.
"""
if isinstance(column_names, str) or not isinstance(column_names, Iterable):
column_names = [column_names]
for column_name in column_names:
if present and column_name not in df.columns: # skipcq: PYL-R1720
raise ValueError(
f"{column_name} not present in dataframe columns!"
)
elif not present and column_name in df.columns:
raise ValueError(
f"{column_name} already present in dataframe columns!"
)
def skipna(f: Callable) -> Callable:
"""
Decorator for escaping np.nan and None in a function
Should be used like this::
df[column].apply(skipna(transform))
or::
@skipna
def transform(x):
pass
:param f: the function to be wrapped
:returns: _wrapped, the wrapped function
"""
def _wrapped(x, *args, **kwargs):
if (type(x) is float and np.isnan(x)) or x is None:
return np.nan
return f(x, *args, **kwargs)
return _wrapped
def skiperror(
f: Callable, return_x: bool = False, return_val=np.nan
) -> Callable:
"""
Decorator for escaping any error in a function.
Should be used like this::
df[column].apply(
skiperror(transform, return_val=3, return_x=False))
or::
@skiperror(return_val=3, return_x=False)
def transform(x):
pass
:param f: the function to be wrapped
    :param return_x: whether or not the original value that caused the error
should be returned
:param return_val: the value to be returned when an error hits.
Ignored if return_x is True
:returns: _wrapped, the wrapped function
"""
def _wrapped(x, *args, **kwargs):
try:
return f(x, *args, **kwargs)
except Exception: # skipcq: PYL-W0703
if return_x:
return x
return return_val
return _wrapped
def _computations_expand_grid(others: dict) -> pd.DataFrame:
"""
Creates a cartesian product of all the inputs in `others`.
Combines Numpy's `mgrid`, with the `take` method in numpy/Pandas,
to expand each input to the length of the cumulative product of
all inputs in `others`.
There is a performance penalty for small entries (length less than 10)
    in using this method, instead of `itertools.product`; however, there are
significant performance benefits as the size of the data increases.
Another benefit of this approach,
in addition to the significant performance gains,
is the preservation of data types. This is particularly relevant for
Pandas' extension arrays dtypes (categoricals, nullable integers, ...).
A dataframe of all possible combinations is returned.
"""
for key, _ in others.items():
check("key", key, [str])
grid = {}
for key, value in others.items():
if is_scalar(value):
grid[key] = pd.Series([value])
elif is_extension_array_dtype(value) and not (
isinstance(value, pd.Series)
):
grid[key] = | pd.Series(value) | pandas.Series |
def enter_foodgroup():
import tqdm
import psycopg2
import pandas as pd
from cnf_xplor.api.models import FoodGroup
def fix_french_accents(df, key):
df[key] = [x.encode('utf-8').decode('utf-8') if x is not None else x for x in df[key].values]
return df
def reformat_bool(s):
if s is None:
return None
if (s == 1) | (s == "1"):
return True
if (s == 0) | (s == "0"):
return False
if s == "Y":
return True
if s == "N":
return False
assert False, "problem with boolean {}".format(s)
df_foodgroup = pd.read_csv('UpdatedCNFData/CNFADM_FOOD_GROUP.csv', sep='\t', encoding='utf-16')
df_foodgroup = df_foodgroup.where((pd.notnull(df_foodgroup)), None)
df_foodgroup = fix_french_accents(df_foodgroup,"GROUP_DESC_F")
# 2 and 1 instead of 1 and 0. Needs to be changed
df_foodgroup["FG_DB_SOURCE_C"] = df_foodgroup["FG_DB_SOURCE_C"].values-1
for (i, dat) in tqdm.tqdm(df_foodgroup.iterrows(), total=df_foodgroup.shape[0], desc="foodgroup"):
if dat["GROUP_C"] is None:
continue
FoodGroup.objects.get_or_create(
GROUP_C = dat["GROUP_C"],
GROUP_DESC = dat["GROUP_DESC"],
GROUP_DESC_F = dat["GROUP_DESC_F"],
GROUP_CODE=dat["GROUP_CODE"],
FG_GROUP_ABBRV=dat["FG_GROUP_ABBRV"],
GROUP_COMPOSITE_FLG=reformat_bool(dat["GROUP_COMPOSITE_FLG"]),
GROUP_RECIPE_FLG=reformat_bool(dat["GROUP_RECIPE_FLG"]),
FG_DB_SOURCE_C=reformat_bool(dat["FG_DB_SOURCE_C"]),
FG_BBCA_INDEX=dat["FG_BBCA_INDEX"],
GROUP_OWNER=dat["GROUP_OWNER"],
SHARED_GROUP=dat["SHARED_GROUP"],
)
conn = psycopg2.connect(user = "averster",
password = "<PASSWORD>",
host = "127.0.0.1",
port = "5432",
database = "cnf_xplor")
cur = conn.cursor()
cur.execute(" SELECT setval('\"api_foodgroup_GROUP_C_seq\"',(SELECT max(\"GROUP_C\") + 1 FROM api_foodgroup));")
def enter_source():
import psycopg2
import tqdm
import pandas as pd
from cnf_xplor.api.models import FoodSource
def fix_french_accents(df, key):
df[key] = [x.encode('utf-8').decode('utf-8') if x is not None else x for x in df[key].values]
return df
def reformat_bool(s):
if s is None:
return None
if (s == 1) | (s == "1"):
return True
if (s == 0) | (s == "0"):
return False
if s == "Y":
return True
if s == "N":
return False
assert False, "problem with boolean {}".format(s)
df_foodsource = pd.read_csv('UpdatedCNFData/CNFADM_FOOD_SOURCE.csv', sep = '\t', encoding='utf-16')
df_foodsource = df_foodsource.where((pd.notnull(df_foodsource)), None)
df_foodsource = fix_french_accents(df_foodsource,"SOURCE_DESC_F")
for (i, dat) in tqdm.tqdm(df_foodsource.iterrows(), total=df_foodsource.shape[0], desc="foodsource"):
if dat["SOURCE_C"] is None:
continue
FoodSource.objects.get_or_create(
SOURCE_C = dat["SOURCE_C"],
SOURCE_DESC = dat["SOURCE_DESC"],
SOURCE_DESC_F = dat["SOURCE_DESC_F"],
NRD_REF = dat["NRD_REF"],
SHARED_SOURCE = reformat_bool(dat["SHARED_SOURCE"]),
SOURCE_OWNER = dat["SOURCE_OWNER"],
SOURCE_CNF_OWNER = dat["SOURCE_CNF_OWNER"]
)
conn = psycopg2.connect(user = "averster",
password = "<PASSWORD>",
host = "127.0.0.1",
port = "5432",
database = "cnf_xplor")
cur = conn.cursor()
cur.execute(" SELECT setval('\"api_foodsource_SOURCE_C_seq\"',(SELECT max(\"SOURCE_C\") + 1 FROM api_foodsource));")
def enter_food():
import psycopg2
import pandas as pd
import tqdm
from cnf_xplor.api.models import Food, FoodSource, FoodGroup
def fix_french_accents(df, key):
df[key] = [x.encode('utf-8').decode('utf-8') if x is not None else x for x in df[key].values]
return df
def is_number(s):
try:
int(s)
return True
except (ValueError,TypeError) as e:
return False
def reformat_datetime(s):
from datetime import datetime
if s is None:
return None
try:
#d = datetime.strptime(s, '%d/%m/%Y %H:%M:%S')
d = datetime.strptime(s, '%Y-%m-%d %H:%M:%S')
return d.strftime('%Y-%m-%d %H:%M:%S')
except:
#d = datetime.strptime(s, '%d/%m/%Y')
d = datetime.strptime(s, '%Y-%m-%d')
return d.strftime('%Y-%m-%d')
def reformat_bool(s):
if s is None:
return None
if (s == 1) | (s == "1"):
return True
if (s == 0) | (s == "0"):
return False
if s == "Y":
return True
if s == "N":
return False
assert False, "problem with boolean {}".format(s)
# Load data
df_food = pd.read_csv('UpdatedCNFData/CNFADM_FOOD_NAME.csv', sep='\t', encoding='utf-16')
# Merge, then populate the database
df_food = df_food.where((pd.notnull(df_food)), None)
# Fix the french accents
df_food = fix_french_accents(df_food,"FR_NAME")
df_food = fix_french_accents(df_food,"FOOD_DESC_F")
# Loop through, make a database entry for each row
for (i,dat) in tqdm.tqdm(df_food.iterrows(), total=df_food.shape[0], desc = "food"):
if pd.isnull(dat["GROUP_C"]):
dat["GROUP_C"] = None
if dat["FOOD_C"] is None:
print(dat)
continue
if not is_number(dat["FOOD_C"]):
print(dat)
continue
try:
g = FoodGroup.objects.get(pk=int(dat["GROUP_C"]))
except:
g = None
try:
s = FoodSource.objects.get(pk=int(dat["SOURCE_C"]))
except:
s = None
#try:
# This will work if there is no item, or if we have done no database migrations
#print(dat)
Food.objects.get_or_create(
FOOD_C = dat["FOOD_C"],
ENG_NAME=dat["ENG_NAME"],
FR_NAME=dat["FR_NAME"],
FOOD_DESC=dat["FOOD_DESC"],
FOOD_DESC_F=dat["FOOD_DESC_F"],
NUT_CAN_C=dat["NUT_CAN_C"],
CNF_FLAG=reformat_bool(dat["CNF_FLAG"]),
SOURCE = s,
COUNTRY_C = dat["COUNTRY_C"],
GROUP = g,
COMMENT_T = dat["COMMENT_T"],
FN_COMMENT_F = dat["FN_COMMENT_F"],
FOOD_REFERENCE=dat["FOOD_REFERENCE"],
SCIENTIFIC_NAME=dat["SCIENTIFIC_NAME"],
PUBLICATION_FLAG=reformat_bool(dat["PUBLICATION_FLAG"]),
PUBLICATION_CODE=dat["PUBLICATION_CODE"],
ITEM_C=dat["ITEM_C"],
SEQUENCE_C=dat["SEQUENCE_C"],
LEGACY_GROUP_C=dat["LEGACY_GROUP_C"],
COMMON_NM_E=dat["COMMON_NM_E"],
COMMON_NM_F=dat["COMMON_NM_F"],
FN_DB_SOURCE_C=dat["FN_DB_SOURCE_C"],
FN_RECIPE_FLG=reformat_bool(dat["FN_RECIPE_FLG"]),
FN_SYSTEM_VIEW_C=dat["FN_SYSTEM_VIEW_C"],
FN_FAT_CHANGE=dat["FN_FAT_CHANGE"],
FN_MOISTURE_CHANGE=dat["FN_MOISTURE_CHANGE"],
FN_SYS_USER_CREATE_C=dat["FN_SYS_USER_CREATE_C"],
FN_SYS_USER_EDIT_C=dat["FN_SYS_USER_EDIT_C"],
FN_TEMPLATE_C=dat["FN_TEMPLATE_C"],
FN_TEMP=dat["FN_TEMP"],
FN_ARCHIVED=dat["FN_ARCHIVED"],
FN_LEGACY_C=dat["FN_LEGACY_C"],
US_RECIPE_C=dat["US_RECIPE_C"],
USDA_MODIFIED=reformat_bool(dat["USDA_MODIFIED"]),
USDA_TEMP=dat["USDA_TEMP"],
CANADA_FOOD_SUBGROUP_ID=dat["CANADA_FOOD_SUBGROUP_ID"],
CFGHE_FLAG=dat["CFGHE_FLAG"],
ORIG_CANADA_FOOD_SUBGROUP_ID=dat["ORIG_CANADA_FOOD_SUBGROUP_ID"],
FOOD_OWNER=dat["FOOD_OWNER"],
SHARED_FOOD=reformat_bool(dat["SHARED_FOOD"]),
CANDI_REC_NUM=dat["CANDI_REC_NUM"],
INHERITANCE_FLAG=reformat_bool(dat["INHERITANCE_FLAG"]),
FOOD_CODE=dat["FOOD_CODE"],
DATE_ENTRY = reformat_datetime(dat["DATE_ENTRY"]),
DATE_CHANGE = reformat_datetime(dat["DATE_CHANGE"]),
DATE_END = reformat_datetime(dat["DATE_END"])
)
#except:
# f = Food.objects.get(dat["FOOD_C"])
# f.SOURCE_C = dat["SOURCE_C"],
# f.COUNTRY_C = dat["COUNTRY_C"],
# f.GROUP = g,
#
# f.ENG_NAME = dat["ENG_NAME"],
# f.FR_NAME = dat["FR_NAME"],
# f.FOOD_DESC = dat["FOOD_DESC"],
# f.FOOD_DESC_F = dat["FOOD_DESC_F"],
# f.COMMENT_T = dat["COMMENT_T"],
# f.FN_COMMENT_F = dat["FN_COMMENT_F"],
#
# f.DATE_ENTRY = reformat_datetime(dat["DATE_ENTRY"]),
# f.DATE_CHANGE = reformat_datetime(dat["DATE_CHANGE"]),
# r.DATE_END = reformat_datetime(dat["DATE_END"])
# f.save()
#Finally, deal with the sequence
#SELECT setval('"viewfood_food_FOOD_C_seq"',(SELECT max("FOOD_C") + 1 FROM viewfood_food));
conn = psycopg2.connect(user = "averster",
password = "<PASSWORD>",
host = "127.0.0.1",
port = "5432",
database = "cnf_xplor")
cur = conn.cursor()
cur.execute(" SELECT setval('\"api_food_FOOD_C_seq\"',(SELECT max(\"FOOD_C\") + 1 FROM api_food));")
def enter_conversion_factors():
import pandas as pd
import tqdm
from cnf_xplor.api.models import ConversionFactor, Measure, Food, MeasureType
import psycopg2
def fix_french_accents(df, key):
df[key] = [x.encode('utf-8').decode('utf-8') if x is not None else x for x in df[key].values]
return df
def reformat_datetime(s):
from datetime import datetime
if s is None:
return None
try:
d = datetime.strptime(s, '%Y-%m-%d %H:%M:%S')
return d.strftime('%Y-%m-%d %H:%M:%S')
except:
try:
d = datetime.strptime(s, '%Y-%m-%d %H:%M')
return d.strftime('%Y-%m-%d %H:%M')
except:
d = datetime.strptime(s, '%Y-%m-%d')
return d.strftime('%Y-%m-%d')
def reformat_bool(s):
if s is None:
return None
if (s == 1) | (s == "1"):
return True
if (s == 0) | (s == "0"):
return False
if s == "Y":
return True
if s == "N":
return False
assert False, "problem with boolean {}".format(s)
def convert_to_int(s):
if s is None:
return s
return int(s)
infile = "UpdatedCNFData/CNFADM_CONV_FACTOR.csv"
df_convfactor = pd.read_csv(infile, sep = "\t", encoding="utf-16")
df_convfactor = df_convfactor.where((pd.notnull(df_convfactor)), None)
infile = "UpdatedCNFData/CNFADM_MEASURE.csv"
df_measure = pd.read_csv(infile, sep = "\t", encoding="utf-16")
df_measure = df_measure.where((pd.notnull(df_measure)), None)
infile = "UpdatedCNFData/CNFADM_MEASURE_TYPE.csv"
df_measuretype = pd.read_csv(infile, sep = "\t", encoding="utf-16")
df_measuretype = df_measuretype.where((pd.notnull(df_measuretype)), None)
# Merge, then add to the database
#df_full = df_convfactor.merge(df_measure, on = "MEASURE_ID", how = "left")
#df_full = df_full.where((pd.notnull(df_full)), None)
# fix encoding on this one
#df_full = fix_french_accents(df_full,"MEASURE_DESC")
#df_full = fix_french_accents(df_full,"MEASURE_DESC_F")
#df_full = fix_french_accents(df_full,"MEASURE_ABBRV")
#df_full = fix_french_accents(df_full,"MEASURE_ABBRV_F")
for (i,dat) in tqdm.tqdm(df_measuretype.iterrows(), total=df_measuretype.shape[0], desc="measuretype"):
MeasureType.objects.get_or_create(
M_TYPE_ID=dat["M_TYPE_ID"],
M_TYPE_DESC=dat["M_TYPE_DESC"],
M_TYPE_ABBRV=dat["M_TYPE_ABBRV"],
M_TYPE_USER_SELECTABLE=reformat_bool(dat["M_TYPE_USER_SELECTABLE"]),
M_TYPE_DESC_F=dat["M_TYPE_DESC_F"]
)
for (i,dat) in tqdm.tqdm(df_measure.iterrows(), total=df_measure.shape[0], desc="measure"):
try:
t = MeasureType.objects.get(pk=int(dat["M_TYPE_ID"]))
except:
print("Can't find a measure type for {}".format(dat["M_TYPE_ID"]))
t = None
Measure.objects.get_or_create(
MEASURE_ID=dat["MEASURE_ID"],
MEASURE_DESC=dat["MEASURE_DESC"],
MEASURE_DESC_F=dat["MEASURE_DESC_F"],
MEASURE_ABBRV=dat["MEASURE_ABBRV"],
MEASURE_ABBRV_F=dat["MEASURE_ABBRV_F"],
M_TYPE=t,
MEASURE_OWNER=dat["MEASURE_OWNER"],
SHARED_MEASURE=reformat_bool(dat["SHARED_MEASURE"]),
PUBLICATION_CODE=dat["PUBLICATION_CODE"]
)
for (i,dat) in tqdm.tqdm(df_convfactor.iterrows(), total=df_convfactor.shape[0], desc="convertionfactor"):
try:
f = Food.objects.get(pk=int(dat["FOOD_C"]))
except:
print("Can't find a food for {}".format(dat["FOOD_C"]))
f = None
try:
m = Measure.objects.get(pk=int(dat["MEASURE_ID"]))
except:
print("Can't find a measure for {}".format(dat["MEASURE_ID"]))
m = None
ConversionFactor.objects.get_or_create(
CF_FACTOR_C = dat["CF_FACTOR_C"],
FOOD_C = f,
MEASURE_ID = m,
CONV_FACTOR = dat["CONV_FACTOR"],
COMMENT_T=dat["COMMENT_T"],
CF_COMMENT_F=dat["CF_COMMENT_F"],
FLAG_CFG=reformat_bool(dat["FLAG_CFG"]),
NO_SERVING=dat["NO_SERVING"],
VALID_NULL_CF=dat["VALID_NULL_CF"],
CF_REF_ID=dat["CF_REF_ID"],
PUBLICATION_CODE=dat["PUBLICATION_CODE"],
SEQ_WEB=dat["SEQ_WEB"],
FLAG_REFERENCE_AMOUNT=reformat_bool(dat["FLAG_REFERENCE_AMOUNT"]),
SHOW_SERVING_SIZE=reformat_bool(dat["SHOW_SERVING_SIZE"]),
CF_OWNER=dat["CF_OWNER"],
SHARED_CF=reformat_bool(dat["SHARED_CF"]),
FLAG_REPORT=reformat_bool(dat["FLAG_REPORT"]),
FN_SYS_USER_CREATE_C=dat["FN_SYS_USER_CREATE_C"],
FN_SYS_USER_EDIT_C=dat["FN_SYS_USER_EDIT_C"],
PUBLICATION_FLAG=reformat_bool(dat["PUBLICATION_FLAG"]),
DATE_ENTRY = reformat_datetime(dat["DATE_ENTRY"]),
DATE_END = reformat_datetime(dat["DATE_END"]),
)
conn = psycopg2.connect(user = "averster",
password = "<PASSWORD>",
host = "127.0.0.1",
port = "5432",
database = "cnf_xplor")
cur = conn.cursor()
cur.execute(" SELECT setval('\"api_conversionfactor_CF_FACTOR_C_seq\"',(SELECT max(\"CF_FACTOR_C\") + 1 FROM api_conversionfactor));")
def add_nutrients():
import pandas as pd
import tqdm
from cnf_xplor.api.models import Nutrient, Food, NutrientOfFood, NutrientSource, NutrientReference
import psycopg2
def fix_french_accents(df, key):
df[key] = [x.encode('utf-8').decode('utf-8') if x is not None else x for x in df[key].values]
return df
def convert_to_int(s):
if s is None:
return s
return int(s)
def reformat_bool(s):
if s is None:
return None
if (s == 1) | (s == "1"):
return True
if (s == 0) | (s == "0"):
return False
if s == "Y":
return True
if s == "N":
return False
assert False, "problem with boolean {}".format(s)
df_nutramount = pd.read_csv('UpdatedCNFData/CNFADM_NUTR_AMOUNT.csv', delimiter = "\t", encoding='utf-16')
df_nutrname = | pd.read_csv('UpdatedCNFData/CNFADM_NUTR_NAME.csv', delimiter = "\t", encoding='utf-16') | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 17 11:56:35 2019
@author: hcamphausen
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
from sklearn.linear_model import LinearRegression, Ridge
from sklearn.preprocessing import PolynomialFeatures, MinMaxScaler
from sklearn.pipeline import make_pipeline
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import mean_squared_error, mean_absolute_error
from sklearn.preprocessing import MinMaxScaler
df = pd.read_csv('train.csv', index_col=0, parse_dates=True)
plt.figure(figsize=(8,4))
sns.boxplot(x='season', y='count', data=df)
plt.figure(figsize=(16,8))
sns.regplot(x='windspeed', y='count', data=df)
plt.figure(figsize=(30,8))
sns.boxplot(x='humidity', y='count', data=df)
plt.figure(figsize=(16,8))
sns.regplot(x='humidity', y='count', data=df)
# FEATURE ENGINEERING
def feature_engineering(df):
# drop columns that test data does not have
    if 'casual' in df.columns and 'registered' in df.columns:
df.drop(['casual', 'registered'], axis=1, inplace=True)
else:
pass
# one-hot encoding season
one_hot_encoded = pd.get_dummies(df['season'])
df = pd.concat((df, one_hot_encoded), axis=1)
df.rename(columns={1: "spring", 2: "summer", 3: "fall", 4: "winter"}, inplace=True)
df.drop(['season'], axis = 1, inplace=True)
#weather - 1: Clear, 2Few clouds, 3Partly cloudy, 4Partly cloudy
one_hot_encoded_2 = pd.get_dummies(df['weather'])
df = pd.concat((df, one_hot_encoded_2), axis=1)
df.rename(columns={1:"clear",2:"few_clouds",3:"partly_cloudy",4:"cloudy"}, inplace=True)
df.drop(['cloudy'], axis=1, inplace=True)
df.drop(['weather'], axis=1, inplace=True)
    # log count - remember to exponentiate the count for test predictions
df['count_log'] = np.log1p(df['count'])
df.drop(['count'], axis=1, inplace=True)
# add hour column
df['hour'] = df.index.hour
#df['year'] = df.index.year
#df['month'] = df.index.month
#df['day'] = df.index.day
df['dayofweek'] = df.index.dayofweek
# one hot encoding hour and dayof week
one_hot_encoded_day_of_week = pd.get_dummies(df['dayofweek'])
df = pd.concat((df, one_hot_encoded_day_of_week), axis=1)
df.rename(columns={0:"Monday",1:"Tuesday",2:"Wednesday",3:"Thursday",4:"Friday",5:"Saturday",6:"Sunday"}, inplace=True)
df.drop(['dayofweek'], axis=1, inplace=True)
one_hot_encoded_hour = pd.get_dummies(df['hour'])
df = pd.concat((df, one_hot_encoded_hour), axis=1)
df.drop(['hour'], axis=1, inplace=True)
# drop temperatures
df.drop(['temp','atemp'], axis=1, inplace=True)
# drop holiday as super small dataset for 1
df.drop(['holiday'], axis = 1, inplace=True)
#scaling data
#scaler = MinMaxScaler()
#df[['humidity', 'windspeed']] = scaler.fit_transform(df[['humidity', 'windspeed']])
#drop windspeed as weird measurings
df.drop(['windspeed'], axis=1, inplace=True)
#drop humidity as weird measurings
df.drop(['humidity'], axis=1, inplace=True)
return df
df_train = feature_engineering(df)
df_train.head()
plt.figure(figsize=(8,4))
sns.heatmap(df_train.corr(), cmap='Oranges', annot=True)
corr = df_train[df_train.columns[1:]].corr()['count_log'][:]
df_corr = pd.DataFrame(data=corr)
df_corr.plot.bar()
# SPLITTING TRAIN AND TEST DATA SET
df_train.columns
# shuffle : df_train = df_train.sample(len(df_train))
X = df_train[['workingday', 'spring', 'summer', 'fall',
'winter', 'clear', 'few_clouds', 'partly_cloudy',
'Monday', 'Tuesday', 'Wednesday',
'Thursday', 'Friday', 'Saturday', 'Sunday',
0, 1, 2, 3,
4, 5, 6, 7,
8, 9, 10, 11,
12, 13, 14, 15,
16, 17, 18, 19,
20, 21, 22, 23
]]
y = df_train['count_log']
Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, test_size=0.5)
pipeline = make_pipeline(
MinMaxScaler(), # transform
Ridge(alpha=0.0) # predict
)
# Hyperparameter Optimization
g = GridSearchCV(pipeline, cv=5, param_grid={
'ridge__alpha': [0.0, 0.1, 0.01, 0.001]
})
#fitting model
g.fit(Xtrain,ytrain)
# train vs. test scores
train_score = g.score(Xtrain, ytrain)
print("This is my training score: " + str(train_score))
test_score = g.score(Xtest, ytest)
print("This is my testing score: " + str(test_score))
y_pred = g.predict(Xtrain)
mse_train = mean_squared_error(ytrain, y_pred)
print("This is my train MSE: " + str(mse_train))
y_predtest = g.predict(Xtest)
mse_test = mean_squared_error(ytest, y_predtest)
print("This is my test MSE: " + str(mse_test))
y2 = np.expm1(ytrain)
y2pred = np.expm1(y_pred)
mae_train = mean_absolute_error(y2, y2pred)
print("This is my train MAE: " + str(mae_train))
y3 = np.expm1(ytest)
y3pred = np.expm1(y_predtest)
mae_test = mean_absolute_error(y3, y3pred)
print("This is my test MAE: " + str(mae_test))
#######################
#CHECKING ASSUMPTIONS
sns.jointplot(ytrain,y_pred, kind="reg")
# Autocorrelation - Durbin-Watson statistic (aim at 2)
from statsmodels.stats.stattools import durbin_watson
print(durbin_watson(ytrain-y_pred, axis=0))
# Sum of residuals = 0?
residuals = ytrain - y_pred
residuals = np.array(residuals)
sum_res = residuals.mean().round(5)
print("The sum of my residuals is :" + str(sum_res))
# Normal distribution of residuals?
plt.hist(residuals, bins=20)
# Change in variance - homoscedasticity / heteroscedasticity
import statsmodels.api as sm
pl = sm.qqplot(residuals, line='r')
# Are features linearly independent?
from statsmodels.stats.outliers_influence import variance_inflation_factor as VIF
vifs = [VIF(df_train.values, i) for i, colname in enumerate(df_train)]
s = pd.Series(vifs,index=df_train.columns)
s.plot.bar()
##########################
# Kaggle test set
kaggle_test = pd.read_csv('test.csv', parse_dates=True,index_col=0)
def feature_engineering_test(df):
# drop columns that test data does not have
    if 'casual' in df.columns and 'registered' in df.columns:
df.drop(['casual', 'registered'], axis=1, inplace=True)
else:
pass
# one-hot encoding season
one_hot_encoded = pd.get_dummies(df['season'])
df = pd.concat((df, one_hot_encoded), axis=1)
df.rename(columns={1: "spring", 2: "summer", 3: "fall", 4: "winter"}, inplace=True)
df.drop(['season'], axis = 1, inplace=True)
#weather - 1: Clear, 2Few clouds, 3Partly cloudy, 4Partly cloudy
one_hot_encoded_2 = | pd.get_dummies(df['weather']) | pandas.get_dummies |
from __future__ import print_function
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from pandas import (Series, Index, Int64Index, Timestamp, Period,
DatetimeIndex, PeriodIndex, TimedeltaIndex,
Timedelta, timedelta_range, date_range, Float64Index,
_np_version_under1p10)
import pandas.tslib as tslib
import pandas.tseries.period as period
import pandas.util.testing as tm
from pandas.tests.test_base import Ops
class TestDatetimeIndexOps(Ops):
tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific']
def setUp(self):
super(TestDatetimeIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['date', 'time', 'microsecond', 'nanosecond',
'is_month_start', 'is_month_end',
'is_quarter_start',
'is_quarter_end', 'is_year_start',
'is_year_end', 'weekday_name'],
lambda x: isinstance(x, DatetimeIndex))
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH7206
for op in ['year', 'day', 'second', 'weekday']:
self.assertRaises(TypeError, lambda x: getattr(self.dt_series, op))
# attribute access should still work!
s = Series(dict(year=2000, month=1, day=10))
self.assertEqual(s.year, 2000)
self.assertEqual(s.month, 1)
self.assertEqual(s.day, 10)
self.assertRaises(AttributeError, lambda: s.weekday)
def test_asobject_tolist(self):
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [Timestamp('2013-01-31'),
Timestamp('2013-02-28'),
Timestamp('2013-03-31'),
Timestamp('2013-04-30')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx', tz='Asia/Tokyo')
expected_list = [Timestamp('2013-01-31', tz='Asia/Tokyo'),
Timestamp('2013-02-28', tz='Asia/Tokyo'),
Timestamp('2013-03-31', tz='Asia/Tokyo'),
Timestamp('2013-04-30', tz='Asia/Tokyo')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
pd.NaT, datetime(2013, 1, 4)], name='idx')
expected_list = [Timestamp('2013-01-01'),
Timestamp('2013-01-02'), pd.NaT,
Timestamp('2013-01-04')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
for tz in self.tz:
# monotonic
idx1 = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz=tz)
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], tz=tz)
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timestamp('2011-01-01', tz=tz))
self.assertEqual(idx.max(), Timestamp('2011-01-03', tz=tz))
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = DatetimeIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
self.assertEqual(np.min(dr),
Timestamp('2016-01-15 00:00:00', freq='D'))
self.assertEqual(np.max(dr),
Timestamp('2016-01-20 00:00:00', freq='D'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, dr, out=0)
self.assertEqual(np.argmin(dr), 0)
self.assertEqual(np.argmax(dr), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, dr, out=0)
def test_round(self):
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=5,
freq='30Min', tz=tz)
elt = rng[1]
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 01:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(rng.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with tm.assertRaisesRegexp(ValueError, msg):
rng.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, rng.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_repeat_range(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
for tz in self.tz:
index = pd.date_range('2001-01-01', periods=2, freq='D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-02', '2001-01-02'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.date_range('2001-01-01', periods=2, freq='2D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-03', '2001-01-03'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.DatetimeIndex(['2001-01-01', 'NaT', '2003-01-01'],
tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01', '2001-01-01',
'NaT', 'NaT', 'NaT',
'2003-01-01', '2003-01-01', '2003-01-01'],
tz=tz)
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_repeat(self):
reps = 2
msg = "the 'axis' parameter is not supported"
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=2,
freq='30Min', tz=tz)
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
])
res = rng.repeat(reps)
tm.assert_index_equal(res, expected_rng)
self.assertIsNone(res.freq)
tm.assert_index_equal(np.repeat(rng, reps), expected_rng)
tm.assertRaisesRegexp(ValueError, msg, np.repeat,
rng, reps, axis=1)
def test_representation(self):
idx = []
idx.append(DatetimeIndex([], freq='D'))
idx.append(DatetimeIndex(['2011-01-01'], freq='D'))
idx.append(DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'
], freq='H', tz='Asia/Tokyo'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='US/Eastern'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='UTC'))
exp = []
exp.append("""DatetimeIndex([], dtype='datetime64[ns]', freq='D')""")
exp.append("DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', "
"freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+09:00', "
"'2011-01-01 10:00:00+09:00', '2011-01-01 11:00:00+09:00']"
", dtype='datetime64[ns, Asia/Tokyo]', freq='H')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00-05:00', "
"'2011-01-01 10:00:00-05:00', 'NaT'], "
"dtype='datetime64[ns, US/Eastern]', freq=None)")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+00:00', "
"'2011-01-01 10:00:00+00:00', 'NaT'], "
"dtype='datetime64[ns, UTC]', freq=None)""")
with pd.option_context('display.width', 300):
for indx, expected in zip(idx, exp):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(indx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
idx7 = DatetimeIndex(['2011-01-01 09:00', '2011-01-02 10:15'])
exp1 = """Series([], dtype: datetime64[ns])"""
exp2 = """0 2011-01-01
dtype: datetime64[ns]"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: datetime64[ns]"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: datetime64[ns]"""
exp5 = """0 2011-01-01 09:00:00+09:00
1 2011-01-01 10:00:00+09:00
2 2011-01-01 11:00:00+09:00
dtype: datetime64[ns, Asia/Tokyo]"""
exp6 = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 NaT
dtype: datetime64[ns, US/Eastern]"""
exp7 = """0 2011-01-01 09:00:00
1 2011-01-02 10:15:00
dtype: datetime64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4,
idx5, idx6, idx7],
[exp1, exp2, exp3, exp4,
exp5, exp6, exp7]):
result = repr(Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'],
freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
exp1 = """DatetimeIndex: 0 entries
Freq: D"""
exp2 = """DatetimeIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """DatetimeIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """DatetimeIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = ("DatetimeIndex: 3 entries, 2011-01-01 09:00:00+09:00 "
"to 2011-01-01 11:00:00+09:00\n"
"Freq: H")
exp6 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00-05:00 to NaT"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6],
[exp1, exp2, exp3, exp4, exp5, exp6]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T',
'S', 'L', 'U'],
['day', 'day', 'day', 'day', 'hour',
'minute', 'second', 'millisecond',
'microsecond']):
for tz in self.tz:
idx = pd.date_range(start='2013-04-01', periods=30, freq=freq,
tz=tz)
self.assertEqual(idx.resolution, expected)
def test_union(self):
for tz in self.tz:
# union
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=10, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=8, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
def test_add_iadd(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng + delta
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng + 1
expected = pd.date_range('2000-01-01 10:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add a datelike to a DatetimeIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx + Timestamp('2011-01-01')
with tm.assertRaisesRegexp(TypeError, msg):
Timestamp('2011-01-01') + idx
def test_add_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now raises
# TypeError (GH14164)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
with tm.assertRaises(TypeError):
dti + dti
with tm.assertRaises(TypeError):
dti_tz + dti_tz
with tm.assertRaises(TypeError):
dti_tz + dti
with tm.assertRaises(TypeError):
dti + dti_tz
def test_difference(self):
for tz in self.tz:
# diff
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=3, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_diff = rng.difference(other)
tm.assert_index_equal(result_diff, expected)
def test_sub_isub(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
result = rng - delta
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng - 1
expected = pd.date_range('2000-01-01 08:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_sub_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now changed to
# return subtraction -> TimeDeltaIndex (GH ...)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
dti_tz2 = date_range('20130101', periods=3).tz_localize('UTC')
expected = TimedeltaIndex([0, 0, 0])
result = dti - dti
tm.assert_index_equal(result, expected)
result = dti_tz - dti_tz
tm.assert_index_equal(result, expected)
with tm.assertRaises(TypeError):
dti_tz - dti
with tm.assertRaises(TypeError):
dti - dti_tz
with tm.assertRaises(TypeError):
dti_tz - dti_tz2
# isub
dti -= dti
tm.assert_index_equal(dti, expected)
# different length raises ValueError
dti1 = date_range('20130101', periods=3)
dti2 = date_range('20130101', periods=4)
with tm.assertRaises(ValueError):
dti1 - dti2
# NaN propagation
dti1 = DatetimeIndex(['2012-01-01', np.nan, '2012-01-03'])
dti2 = DatetimeIndex(['2012-01-02', '2012-01-03', np.nan])
expected = TimedeltaIndex(['1 days', np.nan, np.nan])
result = dti2 - dti1
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'D']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_comp_nat(self):
left = pd.DatetimeIndex([pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')])
right = pd.DatetimeIndex([pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
for tz in self.tz:
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = DatetimeIndex(np.repeat(idx.values, range(1, len(idx) + 1)),
tz=tz)
exp_idx = pd.date_range('2011-01-01 18:00', freq='-1H', periods=10,
tz=tz)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = pd.date_range('2011-01-01 09:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(idx.unique(), expected)
idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 09:00',
'2013-01-01 09:00', '2013-01-01 08:00',
'2013-01-01 08:00', pd.NaT], tz=tz)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00'],
tz=tz)
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00',
pd.NaT], tz=tz)
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False),
expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(DatetimeIndex,
([0, 1, 0], [0, 0, -1], [0, -1, -1],
['2015', '2015', '2016'], ['2015', '2015', '2014'])):
tm.assertIn(idx[0], idx)
def test_order(self):
# with freq
idx1 = DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], freq='D', name='idx')
idx2 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H',
tz='Asia/Tokyo', name='tzidx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assert_numpy_array_equal(indexer,
np.array([2, 1, 0]),
check_dtype=False)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
# without freq
for tz in self.tz:
idx1 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx1')
exp1 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx1')
idx2 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx2')
exp2 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx2')
idx3 = DatetimeIndex([pd.NaT, '2011-01-03', '2011-01-05',
'2011-01-02', pd.NaT], tz=tz, name='idx3')
exp3 = DatetimeIndex([pd.NaT, pd.NaT, '2011-01-02', '2011-01-03',
'2011-01-05'], tz=tz, name='idx3')
for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx[0]
self.assertEqual(result, Timestamp('2011-01-01', tz=idx.tz))
result = idx[0:5]
expected = pd.date_range('2011-01-01', '2011-01-05', freq='D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.date_range('2011-01-01', '2011-01-09', freq='2D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.date_range('2011-01-12', '2011-01-24', freq='3D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = DatetimeIndex(['2011-01-05', '2011-01-04', '2011-01-03',
'2011-01-02', '2011-01-01'],
freq='-1D', tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx.take([0])
self.assertEqual(result, Timestamp('2011-01-01', tz=idx.tz))
result = idx.take([0, 1, 2])
expected = pd.date_range('2011-01-01', '2011-01-03', freq='D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.date_range('2011-01-01', '2011-01-05', freq='2D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.date_range('2011-01-08', '2011-01-02', freq='-3D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = DatetimeIndex(['2011-01-04', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = DatetimeIndex(['2011-01-29', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_take_invalid_kwargs(self):
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['A', '2A', '-2A', 'Q', '-1Q', 'M', '-1M', 'D', '3D',
'-3D', 'W', '-1W', 'H', '2H', '-2H', 'T', '2T', 'S',
'-3S']:
idx = pd.date_range('2011-01-01 09:00:00', freq=freq, periods=10)
result = pd.DatetimeIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
def test_nat_new(self):
idx = pd.date_range('2011-01-01', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.DatetimeIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
for tz in self.tz:
idx = pd.DatetimeIndex([], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
            idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-01 11:00',
                                    '2011-01-01 12:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
            exp = pd.DatetimeIndex(['2011-01-01 13:00', '2011-01-01 14:00',
                                    '2011-01-01 15:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
            exp = pd.DatetimeIndex(['2011-01-01 07:00', '2011-01-01 08:00',
                                    '2011-01-01 09:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
def test_nat(self):
self.assertIs(pd.DatetimeIndex._na_value, pd.NaT)
self.assertIs(pd.DatetimeIndex([])._na_value, pd.NaT)
for tz in [None, 'US/Eastern', 'UTC']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], tz=tz)
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.DatetimeIndex(['2011-01-01', 'NaT'], tz=tz)
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
for tz in [None, 'UTC', 'US/Eastern', 'Asia/Tokyo']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'])
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'],
tz='US/Pacific')
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
# same internal, different tz
idx3 = pd.DatetimeIndex._simple_new(idx.asi8, tz='US/Pacific')
tm.assert_numpy_array_equal(idx.asi8, idx3.asi8)
self.assertFalse(idx.equals(idx3))
self.assertFalse(idx.equals(idx3.copy()))
self.assertFalse(idx.equals(idx3.asobject))
self.assertFalse(idx.asobject.equals(idx3))
self.assertFalse(idx.equals(list(idx3)))
self.assertFalse(idx.equals(pd.Series(idx3)))
class TestTimedeltaIndexOps(Ops):
def setUp(self):
super(TestTimedeltaIndexOps, self).setUp()
mask = lambda x: isinstance(x, TimedeltaIndex)
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = []
def test_ops_properties(self):
self.check_ops_properties(['days', 'hours', 'minutes', 'seconds',
'milliseconds'])
self.check_ops_properties(['microseconds', 'nanoseconds'])
def test_asobject_tolist(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'),
Timedelta('3 days'), Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), pd.NaT,
timedelta(days=4)], name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'), pd.NaT,
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
# monotonic
idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timedelta('1 days')),
self.assertEqual(idx.max(), Timedelta('3 days')),
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = TimedeltaIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
td = TimedeltaIndex(np.asarray(dr))
self.assertEqual(np.min(td), Timedelta('16815 days'))
self.assertEqual(np.max(td), Timedelta('16820 days'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, td, out=0)
# author: <NAME> and <NAME>
# date: 2020-06-24
"""
This script trains the best model
Usage: src/03_modelling/011_modeling.py \
--file_path1=<file_path1> --file_path2=<file_path2> --file_path3=<file_path3> \
--save_to1=<save_to1> --save_to2=<save_to2> --save_model=<save_model>
Options:
--file_path1=<file_path1> This is the file path for training set
--file_path2=<file_path2> This is the file path for validation set
--file_path3=<file_path3> This is the file path for test set
--save_to1=<save_to1> This is the file path
for the model performance
--save_to2=<save_to2> This is the file path
for the important features
--save_model=<save_model> This is the file path
the model will be saved
"""
# import library
# Basics
from docopt import docopt
import pandas as pd
import matplotlib.pyplot as plt
from joblib import dump
# Preprocessing
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.impute import SimpleImputer
# Models
from sklearn.linear_model import LogisticRegression
from lightgbm import LGBMClassifier
# Pipeline
from sklearn.pipeline import Pipeline
from sklearn.compose import ColumnTransformer
# Evaluation
from sklearn.metrics import plot_confusion_matrix, f1_score
from sklearn.metrics import recall_score, precision_score
from sklearn.metrics import classification_report
# Model Explanation
import eli5
opt = docopt(__doc__)
def main(file_path1, file_path2, file_path3,
save_to1, save_to2, save_model):
train = pd.read_csv(file_path1, low_memory=False)
validation = pd.read_csv(file_path2, low_memory=False)
# test = pd.read_csv(file_path3, low_memory=False)
def feature_engineering(df):
df = df[df.LocalArea.notnull()]
return df.drop(columns=label), df['label']
# df_list = [train, validation, test]
df_list = [train, validation]
for df in df_list:
df.drop(columns=['business_id', 'BusinessName',
'BusinessTradeName', 'Status',
'BusinessSubType', 'Geom',
'NextYearStatus', 'BusinessIndustry'], inplace=True)
# list all categorical varibles and numeric variables
cat_vars = ['FOLDERYEAR', 'BusinessType', 'LocalArea']
label = ['label']
num_vars = [i for i in train.columns
if i not in cat_vars and i not in label]
# preprocessing techniques
numeric_transformer = Pipeline(steps=[
('imputer', SimpleImputer(strategy='median')),
('scaler', StandardScaler())
])
categorical_transformer = Pipeline(steps=[
('imputer', SimpleImputer(strategy='constant',
fill_value='missing')),
('onehot', OneHotEncoder(
handle_unknown='ignore'))
])
preprocessor = ColumnTransformer(
transformers=[
('num', numeric_transformer, num_vars),
('cat', categorical_transformer, cat_vars)
])
X_train, y_train = feature_engineering(train)
X_valid, y_valid = feature_engineering(validation)
# X_test, y_test = feature_engineering(test)
def evaluate_model(model, X_train=X_train, X_test=X_valid,
y_train=y_train, y_test=y_valid, verbose=True):
"""
This function prints train and test accuracies,
classification report, and confusion matrix.
"""
model.fit(X_train, y_train)
train_acc = model.score(X_train, y_train)
test_acc = model.score(X_test, y_test)
if verbose:
print("Train Accuracy:", train_acc)
print("Validation Accuracy", test_acc, "\n")
print(classification_report(y_test, model.predict(X_test)))
print(plot_confusion_matrix(model, X_test, y_test,
display_labels=[0, 1],
cmap=plt.cm.Blues,
values_format='d'))
else:
report = {}
f1 = f1_score(y_test, model.predict(X_test))
recall = recall_score(y_test, model.predict(X_test))
precision = precision_score(y_test, model.predict(X_test))
report['renewed'] = [f1, recall, precision]
f1 = f1_score(y_test, model.predict(X_test), pos_label=0)
recall = recall_score(y_test, model.predict(X_test), pos_label=0)
precision = precision_score(
y_test, model.predict(X_test), pos_label=0)
report['not_renewed'] = [f1, recall, precision]
report['accuracy'] = [train_acc, test_acc]
return report
def convert_for_output(df):
"""
This function converts the output of evaluate_model
into a more concise form. It returns a confusion matrix
and an accuracy matrix.
"""
renew_df = pd.DataFrame.from_dict(df['renewed'])
renew_df.columns = ['renewed']
renew_df['label'] = ['f1', 'recall', 'precision']
renew_df.set_index('label', inplace=True)
no_df = pd.DataFrame.from_dict(df['not_renewed'])
no_df.columns = ['not_renewed']
no_df['label'] = ['f1', 'recall', 'precision']
no_df.set_index('label', inplace=True)
confusion = pd.concat([renew_df, no_df], axis=1)
accu = pd.DataFrame(df['accuracy'])
accu.columns = ['accuracy']
accu['label'] = ['train', 'validation']
accu.set_index('label', inplace=True)
return confusion, accu
def explain_model(pip, df, verbose=True):
"""
This function will output a pandas dataframe to
show the important features and their weights in a model
"""
pp1_features = num_vars + \
list(pip['preprocessor'].transformers_[
1][1]['onehot'].get_feature_names())
return eli5.formatters.as_dataframe.explain_weights_df(
pip['classifier'],
feature_names=pp1_features,
top=30)
lr = LogisticRegression(solver='saga', class_weight='balanced')
lr_pip = Pipeline(steps=[('preprocessor', preprocessor),
('classifier', lr)])
lr_performance = evaluate_model(lr_pip, verbose=False)
lr_confusion, lr_accuracy = convert_for_output(lr_performance)
lgbm = LGBMClassifier(class_weight='balanced')
lgbm_pip = Pipeline(steps=[('preprocessor', preprocessor),
('classifier', lgbm)])
lgbm_performance = evaluate_model(lgbm_pip, verbose=False)
lgbm_confusion, lgbm_accuracy = convert_for_output(lgbm_performance)
lgbm_top_features = explain_model(lgbm_pip, X_train)
df_list = [lr_confusion, lr_accuracy, lgbm_confusion, lgbm_accuracy]
df_name = ['lr_confusion', 'lr_accuracy',
'lgbm_confusion', 'lgbm_accuracy']
writer = pd.ExcelWriter(save_to1)
for i, df in enumerate(df_list):
df.to_excel(writer, df_name[i])
writer.save()
lgbm_top_features.to_csv(save_to2, index=False)
######################
# Save model to file # - JQ
######################
X_train_valid = pd.concat([X_train, X_valid], ignore_index=True)
__all__ = [
'factorize_array',
'factorize_dataframe',
'factorize_ndarray',
'fast_zip_arrays',
'fast_zip_dataframe_columns',
'get_dataframe',
'get_dtypes_and_required_cols',
'get_ids',
'get_json',
'get_timestamp',
'get_utctimestamp',
'merge_dataframes',
'PANDAS_BASIC_DTYPES',
'PANDAS_DEFAULT_NULL_VALUES',
'set_dataframe_column_dtypes'
]
import builtins
import io
import json
import re
import warnings
from datetime import datetime
from itertools import groupby
try:
from json import JSONDecodeError
except ImportError:
from builtins import ValueError as JSONDecodeError
from tabulate import tabulate
import numpy as np
import pandas as pd
import pytz
from .exceptions import OasisException
pd.options.mode.chained_assignment = None
warnings.simplefilter(action='ignore', category=FutureWarning)
PANDAS_BASIC_DTYPES = {
'int8': np.int8,
'uint8': np.uint8,
'int16': np.int16,
'uint16': np.uint16,
'int32': np.int32,
'uint32': np.uint32,
'int64': np.int64,
'uint64': np.uint64,
builtins.int: np.int64,
'float32': np.float32,
'float64': np.float64,
builtins.float: np.float64,
'bool': np.bool,
builtins.bool: np.bool,
'str': np.object,
builtins.str: np.object
}
PANDAS_DEFAULT_NULL_VALUES = {
'-1.#IND',
'1.#QNAN',
'1.#IND',
'-1.#QNAN',
'#N/A N/A',
'#N/A',
'N/A',
'n/a',
'NA',
'#NA',
'NULL',
'null',
'NaN',
'-NaN',
'nan',
'-nan',
''
}
def factorize_array(arr):
"""
Groups a 1D Numpy array by item value, and optionally enumerates the
groups, starting from 1. The default or assumed type is a Numpy
array, although a Python list, tuple or Pandas series will work too.
:param arr: 1D Numpy array (or list, tuple, or Pandas series)
:type arr: numpy.ndarray
:return: A 2-tuple consisting of the enumeration and the value groups
:rtype: tuple
"""
enum, groups = pd.factorize(arr)
return enum + 1, groups
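# Illustrative usage of factorize_array (not part of the original module); the
# sample array is a made-up example:
# >>> enum, groups = factorize_array(np.array(['b', 'a', 'b', 'c']))
# >>> enum
# array([1, 2, 1, 3])
# >>> groups
# array(['b', 'a', 'c'], dtype=object)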
def factorize_ndarray(ndarr, row_idxs=[], col_idxs=[]):
"""
Groups an n-D Numpy array by item value, and optionally enumerates the
groups, starting from 1. The default or assumed type is a Numpy
array, although a Python list, tuple or Pandas series will work too.
:param ndarr: n-D Numpy array (or appropriate Python structure or Pandas dataframe)
:type ndarr: numpy.ndarray
:param row_idxs: A list of row indices to use for factorization (optional)
:type row_idxs: list
:param col_idxs: A list of column indices to use for factorization (optional)
:type col_idxs: list
:return: A 2-tuple consisting of the enumeration and the value groups
:rtype: tuple
"""
if not (row_idxs or col_idxs):
raise OasisException('A list of row indices or column indices must be provided')
_ndarr = ndarr[:, col_idxs].transpose() if col_idxs else ndarr[row_idxs, :]
rows, _ = _ndarr.shape
if rows == 1:
return factorize_array(_ndarr[0])
enum, groups = pd.factorize(fast_zip_arrays(*(arr for arr in _ndarr)))
return enum + 1, groups
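# Illustrative usage of factorize_ndarray (not part of the original module); the
# toy 2-D array is a made-up example, factorized by its first two columns:
# >>> ndarr = np.array([[1, 5], [1, 5], [2, 6]])
# >>> enum, _ = factorize_ndarray(ndarr, col_idxs=[0, 1])
# >>> enum
# array([1, 1, 2])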
def factorize_dataframe(
df,
by_row_labels=None,
by_row_indices=None,
by_col_labels=None,
by_col_indices=None
):
"""
Groups a selection of rows or columns of a Pandas DataFrame array by value,
and optionally enumerates the groups, starting from 1.
:param df: Pandas DataFrame
:type: pandas.DataFrame
:param by_row_labels: A list or tuple of row labels
:type by_row_labels: list, tuple
:param by_row_indices: A list or tuple of row indices
:type by_row_indices: list, tuple
:param by_col_labels: A list or tuple of column labels
:type by_col_labels: list, tuple
:param by_col_indices: A list or tuple of column indices
:type by_col_indices: list, tuple
:return: A 2-tuple consisting of the enumeration and the value groups
:rtype: tuple
"""
by_row_indices = by_row_indices or (None if not by_row_labels else [df.index.get_loc(label) for label in by_row_labels])
by_col_indices = by_col_indices or (None if not by_col_labels else [df.columns.get_loc(label) for label in by_col_labels])
return factorize_ndarray(
df.values,
row_idxs=by_row_indices,
col_idxs=by_col_indices
)
def fast_zip_arrays(*arrays):
"""
Speedy zip of a sequence or ordered iterable of Numpy arrays (Python
iterables with ordered elements such as lists and tuples, or iterators
or generators of these, will also work).
:param arrays: An iterable or iterator or generator of Numpy arrays
:type arrays: list, tuple, collections.Iterator, types.GeneratorType
:return: A Numpy 1D array of n-tuples of the zipped sequences
:rtype: np.array
"""
return pd._libs.lib.fast_zip([arr for arr in arrays])
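# Illustrative usage of fast_zip_arrays (not part of the original module):
# >>> fast_zip_arrays(np.array([1, 2]), np.array(['a', 'b']))
# array([(1, 'a'), (2, 'b')], dtype=object)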
def fast_zip_dataframe_columns(df, cols):
"""
Speedy zip of a sequence or ordered iterable of Pandas DataFrame columns
(Python iterables with ordered elements such as lists and tuples, or
iterators or generators of these, will also work).
:param df: Pandas DataFrame
:type df: pandas.DataFrame
:param cols: An iterable or iterator or generator of Pandas DataFrame columns
:type cols: list, tuple, collections.Iterator, types.GeneratorType
:return: A Numpy 1D array of n-tuples of the dataframe columns to be zipped
:rtype: np.array
"""
return fast_zip_arrays(*(df[col].values for col in cols))
def get_dataframe(
src_fp=None,
src_type='csv',
src_buf=None,
src_data=None,
float_precision='high',
empty_data_error_msg=None,
lowercase_cols=True,
required_cols=(),
col_defaults={},
non_na_cols=(),
col_dtypes={},
sort_cols=None,
sort_ascending=None,
memory_map=False
):
"""
Loads a Pandas dataframe from a source CSV or JSON file, or a text buffer
of such a file (``io.StringIO``), or another Pandas dataframe.
:param src_fp: Source CSV or JSON file path (optional)
:type src_fp: str
:param src_type: Type of source file - CSV or JSON (optional; default is csv)
:type src_type: str
:param src_buf: Text buffer of a source CSV or JSON file (optional)
:type src_buf: io.StringIO
:param float_precision: Indicates whether to support high-precision numbers
present in the data (optional; default is high)
:type float_precision: str
:param empty_data_error_msg: The message of the exception that is thrown
                             if there is no data content, i.e. no rows
                             (optional)
:type empty_data_error_msg: str
:param lowercase_cols: Whether to convert the dataframe columns to lowercase
(optional; default is True)
:type lowercase_cols: bool
:param required_cols: An iterable of columns required to be present in the
source data (optional)
:type required_cols: list, tuple, collections.Iterable
:param col_defaults: A dict of column names and their default values. This
can include both existing columns and new columns -
defaults for existing columns are set row-wise using
pd.DataFrame.fillna, while defaults for non-existent
columns are set column-wise using assignment (optional)
:type col_defaults: dict
:param non_na_cols: An iterable of names of columns which must be dropped
if they contain any null values (optional)
:type non_na_cols: list, tuple, collections.Iterable
:param col_dtypes: A dict of column names and corresponding data types -
Python built-in datatypes are accepted but are mapped
to the corresponding Numpy datatypes (optional)
:type col_dtypes: dict
:param sort_cols: An iterable of column names by which to sort the frame
rows (optional)
:type sort_cols: list, tuple, collections.Iterable
:param sort_ascending: Whether to perform an ascending or descending sort -
is used only in conjunction with the sort_cols
option (optional)
:type sort_ascending: bool
:param memory_map: Memory-efficient option used when loading a frame from
a file or text buffer - is a direct optional argument
for the pd.read_csv method
:type memory_map: bool
:return: A Pandas dataframe
:rtype: pd.DataFrame
"""
if not (src_fp or src_buf or src_data is not None):
raise OasisException(
'A CSV or JSON file path or a string buffer of such a file or an '
'appropriate data structure or Pandas DataFrame must be provided'
)
df = None
# Use a custom list of null values without the string "`NA`" when using
# pandas.read_csv because the default list contains the string "`NA`",
# which can appear in the `CountryCode` column in the loc. file
na_values = list(PANDAS_DEFAULT_NULL_VALUES.difference(['NA']))
if src_fp and src_type == 'csv':
# Find flexible fields in loc file and set their data types to that of
# FlexiLocZZZ
if 'FlexiLocZZZ' in col_dtypes.keys():
headers = list(pd.read_csv(src_fp).head(0))
for flexiloc_col in filter(re.compile('^FlexiLoc').match, headers):
col_dtypes[flexiloc_col] = col_dtypes['FlexiLocZZZ']
df = pd.read_csv(src_fp, float_precision=float_precision, memory_map=memory_map, keep_default_na=False, na_values=na_values, dtype=col_dtypes)
elif src_buf and src_type == 'csv':
df = pd.read_csv(io.StringIO(src_buf), float_precision=float_precision, memory_map=memory_map, keep_default_na=False, na_values=na_values)
elif src_fp and src_type == 'json':
df = pd.read_json(src_fp, precise_float=(True if float_precision == 'high' else False))
elif src_buf and src_type == 'json':
df = pd.read_json(io.StringIO(src_buf), precise_float=(True if float_precision == 'high' else False))
elif isinstance(src_data, list) and src_data:
df = pd.DataFrame(data=src_data)
elif isinstance(src_data, pd.DataFrame):
df = src_data.copy(deep=True)
if len(df) == 0:
raise OasisException(empty_data_error_msg)
if lowercase_cols:
df.columns = df.columns.str.lower()
if required_cols:
_required_cols = [c.lower() for c in required_cols] if lowercase_cols else required_cols
missing = {col for col in sorted(_required_cols)}.difference(df.columns)
if missing:
raise OasisException('Missing required columns: {}'.format(missing))
# Defaulting of column values is best done via the source data and not the
# code, i.e. if a column 'X' in a frame is supposed to have 0s everywhere
# the simplest way of achieving this is for the source data (whether it is
# CSV or JSON file, or a list of dicts or tuples) to have an 'X' column
# with 0 values, so that when it is loaded into the frame the 'X' will have
# 0 values, as expected.
#
# In this sense, defaulting of column values via the `col_defaults`
# optional argument is redundant - but there may be some cases where it is
# convenient to have this feature at the code level.
if col_defaults:
# Lowercase the keys in the defaults dict depending on whether the `lowercase_cols`
# option was passed
_col_defaults = {k.lower(): v for k, v in col_defaults.items()} if lowercase_cols else col_defaults
# Use the defaults dict to set defaults for existing columns
df.fillna(value=_col_defaults, inplace=True)
# A separate step to set as yet non-existent columns with default values
# in the frame
new = {k: _col_defaults[k] for k in set(_col_defaults).difference(df.columns)}
df = df.join(pd.DataFrame(data=new, index=df.index))
# -*- coding: utf-8 -*-
"""
Source code package for the Plouto-Quants (普量学院) quantitative investment course series.
Copyright Plouto-Quants. For teaching purposes only; redistribution and commercial use
are strictly prohibited.
©Plouto-Quants All Rights Reserved
Teaching assistant WeChat: niuxiaomi3
"""
from pymongo import ASCENDING, DESCENDING
from database import DB_CONN
from datetime import datetime, timedelta
import tushare as ts
import numpy as np
import pandas as pd
def compute_drawdown(net_values):
"""
Compute the maximum drawdown.
:param net_values: list of net asset values
"""
# initialise the maximum drawdown to 0
max_drawdown = 0
size = len(net_values)
index = 0
# nested loops to find the maximum drawdown
for net_value in net_values:
for sub_net_value in net_values[index:]:
drawdown = 1 - sub_net_value / net_value
if drawdown > max_drawdown:
max_drawdown = drawdown
index += 1
return max_drawdown
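# Illustrative usage of compute_drawdown (not part of the original course code);
# the net-value series is a made-up example:
# >>> compute_drawdown([1.0, 2.0, 1.0, 1.5])
# 0.5
# The largest peak-to-trough loss runs from 2.0 down to 1.0, i.e. 1 - 1.0/2.0 = 0.5.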
def dynamic_max_drawdown(net_value):
nums = len(net_value)
maxDrawDown = pd.Series()
'''
Created on Jun 25, 2015
@author: eze
'''
import logging
import os
import re
import traceback
from multiprocessing.synchronize import Lock
import sys
import numpy as np
import pandas as pd
from tqdm import tqdm
from Bio.PDB.PDBParser import PDBParser
from Bio.PDB.Polypeptide import CaPPBuilder
from SNDG import Struct
from SNDG import init_log, mkdir, execute
from SNDG.Structure.CompoundTypes import compound_type
from SNDG.Structure.PDBs import PDBs as PDBsIterator
import shutil
init_log()
_log = logging.getLogger("dn_ext")
from argparse import ArgumentParser
from argparse import RawDescriptionHelpFormatter
def main(argv=None): # IGNORE:C0111
'''Command line options.'''
if argv is None:
argv = sys.argv
else:
sys.argv.extend(argv)
parser = ArgumentParser( formatter_class=RawDescriptionHelpFormatter)
parser.add_argument("-v", "--verbose", dest="verbose", action="count",
help="set verbosity level [default: %(default)s]")
# parser.add_argument("-dir", "--structs_dir", required = True )
parser.add_argument("-db", "--database_name", default='pdb')
parser.add_argument("-host", "--db_host", default='127.0.0.1')
parser.add_argument( "--procesados", default='/tmp/pdbs_dist_procesados.txt')
parser.add_argument( "--domains", default='/data/databases/pdb/processed/dns_pdbs.tlb')
parser.add_argument( "--seqs", default='/data/databases/pdb/processed/pdb_seq_res.fasta')
parser.add_argument( "--pdbs", default='/data/databases/pdb/')
parser.add_argument( "--distances", default='/data/databases/pdb/processed/distances.tbl',
help="Final output: table with atom distances between residues and ligands. Only for distances less than 'dist' parameter")
parser.add_argument( "--dist", default=5)
parser.add_argument( "--pdbs_with_drug", default='/data/databases/pdb/processed/pdbs_with_drug.txt',
help="Output: list of PDB codes with an associated ligand")
args = parser.parse_args()
if not os.path.exists(args.pdbs):
sys.stderr.write("%s not found. Specify where is pdbs/divided directory" % (
parser.pdbs
))
sys.exit(1)
PDB_PATH = args.pdbs
CONTACT_DIST = args.dist
pdbs_with_drug_path = args.pdbs_with_drug
if not os.path.exists(os.path.dirname(args.pdbs_with_drug)):
sys.stderr.write("can't %s create %s. Set pdbs_with_drug correctly" % (
pdbs_with_drug_path
))
sys.exit(1)
if not os.path.exists(os.path.dirname(args.distances)):
sys.stderr.write("can't %s create %s. Set distances correctly" % (
args.distances
))
sys.exit(1)
pdbs_procesados_path = args.procesados
print("In %s the processed pdbs are kept, if the file is deleted, the process starts from scratch " % pdbs_procesados_path)
print("Outputs: '%s' and '%s' " % (pdbs_with_drug_path,args.distances))
pdbs_procesados = []
if os.path.exists(pdbs_procesados_path):
with open(pdbs_procesados_path) as handle:
pdbs_procesados = [x.strip() for x in handle.readlines()]
pdbs_procesados = {x: 1 for x in pdbs_procesados}
pdbs_iterator = PDBsIterator(pdb_dir=args.pdbs)
def not_processed_iter():
for pdb, pdb_path in pdbs_iterator:
if pdb not in pdbs_procesados:
yield [pdb, pdb_path]
DNsPDBs = args.domains
if not os.path.exists(DNsPDBs):
seqs_from_pdb =args.seqs
if not os.path.exists(seqs_from_pdb):
sys.stderr.write("%s does not exists and %s not found. Specify where it is." % (
DNsPDBs,seqs_from_pdb
))
sys.exit(1)
sys.stderr.write("%s not found. You can create it with the following command: \n" % DNsPDBs)
sys.stderr.write(
"hmmscan --cut_tc --domtblout dns_pdbs.tlb --acc -o pdb_seq_res.hmm Pfam-A.hmm seqs_from_pdb.fasta")
sys.exit(1)
drugcompounds = [x for x, y in compound_type.items() if y in ["DRUG", "COFACTOR"]]
othercompounds = [x for x, y in compound_type.items() if y in ["METAL", "SUGAR", "NUCLEOTIDE", "LIPID"]]
aminoacidcompounds = [x for x, y in compound_type.items() if y in ["MODIFIED", "RESIDUE"]]
drugcompounds = othercompounds + drugcompounds
pdbs_with_drug_path = "/data/databases/pdb/processed/pdbs_with_drug.txt"
_log.info("proceced pdbs: %i" % len(pdbs_procesados))
ppb = CaPPBuilder()
p = PDBParser(PERMISSIVE=1, QUIET=1)
pdbs_with_drug = []
if os.path.exists(pdbs_with_drug_path):
_log.info("pdbs with drugs already loaded")
with open(pdbs_with_drug_path) as handle:
for x in handle.readlines():
pdbs_with_drug.append(x.strip())
else:
with open(pdbs_with_drug_path, "a") as handle:
_log.info("pdbs with drugs will be loaded")
pdbs = list(pdbs_iterator)
for pdb, file_path in tqdm(pdbs):
try:
if pdb not in pdbs_with_drug:
structure = p.get_structure(pdb, file_path)
for res in structure.get_residues():
if res.resname in drugcompounds:
pdbs_with_drug.append(pdb)
handle.write(pdb + "\n")
handle.flush()
break
except Exception as ex:
print (str(ex))
# import re
# dns_table = re.sub(r" +", "\t","\n".join( [str(i) + "\t" + x for i,x in enumerate(open('/data/databases/pdb/processed/dns_pdbs.tlb').readlines()) if not x.startswith("#") ]) )
if not os.path.exists(DNsPDBs + "2"):
cols = ["target_name", "accession", "tlen", "query_name", "accession2",
"qlen", "E-value", "score1", "bias1", "#", "of", "c-Evalue", "i-Evalue",
"score2", "bias2", "from1", "to1", "from2", "to2", "from3", "to3", "acc"]
_log.info("correcting hmmer-pdb output")
regexp = re.compile(" +")
items = []
for x in tqdm(open(DNsPDBs).readlines()):
if not x.startswith("#"):
line = regexp.split(x)
items.append(line[0:len(cols)])
# record = {c: line[i] for i, c in enumerate(cols)}
df_hmm = pd.DataFrame.from_records(items, columns=cols)
# df_hmm = df = pd.read_table('/data/databases/pdb/processed/dns_pdbs.tlb', index_col=None, header=None, delimiter=r"\s+",comment="#",names=cols)
# df_hmm = df_hmm.dropna()
df_hmm = df_hmm[["accession", "query_name", "from3", "to3"]]
df_hmm.to_csv(DNsPDBs + "2")
df_hmm["pdb"] = map(lambda x: x.split("_")[0].lower().strip(), df_hmm["query_name"])
df_hmm["chain"] = map(lambda x: x.split("_")[1].upper().strip(), df_hmm["query_name"])
df_hmm["start_res"] = map(lambda x: x.split("_")[2].upper().strip(), df_hmm["query_name"])
df_hmm["end_res"] = map(lambda x: x.split("_")[3].upper().strip(), df_hmm["query_name"])
else:
df_hmm = pd.read_csv(DNsPDBs + "2")
#!/usr/bin/env python
"""plotlib.py: module is dedicated to plottting."""
__author__ = "<NAME>."
__copyright__ = "Copyright 2020, SuperDARN@VT"
__credits__ = []
__license__ = "MIT"
__version__ = "1.0."
__maintainer__ = "<NAME>."
__email__ = "<EMAIL>"
__status__ = "Research"
import matplotlib
matplotlib.use("Agg")
import datetime as dt
from matplotlib.collections import LineCollection
from mpl_toolkits.axes_grid1 import SubplotDivider, Size
from mpl_toolkits.axes_grid1.mpl_axes import Axes
import matplotlib.pyplot as plt
from pylab import gca, gcf
import numpy as np
from matplotlib.transforms import Affine2D, Transform
import mpl_toolkits.axisartist.floating_axes as floating_axes
from matplotlib.projections import polar
from mpl_toolkits.axisartist.grid_finder import FixedLocator, DictFormatter
from types import MethodType
import glob
import pandas as pd
from dateutil import tz
from scipy.io import loadmat
import copy
from scipy.stats import skewnorm
from scipy.integrate import trapz
from scipy import signal
import sys
sys.path.append("sd_cartopy/")
import rad_fov
from fov import *
#from PyIF import te_compute as te
#from sklearn.feature_selection import mutual_info_regression as MIR
#from SALib.sample import saltelli
#from SALib.analyze import sobol
#from SALib.analyze import rbd_fast
import itertools
from math import pi
from matplotlib.legend_handler import HandlerPatch
font = {'size' : 8}
matplotlib.rc('font', **font)
class HandlerCircle(HandlerPatch):
def create_artists(self, legend, orig_handle,
xdescent, ydescent, width, height, fontsize, trans):
center = 0.5 * width - 0.5 * xdescent, 0.5 * height - 0.5 * ydescent
p = plt.Circle(xy=center, radius=orig_handle.radius)
self.update_prop(p, orig_handle, legend)
p.set_transform(trans)
return [p]
INT_F = 300
INT_R = 300
import utils
def textHighlighted(xy, text, ax=None, color="k", fontsize=None, xytext=(0,0),
zorder=None, text_alignment=(0,0), xycoords="data",
textcoords="offset points", **kwargs):
"""
Plot highlighted annotation (with a white lining)
Parameters
----------
xy : position of point to annotate
text : str text to show
ax : Optional[ ]
color : Optional[char]
text color; deafult is "k"
fontsize : Optional [ ] text font size; default is None
xytext : Optional[ ] text position; default is (0, 0)
zorder : text zorder; default is None
text_alignment : Optional[ ]
xycoords : Optional[ ] xy coordinate[1]; default is "data"
textcoords : Optional[ ] text coordinate[2]; default is "offset points"
**kwargs :
Notes
-----
Belongs to class rbspFp.
References
----------
[1] see `matplotlib.pyplot.annotate
<http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.annotate>`)
[2] see `matplotlib.pyplot.annotate
<http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.annotate>`)
"""
if ax is None: ax = gca()
text_path = matplotlib.text.TextPath((0, 0), text, size=fontsize, **kwargs)
p1 = matplotlib.patches.PathPatch(text_path, ec="w", lw=4, fc="w", alpha=0.7,
                                  zorder=zorder, transform=matplotlib.transforms.IdentityTransform())
p2 = matplotlib.patches.PathPatch(text_path, ec="none", fc=color, zorder=zorder,
                                  transform=matplotlib.transforms.IdentityTransform())
offsetbox2 = matplotlib.offsetbox.AuxTransformBox(matplotlib.transforms.IdentityTransform())
offsetbox2.add_artist(p1)
offsetbox2.add_artist(p2)
ab = matplotlib.offsetbox.AnnotationBbox(offsetbox2, xy, xybox=xytext, xycoords=xycoords,
boxcoords=textcoords, box_alignment=text_alignment, frameon=False)
ab.set_zorder(zorder)
ax.add_artist(ab)
return
def addColorbar(mappable, ax):
"""
Append colorbar to axes
Parameters
----------
mappable : a mappable object
ax : an axes object
Returns
-------
cbax : colorbar axes object
Notes
-----
This is mostly useful for axes created with :func:`curvedEarthAxes`.
"""
fig1 = ax.get_figure()
divider = SubplotDivider(fig1, *ax.get_geometry(), aspect=True)
# axes for colorbar
cbax = Axes(fig1, divider.get_position())
h = [Size.AxesX(ax), # main axes
Size.Fixed(0.05), # padding
Size.Fixed(0.1)] # colorbar
v = [Size.AxesY(ax)]
_ = divider.set_horizontal(h)
_ = divider.set_vertical(v)
_ = ax.set_axes_locator(divider.new_locator(nx=0, ny=0))
_ = cbax.set_axes_locator(divider.new_locator(nx=2, ny=0))
_ = fig1.add_axes(cbax)
_ = cbax.axis["left"].toggle(all=False)
_ = cbax.axis["top"].toggle(all=False)
_ = cbax.axis["bottom"].toggle(all=False)
_ = cbax.axis["right"].toggle(ticklabels=True, label=True)
_ = plt.colorbar(mappable, cax=cbax, shrink=0.1)
return cbax
def curvedEarthAxes(rect=111, fig=None, minground=0., maxground=2000, minalt=0,
maxalt=500, Re=6371., nyticks=5, nxticks=4):
"""
Create curved axes in ground-range and altitude
Parameters
----------
rect : Optional[int] subplot spcification
fig : Optional[pylab.figure object] (default to gcf)
minground : Optional[float]
maxground : Optional[int] maximum ground range [km]
minalt : Optional[int] lowest altitude limit [km]
maxalt : Optional[int] highest altitude limit [km]
Re : Optional[float] Earth radius in kilometers
nyticks : Optional[int] Number of y axis tick marks; default is 5
nxticks : Optional[int] Number of x axis tick marks; deafult is 4
Returns
-------
ax : matplotlib.axes object containing formatting
aax : matplotlib.axes objec containing data
"""
ang = maxground / Re
minang = minground / Re
angran = ang - minang
angle_ticks = [(0, "{:.0f}".format(minground))]
while angle_ticks[-1][0] < angran:
tang = angle_ticks[-1][0] + 1./nxticks*angran
angle_ticks.append((tang, "{:.0f}".format((tang-minang)*Re)))
grid_locator1 = FixedLocator([v for v, s in angle_ticks])
tick_formatter1 = DictFormatter(dict(angle_ticks))
altran = float(maxalt - minalt)
alt_ticks = [(minalt+Re, "{:.0f}".format(minalt))]
while alt_ticks[-1][0] < Re+maxalt:
alt_ticks.append((altran / float(nyticks) + alt_ticks[-1][0],
"{:.0f}".format(altran / float(nyticks) +
alt_ticks[-1][0] - Re)))
_ = alt_ticks.pop()
grid_locator2 = FixedLocator([v for v, s in alt_ticks])
tick_formatter2 = DictFormatter(dict(alt_ticks))
tr_rotate = Affine2D().rotate(np.pi/2-ang/2)
tr_shift = Affine2D().translate(0, Re)
tr = polar.PolarTransform() + tr_rotate
grid_helper = floating_axes.GridHelperCurveLinear(tr, extremes=(0, angran, Re+minalt, Re+maxalt),
grid_locator1=grid_locator1, grid_locator2=grid_locator2, tick_formatter1=tick_formatter1,
tick_formatter2=tick_formatter2,)
if not fig: fig = plt.figure(figsize=(5,3), dpi=240)
ax1 = floating_axes.FloatingSubplot(fig, rect, grid_helper=grid_helper)
# adjust axis
print("adjust ax")
ax1.set_ylabel(r"Height, $km$", fontdict={"size":2})
ax1.set_xlabel(r"Ground Range, $km$", fontdict={"size":2})
ax1.invert_xaxis()
ax1.minground = minground
ax1.maxground = maxground
ax1.minalt = minalt
ax1.maxalt = maxalt
ax1.Re = Re
fig.add_subplot(ax1, transform=tr)
# create a parasite axes whose transData in RA, cz
aux_ax = ax1.get_aux_axes(tr)
# for aux_ax to have a clip path as in ax
aux_ax.patch = ax1.patch
# but this has a side effect that the patch is drawn twice, and possibly
# over some other artists. So, we decrease the zorder a bit to prevent this.
ax1.patch.zorder=0.9
return ax1, aux_ax
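# Illustrative usage of curvedEarthAxes (not part of the original module); figure
# size and ranges below are assumptions:
# fig = plt.figure(figsize=(5, 3), dpi=150)
# ax, aax = curvedEarthAxes(rect=111, fig=fig, maxground=1500, maxalt=300)
# aax.plot([0.05, 0.1], [6371. + 100., 6371. + 250.], "b")  # data axes take (angle, Re + alt)
# fig.savefig("curved_axes_demo.png", bbox_inches="tight")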
def plot_edens(time, beam=None, maxground=2000, maxalt=500,
nel_cmap="jet", nel_lim=[10, 12], title=False,
fig=None, rect=111, ax=None, aax=None,plot_colorbar=True,
nel_rasterize=False):
"""
Plot electron density profile
Parameters
----------
time : datetime.datetime time of profile
beam : Optional[ ] beam number
maxground : Optional[int]
maximum ground range [km]
maxalt : Optional[int] highest altitude limit [km]
nel_cmap : Optional[str] color map name for electron density index coloring
nel_lim : Optional[list, int] electron density index plotting limits
title : Optional[bool] Show default title
fig : Optional[pylab.figure] object (default to gcf)
rect : Optional[int] subplot spcification
ax : Optional[ ] Existing main axes
aax : Optional[ ] Existing auxialary axes
plot_colorbar : Optional[bool] Plot a colorbar
nel_rasterize : Optional[bool] Rasterize the electron density plot
Returns
-------
ax : matplotlib.axes object containing formatting
aax : matplotlib.axes object containing data
cbax : matplotlib.axes object containing colorbar
"""
return
def get_polar(d, Re=6371.):
""" Convert to polar coordinates """
th = d.grange / Re
r = d.height + Re
dop, sth, dth = d.dop, d.sth, d.dth
return th, r, dop, sth, dth
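# Illustrative usage of get_polar (not part of the original module): it expects a
# ray dataframe with 'grange', 'height', 'dop', 'sth' and 'dth' columns; the file
# name below is hypothetical:
# d = pd.read_csv("ti(0).bm(7).elv(30).bgc.csv")
# th, r, dop, sth, dth = get_polar(d)
# aax.plot(th, r, c="0.3")  # draw the ray on the auxiliary polar axes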
def plot_rays(dic, time, ti, beam, case, txt, maxground=2000, maxalt=500, step=1,
showrefract=False, nr_cmap="jet_r", nr_lim=[0.0, .1],
raycolor="0.3", title=True, zorder=2, alpha=1,
fig=None, rect=111, ax=None, aax=None):
"""
Plot ray paths
Parameters
----------
dic: str location of the data files
time: datetime.datetime time of rays
ti: int time index
beam: beam number
maxground : Optional[int] maximum ground range [km]
maxalt : Optional[int] highest altitude limit [km]
step : Optional[int] step between each plotted ray (in number of ray steps)
showrefract : Optional[bool] show refractive index along ray paths (supersedes raycolor)
nr_cmap : Optional[str] color map name for refractive index coloring
nr_lim : Optional[list, float] refractive index plotting limits
raycolor : Optional[float] color of ray paths
title : Optional[bool] Show default title
zorder : Optional[int]
alpha : Optional[int]
fig : Optional[pylab.figure] object (default to gcf)
rect : Optional[int] subplot spcification
ax : Optional[ ] Existing main axes
aax : Optional[ ] Existing auxialary axes
Returns
-------
ax : matplotlib.axes object containing formatting
aax : matplotlib.axes object containing data
cbax : matplotlib.axes object containing colorbar
"""
if not ax and not aax: ax, aax = curvedEarthAxes(fig=fig, rect=rect, maxground=maxground, maxalt=maxalt)
else:
if hasattr(ax, "time"): time = ax.time
if hasattr(ax, "beam"): beam = ax.beam
files = glob.glob(dic + "ti({ti}).bm({bm}).elv(*).{case}.csv".format(ti=ti, bm=beam, case=case))
files.sort()
for f in files:
th, r, v, _, _ = get_polar(pd.read_csv(f))
if not showrefract: aax.plot(th, r, c=raycolor, zorder=zorder, alpha=alpha)
else:
points = np.array([th, r]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
lcol = LineCollection( segments, zorder=zorder, alpha=alpha)
_ = lcol.set_cmap( nr_cmap )
_ = lcol.set_norm( plt.Normalize(*nr_lim) )
_ = lcol.set_array( v )
_ = aax.add_collection( lcol )
if title:
stitle = "%s UT"%time.strftime("%Y-%m-%d %H:%M")
ax.set_title( stitle )
ax.text(1.05, 0.5, txt, horizontalalignment="center", verticalalignment="center",
transform=ax.transAxes, rotation=90)
if showrefract:
cbax = addColorbar(lcol, ax)
_ = cbax.set_ylabel(r"$\Delta$ f")
else: cbax = None
ax.beam = beam
fig = ax.get_figure()
fig.savefig(dic + "rt.ti({ti}).bm({bm}).{case}.png".format(ti=ti, bm=beam, case=case), bbox_inches="tight")
plt.close()
return ax, aax, cbax
def plot_exp_rays(dic, time, beam, cat="bgc", maxground=2000, maxalt=300, step=1,
showrefract=False, nr_cmap="jet_r", nr_lim=[0.8, 1.],
raycolor="0.3", title=False, zorder=2, alpha=1,
fig=None, rect=111, ax=None, aax=None):
""" Plot ray paths (previous method) """
if not ax and not aax: ax, aax = curvedEarthAxes(fig=fig, rect=rect, maxground=maxground, maxalt=maxalt)
else:
if hasattr(ax, "time"): time = ax.time
if hasattr(ax, "beam"): beam = ax.beam
files = glob.glob(dic + "exp.{cat}.bm({bm}).elv(*).csv".format(cat=cat, bm=beam))
files.sort()
for f in files:
th, r, v, _, _ = get_polar(pd.read_csv(f))
if not showrefract: aax.plot(th, r, c=raycolor, zorder=zorder, alpha=alpha)
else:
points = np.array([th, r]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
lcol = LineCollection( segments, zorder=zorder, alpha=alpha)
_ = lcol.set_cmap( nr_cmap )
_ = lcol.set_norm( plt.Normalize(*nr_lim) )
_ = lcol.set_array( v )
_ = aax.add_collection( lcol )
if title:
stitle = ""
ax.set_title( stitle )
if showrefract:
cbax = addColorbar(lcol, ax)
_ = cbax.set_ylabel(r"$\Delta$ f")
else: cbax = None
ax.beam = beam
fig = ax.get_figure()
fig.savefig(dic + "rt.exp.{cat}.bm({bm}).png".format(cat=cat, bm=beam), bbox_inches="tight")
plt.close()
return ax, aax, cbax
def plot_radstn(p,f,pz,fz,fname,lat,lon,t,zone="America/New_York"):
""" Plot radar vertical dataset """
fig = plt.figure(figsize=(4,4), dpi=120)
ax = fig.add_subplot(111)
ax.set_ylabel("Alt. [km]")
ax.set_xlabel(r"EDens [$cm^{-3}$]")
ax.semilogx(p, pz, "r")
ax.semilogx(f, fz, "r--")
ax.set_ylim(50, 130)
ax.set_xlim(1e2, 1e7)
sza = utils.calculate_sza(t, lat, lon, alt=300)
l = t.replace(tzinfo=tz.gettz("UTC")).astimezone(tz.gettz("America/New_York"))
ax.set_title(r"UT-%s"%(t.strftime("%Y-%m-%d %H:%M")))
ax.text(1.05, 0.5, "Loc:(%.1f,%.1f), $\chi$-%.1f, LT-%s"%(lat, lon, sza, l.strftime("%H:%M")),
horizontalalignment="center", verticalalignment="center", transform=ax.transAxes, rotation=90)
fig.savefig(fname,bbox_inches="tight")
plt.close()
return
def plot_velocity_ts(dn, rad, bmnum):
""" Plot velocity TS data """
fig = plt.figure(figsize=(6,6), dpi=150)
axs = [fig.add_subplot(311), fig.add_subplot(312), fig.add_subplot(313)]
mkeys = ["<KEY>"]
fmt = matplotlib.dates.DateFormatter("%H:%M")
fname = "data/sim/{dn}/{rad}/velocity.ts.csv".format(dn=dn.strftime("%Y.%m.%d.%H.%M"), rad=rad)
sdat = pd.read_csv(fname, parse_dates=["dn"])
from datetime import datetime
import numpy as np
import pandas as pd
import pytest
from featuretools.primitives import (
Age,
EmailAddressToDomain,
IsFreeEmailDomain,
TimeSince,
URLToDomain,
URLToProtocol,
URLToTLD,
Week,
get_transform_primitives
)
def test_time_since():
time_since = TimeSince()
# class datetime.datetime(year, month, day[, hour[, minute[, second[, microsecond[,
times = pd.Series([datetime(2019, 3, 1, 0, 0, 0, 1),
datetime(2019, 3, 1, 0, 0, 1, 0),
datetime(2019, 3, 1, 0, 2, 0, 0)])
cutoff_time = datetime(2019, 3, 1, 0, 0, 0, 0)
values = time_since(array=times, time=cutoff_time)
assert(list(map(int, values)) == [0, -1, -120])
time_since = TimeSince(unit='nanoseconds')
values = time_since(array=times, time=cutoff_time)
assert(list(map(round, values)) == [-1000, -1000000000, -120000000000])
time_since = TimeSince(unit='milliseconds')
values = time_since(array=times, time=cutoff_time)
assert(list(map(int, values)) == [0, -1000, -120000])
time_since = TimeSince(unit='Milliseconds')
values = time_since(array=times, time=cutoff_time)
assert(list(map(int, values)) == [0, -1000, -120000])
time_since = TimeSince(unit='Years')
values = time_since(array=times, time=cutoff_time)
assert(list(map(int, values)) == [0, 0, 0])
times_y = pd.Series([datetime(2019, 3, 1, 0, 0, 0, 1),
datetime(2020, 3, 1, 0, 0, 1, 0),
datetime(2017, 3, 1, 0, 0, 0, 0)])
time_since = TimeSince(unit='Years')
values = time_since(array=times_y, time=cutoff_time)
assert(list(map(int, values)) == [0, -1, 1])
error_text = 'Invalid unit given, make sure it is plural'
with pytest.raises(ValueError, match=error_text):
time_since = TimeSince(unit='na')
time_since(array=times, time=cutoff_time)
def test_age():
age = Age()
dates = pd.Series(datetime(2010, 2, 26))
ages = age(dates, time=datetime(2020, 2, 26))
correct_ages = [10.005] # .005 added due to leap years
np.testing.assert_array_almost_equal(ages, correct_ages, decimal=3)
def test_age_two_years_quarterly():
age = Age()
dates = pd.Series(pd.date_range('2010-01-01', '2011-12-31', freq='Q'))
ages = age(dates, time=datetime(2020, 2, 26))
correct_ages = [9.915, 9.666, 9.414, 9.162, 8.915, 8.666, 8.414, 8.162]
np.testing.assert_array_almost_equal(ages, correct_ages, decimal=3)
def test_age_leap_year():
age = Age()
dates = pd.Series([datetime(2016, 1, 1)])
ages = age(dates, time=datetime(2016, 3, 1))
correct_ages = [(31 + 29) / 365.0]
np.testing.assert_array_almost_equal(ages, correct_ages, decimal=3)
# born leap year date
dates = pd.Series([datetime(2016, 2, 29)])
ages = age(dates, time=datetime(2020, 2, 29))
correct_ages = [4.0027] # .0027 added due to leap year
np.testing.assert_array_almost_equal(ages, correct_ages, decimal=3)
def test_age_nan():
age = Age()
dates = pd.Series([datetime(2010, 1, 1), np.nan, datetime(2012, 1, 1)])
ages = age(dates, time=datetime(2020, 2, 26))
correct_ages = [10.159, np.nan, 8.159]
np.testing.assert_array_almost_equal(ages, correct_ages, decimal=3)
def test_week_no_deprecation_message():
dates = [datetime(2019, 1, 3),
datetime(2019, 6, 17, 11, 10, 50),
datetime(2019, 11, 30, 19, 45, 15)
]
with pytest.warns(None) as record:
week = Week()
week(dates).tolist()
assert not record
def test_url_to_domain_urls():
url_to_domain = URLToDomain()
urls = pd.Series(['https://play.google.com/store/apps/details?id=com.skgames.trafficracer%22',
'http://mplay.google.co.in/sadfask/asdkfals?dk=10',
'http://lplay.google.co.in/sadfask/asdkfals?dk=10',
'http://play.google.co.in/sadfask/asdkfals?dk=10',
'http://tplay.google.co.in/sadfask/asdkfals?dk=10',
'http://www.google.co.in/sadfask/asdkfals?dk=10',
'www.google.co.in/sadfask/asdkfals?dk=10',
'http://user:[email protected]/?a=b#asdd',
'https://www.compzets.com?asd=10',
'www.compzets.com?asd=10',
'facebook.com',
'https://www.compzets.net?asd=10',
'http://www.featuretools.org'])
correct_urls = ['play.google.com',
'mplay.google.co.in',
'lplay.google.co.in',
'play.google.co.in',
'tplay.google.co.in',
'google.co.in',
'google.co.in',
'google.com',
'compzets.com',
'compzets.com',
'facebook.com',
'compzets.net',
'featuretools.org']
np.testing.assert_array_equal(url_to_domain(urls), correct_urls)
def test_url_to_domain_long_url():
url_to_domain = URLToDomain()
urls = pd.Series(["http://chart.apis.google.com/chart?chs=500x500&chma=0,0,100, \
100&cht=p&chco=FF0000%2CFFFF00%7CFF8000%2C00FF00%7C00FF00%2C0 \
000FF&chd=t%3A122%2C42%2C17%2C10%2C8%2C7%2C7%2C7%2C7%2C6%2C6% \
2C6%2C6%2C5%2C5&chl=122%7C42%7C17%7C10%7C8%7C7%7C7%7C7%7C7%7C \
6%7C6%7C6%7C6%7C5%7C5&chdl=android%7Cjava%7Cstack-trace%7Cbro \
adcastreceiver%7Candroid-ndk%7Cuser-agent%7Candroid-webview%7 \
Cwebview%7Cbackground%7Cmultithreading%7Candroid-source%7Csms \
%7Cadb%7Csollections%7Cactivity|Chart"])
correct_urls = ['chart.apis.google.com']
results = url_to_domain(urls)
np.testing.assert_array_equal(results, correct_urls)
def test_url_to_domain_nan():
url_to_domain = URLToDomain()
urls = pd.Series(['www.featuretools.com', np.nan], dtype='object')
correct_urls = pd.Series(['featuretools.com', np.nan], dtype='object')
results = url_to_domain(urls)
pd.testing.assert_series_equal(results, correct_urls)
def test_url_to_protocol_urls():
url_to_protocol = URLToProtocol()
urls = pd.Series(['https://play.google.com/store/apps/details?id=com.skgames.trafficracer%22',
'http://mplay.google.co.in/sadfask/asdkfals?dk=10',
'http://lplay.google.co.in/sadfask/asdkfals?dk=10',
'www.google.co.in/sadfask/asdkfals?dk=10',
'http://user:[email protected]/?a=b#asdd',
'https://www.compzets.com?asd=10',
'www.compzets.com?asd=10',
'facebook.com',
'https://www.compzets.net?asd=10',
'http://www.featuretools.org',
'https://featuretools.com'])
correct_urls = pd.Series(['https',
'http',
'http',
np.nan,
'http',
'https',
np.nan,
np.nan,
'https',
'http',
'https'])
results = url_to_protocol(urls)
pd.testing.assert_series_equal(results, correct_urls)
def test_url_to_protocol_long_url():
url_to_protocol = URLToProtocol()
urls = pd.Series(["http://chart.apis.google.com/chart?chs=500x500&chma=0,0,100, \
100&cht=p&chco=FF0000%2CFFFF00%7CFF8000%2C00FF00%7C00FF00%2C0 \
000FF&chd=t%3A122%2C42%2C17%2C10%2C8%2C7%2C7%2C7%2C7%2C6%2C6% \
2C6%2C6%2C5%2C5&chl=122%7C42%7C17%7C10%7C8%7C7%7C7%7C7%7C7%7C \
6%7C6%7C6%7C6%7C5%7C5&chdl=android%7Cjava%7Cstack-trace%7Cbro \
adcastreceiver%7Candroid-ndk%7Cuser-agent%7Candroid-webview%7 \
Cwebview%7Cbackground%7Cmultithreading%7Candroid-source%7Csms \
%7Cadb%7Csollections%7Cactivity|Chart"])
correct_urls = ['http']
results = url_to_protocol(urls)
np.testing.assert_array_equal(results, correct_urls)
def test_url_to_protocol_nan():
url_to_protocol = URLToProtocol()
urls = pd.Series(['www.featuretools.com', np.nan, ''], dtype='object')
correct_urls = pd.Series([np.nan, np.nan, np.nan], dtype='object')
results = url_to_protocol(urls)
pd.testing.assert_series_equal(results, correct_urls)
def test_url_to_tld_urls():
url_to_tld = URLToTLD()
urls = pd.Series(['https://play.google.com/store/apps/details?id=com.skgames.trafficracer%22',
'http://mplay.google.co.in/sadfask/asdkfals?dk=10',
'http://lplay.google.co.in/sadfask/asdkfals?dk=10',
'http://play.google.co.in/sadfask/asdkfals?dk=10',
'http://tplay.google.co.in/sadfask/asdkfals?dk=10',
'http://www.google.co.in/sadfask/asdkfals?dk=10',
'www.google.co.in/sadfask/asdkfals?dk=10',
'http://user:[email protected]/?a=b#asdd',
'https://www.compzets.dev?asd=10',
'www.compzets.com?asd=10',
'https://www.compzets.net?asd=10',
'http://www.featuretools.org',
'featuretools.org'])
correct_urls = ['com',
'in',
'in',
'in',
'in',
'in',
'in',
'com',
'dev',
'com',
'net',
'org',
'org']
np.testing.assert_array_equal(url_to_tld(urls), correct_urls)
def test_url_to_tld_long_url():
url_to_tld = URLToTLD()
urls = pd.Series(["http://chart.apis.google.com/chart?chs=500x500&chma=0,0,100, \
100&cht=p&chco=FF0000%2CFFFF00%7CFF8000%2C00FF00%7C00FF00%2C0 \
000FF&chd=t%3A122%2C42%2C17%2C10%2C8%2C7%2C7%2C7%2C7%2C6%2C6% \
2C6%2C6%2C5%2C5&chl=122%7C42%7C17%7C10%7C8%7C7%7C7%7C7%7C7%7C \
6%7C6%7C6%7C6%7C5%7C5&chdl=android%7Cjava%7Cstack-trace%7Cbro \
adcastreceiver%7Candroid-ndk%7Cuser-agent%7Candroid-webview%7 \
Cwebview%7Cbackground%7Cmultithreading%7Candroid-source%7Csms \
%7Cadb%7Csollections%7Cactivity|Chart"])
correct_urls = ['com']
np.testing.assert_array_equal(url_to_tld(urls), correct_urls)
def test_url_to_tld_nan():
url_to_tld = URLToTLD()
urls = pd.Series(['www.featuretools.com', np.nan, 'featuretools', ''], dtype='object')
correct_urls = pd.Series(['com', np.nan, np.nan, np.nan], dtype='object')
results = url_to_tld(urls)
pd.testing.assert_series_equal(results, correct_urls, check_names=False)
def test_is_free_email_domain_valid_addresses():
is_free_email_domain = IsFreeEmailDomain()
array = pd.Series(['<EMAIL>', '<EMAIL>', '<EMAIL>', 'free<EMAIL>'])
answers = pd.Series(is_free_email_domain(array))
correct_answers = pd.Series([True, False, True, True])
pd.testing.assert_series_equal(answers, correct_answers)
def test_is_free_email_domain_valid_addresses_whitespace():
is_free_email_domain = IsFreeEmailDomain()
array = pd.Series([' <EMAIL>', ' <EMAIL>', '<EMAIL> ', ' <EMAIL> '])
answers = pd.Series(is_free_email_domain(array))
correct_answers = pd.Series([True, False, True, True])
pd.testing.assert_series_equal(answers, correct_answers)
def test_is_free_email_domain_nan():
is_free_email_domain = IsFreeEmailDomain()
array = pd.Series([np.nan, '<EMAIL>', '<EMAIL>'])
answers = pd.Series(is_free_email_domain(array))
correct_answers = pd.Series([np.nan, False, True])
pd.testing.assert_series_equal(answers, correct_answers)
def test_is_free_email_domain_empty_string():
is_free_email_domain = IsFreeEmailDomain()
array = pd.Series(['', '<EMAIL>', '<EMAIL>'])
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# (c) University of St Andrews 2020-2021
# (c) University of Strathclyde 2020-2021
# Author:
# <NAME>
#
# Contact
# <EMAIL>
#
# <NAME>,
# Biomolecular Sciences Building,
# University of St Andrews,
# North Haugh Campus,
# St Andrews,
# KY16 9ST
# Scotland,
# UK
#
# The MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Bio.PDB reference:
# Hamelryck, T., Manderick, B. (2003) PDB parser and structure class
# implemented in Python. Bioinformatics 19: 2308–2310
"""Extract protein sequences from local CAZyme database, write to FASTA files and/or BLAST db"""
import logging
import sys
import pandas as pd
from datetime import datetime
from typing import List, Optional
from tqdm import tqdm
from Bio import SeqIO
from Bio.Blast.Applications import NcbimakeblastdbCommandline
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from saintBioutils.utilities.file_io import make_output_directory
from saintBioutils.utilities.logger import config_logger
from cazy_webscraper import closing_message, connect_existing_db
from cazy_webscraper.sql.sql_interface.get_data import get_selected_gbks, get_table_dicts
from cazy_webscraper.sql.sql_interface.get_data.get_records import (
get_user_genbank_sequences,
get_user_uniprot_sequences
)
from cazy_webscraper.utilities import parse_configuration
from cazy_webscraper.utilities.parsers.extract_seq_parser import build_parser
def main(argv: Optional[List[str]] = None, logger: Optional[logging.Logger] = None):
"""Set up programme and initate run."""
time_stamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
start_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S") # used in terminating message
start_time = pd.to_datetime(start_time)
###############################################################################
# #
# pre-processing and dataset construction #
# July 6 2020 #
###############################################################################
### Loading libraries #########################################################
import os
import pickle
import pandas as pd
import numpy as np
import re
import string
import time
import nltk
from nltk.stem import WordNetLemmatizer
from nltk.tag import StanfordNERTagger
from nltk.tokenize import word_tokenize
from sklearn.model_selection import train_test_split
######################################################## Loading libraries ####
### Declaring I/O variables ###################################################
input_file = 'input_file.xlsx'
output_file = 'pre-processed_data.pickle'
output_summary = 'pre-processed_data_summary.xlsx'
aux_data = 'auxiliar_data.xlsx'
names_pickle = "names_dict.pickle"
################################################## Declaring I/O variables ####
### Declaring Functions #######################################################
def load_NER():
# NER tagger should be installed prior to use.
# Please refer to https://nlp.stanford.edu/software/CRF-NER.html
java_path = "C:\\Program Files\\Java\\jdk-12.0.2\\bin\\java.exe"
os.environ['JAVAHOME'] = java_path
st = StanfordNERTagger('G:\\API\\Stanford\\stanford-ner-2018-10-16\\classifiers\\english.all.3class.distsim.crf.ser.gz',
'G:\\API\\Stanford\\stanford-ner-2018-10-16\\stanford-ner.jar',
encoding='utf-8')
return st
def mount_dict(file, sheet):
df = pd.read_excel(file, sheet_name = sheet)
key = list(df)[0]
meaning = list(df)[1]
df.index = df[key].str.len()
df = df.sort_index(ascending = False).reset_index(drop=True)
df = df.set_index(key).to_dict()[meaning]
return df
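# Illustrative usage of mount_dict (not part of the original script): it reads a
# two-column sheet into a {key: meaning} dict, sorted longest key first. The sheet
# name 'relations&genders' is taken from the calls below; any other name is an assumption.
# relations_dict = mount_dict(aux_data, 'relations&genders')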
def apply_dict(text, dictionary):
for word in dictionary.keys():
text = re.sub("(?<![a-zA-Z0-9])"+word+"(?![a-zA-Z0-9])",
dictionary[word],
text,
flags=re.I)
return text
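# Illustrative usage of apply_dict (not part of the original script); the toy
# dictionary and sentence are made-up examples:
# >>> apply_dict('The pt is 70 yrs old', {'pt': 'patient', 'yrs': 'years'})
# 'The patient is 70 years old'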
def lemmatization(text):
WNL = WordNetLemmatizer()
tokenized_text = nltk.word_tokenize(text)
return ' '.join([WNL.lemmatize(word) for word in tokenized_text])
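# Illustrative behaviour of lemmatization (not part of the original script); it
# requires the NLTK 'punkt' and 'wordnet' resources, e.g.
# 'the cats were sitting on the mats' -> 'the cat were sitting on the mat'
# (only noun forms are reduced, since the default WordNetLemmatizer POS is 'n').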
def mount_list(file, sheet):
df = pd.read_excel(file, sheet_name = sheet)
ref_label = list(df)[0]
df.index = df[ref_label].str.len()
df = df.sort_index(ascending = False).reset_index(drop=True)
return df[ref_label].tolist()
def mount_lowercasing_list():
WNL = WordNetLemmatizer()
relations_list = list(mount_dict(aux_data,'relations&genders').keys())
countries_list = mount_list(aux_data,'lower_country')
nationalities_list = mount_list(aux_data,'lower_nationality')
languages_list = mount_list(aux_data,'lower_language')
cities_list = mount_list(aux_data,'lower_city')
provinces_list = mount_list(aux_data,'lower_province')
miscellania_list = mount_list(aux_data,'lower_miscellania')
lower_stopwords_list = mount_list(aux_data,'lower_stopwords')
lower_calendar_list = mount_list(aux_data,'lower_calendar')
lowercasing_list = relations_list + countries_list + nationalities_list + languages_list + cities_list + provinces_list + miscellania_list + lower_stopwords_list + lower_calendar_list
lowercasing_list = sorted(lowercasing_list, key = len, reverse = True)
lowercasing_list = [WNL.lemmatize(word.lower()) for word in lowercasing_list]
return lowercasing_list
def lowercase(text, lowercasing_list):
for word in lowercasing_list:
text = re.sub('(?<![a-zA-Z0-9])'+word+'(?![a-zA-Z0-9])',
word.lower(),
text,
flags=re.I)
return text
def search4names(text):
# Open pickle with names
with open(names_pickle, 'rb') as handle:
names_dict = pickle.load(handle)
names = list(names_dict.keys())
new_text = []
NER = load_NER()
tokenized_text = nltk.word_tokenize(text)
tagged_text = NER.tag(tokenized_text)
# title case names and lowercase other terms
for word, tag in tagged_text:
if (tag == "PERSON") or (word.title() in names):
new_text.append(word.title())
else:
new_text.append(word.lower())
text = ' '.join(new_text)
# Register which tokens are names (trigrams)
tokenized_text = nltk.word_tokenize(text)
for w1,w2,w3 in nltk.trigrams(tokenized_text):
if ((len(w1) > 1) & (len(w2) > 1) & (len(w3) > 1)):
if ((w1[0].isupper()) & (w1[1].islower())) & \
((w2[0].isupper()) & (w2[1].islower())) & \
((w3[0].isupper()) & (w3[1].islower())) & \
(w1 in names):
gender = names_dict[w1]
text = re.sub('(?<![a-zA-Z0-9])'+w1+' '+w2+' '+w3+'(?![a-zA-Z0-9])', \
'|N|'+w1+'_'+w2+'_'+w3+'|'+str(gender)+'|', \
text)
elif ((w1[0].isupper()) & (w1[1].islower())) & \
((w2[0].isupper()) & (w2[1].islower())) & \
((w3[0].isupper()) & (w3[1].islower())):
text = re.sub('(?<![a-zA-Z0-9])'+w1+' '+w2+' '+w3+'(?![a-zA-Z0-9])', \
'|N|'+w1+'_'+w2+'_'+w3+'|U|', \
text)
# Register which tokens are names (bigrams)
tokenized_text = nltk.word_tokenize(text)
for w1,w2 in nltk.bigrams(tokenized_text):
if ((len(w1) > 1) & (len(w2) > 1)):
if ((w1[0].isupper()) & (w1[1].islower())) & \
((w2[0].isupper()) & (w2[1].islower())) & \
(w1 in names):
gender = names_dict[w1]
text = re.sub('(?<![a-zA-Z0-9])'+w1+' '+w2+'(?![a-zA-Z0-9])', \
'|N|'+w1+'_'+w2+'|'+str(gender)+'|', \
text)
elif ((w1[0].isupper()) & (w1[1].islower())) & \
((w2[0].isupper()) & (w2[1].islower())):
text = re.sub('(?<![a-zA-Z0-9])'+w1+' '+w2+'(?![a-zA-Z0-9])', \
'|N|'+w1+'_'+w2+'|U|', \
text)
# Register which tokens are names (unigrams)
tokenized_text = nltk.word_tokenize(text)
for w1 in tokenized_text:
if (len(w1) > 1):
if ((w1[0].isupper()) & (w1[1].islower())):
if (w1 in names):
gender = names_dict[w1]
text = re.sub('(?<!(\w)|\|)'+w1+'(?!(\w)|\|)','|N|'+w1+'|'+str(gender)+'|',text)
else:
text = re.sub('(?<!(\w)|\|)'+w1+'(?!(\w)|\|)', '|N|'+w1+'|U|', text)
return text
def search4relations(text):
relations_list = mount_list(aux_data,'relations&genders')
WNL = WordNetLemmatizer()
relations_list = [WNL.lemmatize(word.lower()) for word in relations_list]
for word in relations_list:
text = re.sub('(?<!(\w)|\|)'+word+'(?!(\w)|\|)',
'|R|'+word+'|r|',
text,
flags=re.I)
return text
def remove_untagged(x):
text = [word if '|N|' in word else \
word if '|R|' in word else \
word if '.' in word else "" for word in nltk.word_tokenize(x)]
text = ' '.join([word for word in text if len(word) > 0])
return text
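# Illustrative (hypothetical) example of the tagging pipeline: after
# search4names and search4relations, a note such as "Jane Doe is the mother ."
# becomes roughly "|N|Jane_Doe|F| is the |R|mother|r| ." (the gender letter
# depends on names_dict), and remove_untagged then keeps only
# "|N|Jane_Doe|F| |R|mother|r| ."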
def extract_info(text, record_id):
# Identify the sequence that info occurs
TokenList = nltk.word_tokenize(text)
Sequence = [] #list of tuples ((0:name,1:relation,2:'.'), tokenlist position)
for t in range(0,len(TokenList)):
token = TokenList[t]
if '|N|' in token:
Sequence.append((0,t))
elif '|R|' in token:
Sequence.append((1,t))
elif ('.' in token) or (',' in token):
Sequence.append((2,t))
NoInfo = len(Sequence)
    # Identify the tokens at which a "break" occurs. A break happens when the
    # next piece of info refers to a different name/relation combination.
BreakIndex = [0]
FlagN = 0
FlagR = 0
for n in range(0,NoInfo):
Categorie = Sequence[n][0]
if (Categorie == 2) & (n != (NoInfo-1)):
FlagN = 0
FlagR = 0
BreakIndex.append(n)
elif (Categorie == 0) & (FlagN == 1):
FlagR = 0
BreakIndex.append(n)
if (Categorie == 0) & (FlagN == 0):
FlagN = 1
elif (Categorie == 0) & (FlagN == 1):
FlagR = 0
BreakIndex.append(n)
elif (Categorie == 1) & (FlagR == 0):
FlagR = 1
elif (Categorie == 1) & (FlagR == 1):
FlagN = 0
BreakIndex.append(n)
if BreakIndex[-1] != NoInfo:
BreakIndex.append(NoInfo)
Breaks = [[x,y] for x,y in zip(BreakIndex, BreakIndex[1:])]
TempDF = pd.DataFrame(columns=['Record_ID','Name','Relation'])
# Create one line for each name/relation identified.
for n in range(0,len(Breaks)):
Start = Breaks[n][0]
End = Breaks[n][1]
Slice = Sequence[Start:End]
Categories = []
for [Categorie,Index] in Slice:
Categories.append(Categorie)
# Avoiding slices with only punct
if ((2 in Categories) & (0 not in Categories) & (1 not in Categories)):
if n != 0:
Breaks[n-1][1] = Breaks[n][1]
Breaks[n] = Breaks[n-1]
    # Remove duplicate slices (they occur if there were slices with only punctuation)
NewBreaks = []
for i in Breaks:
if i not in NewBreaks:
NewBreaks.append(i)
Breaks = NewBreaks
for n in range(0,len(Breaks)):
Start = Breaks[n][0]
End = Breaks[n][1]
Slice = Sequence[Start:End]
Slice.sort(key=lambda x: x[0])
for [Categorie,Index] in Slice:
if Categorie == 0:
TempDF.loc[n,'Name'] = TokenList[Index]
if Categorie == 1:
TempDF.loc[n,'Relation'] = TokenList[Index]
TempDF.loc[n,'Record_ID'] = record_id
return TempDF
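# Illustrative (hypothetical) example of extract_info output: for the tagged
# text "|N|Jane_Doe|F| |R|mother|r| . |N|John_Doe|M| |R|father|r| ." and
# record_id 1, the returned frame holds roughly two rows:
#   Record_ID  Name            Relation
#   1          |N|Jane_Doe|F|  |R|mother|r|
#   1          |N|John_Doe|M|  |R|father|r|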
def adjust_info(table):
# Identifying male and female relations
relations_dict = mount_dict(aux_data,'relations&genders')
relations_dict = pd.DataFrame.from_dict(relations_dict,
orient = 'index',
columns = ['gender'])
relations_dict.reset_index(inplace = True)
male_relations_list = list(relations_dict[relations_dict['gender'] == 'M']['index'])
female_relations_list = list(relations_dict[relations_dict['gender'] == 'F']['index'])
# Drop duplicate records
table = table.drop_duplicates().reset_index(drop = True)
    # Delete prefixes and suffixes for analysis
table['Name'] = table['Name'].map(lambda x: re.sub('(?<![a-zA-Z0-9])(nan)(?![a-zA-Z0-9])','|N|NA|U|',str(x)))
table['Name'] = table['Name'].map(lambda x: re.sub('(?<![0-9])_(?![0-9])',' ',str(x)))
table['Relation'] = table['Relation'].map(lambda x: re.sub('(?<![a-zA-Z0-9])(nan)(?![a-zA-Z0-9])','|R|NA|U|',str(x)))
# Adjusting names
for n in range(0,len(table)):
Actual = table.loc[n,'Name'][3:-3]
for m in range(0,len(table)):
Other = table.loc[m,'Name'][3:-3]
if (str(Actual) != '|N|NA|U|') & (str(Other) != '|N|NA|U|'):
if Actual.find(Other) >= 0:
LenActual = len(Actual)
LenOther = len(Other)
if LenActual > LenOther:
table.loc[m,'Name'] = table.loc[n,'Name']
table = table.drop_duplicates().reset_index(drop = True)
# Adjusting relation = NA when name already exist
temp_table = pd.DataFrame()
names_list = list(table['Name'].unique())
for name in names_list:
temp1 = table[table['Name'] == name]
if len(temp1) > 1:
temp2 = temp1[temp1['Relation'] != "|R|NA|U|"]
temp_table = pd.concat([temp_table,temp2], axis = 0)
else:
temp_table = pd.concat([temp_table,temp1], axis = 0)
table = temp_table.copy()
# Adjusting name = NA when relation already exist
new_table = pd.DataFrame()
relations_list = list(table['Relation'].unique())
for relation in relations_list:
temp1 = table[table['Relation'] == relation]
if len(temp1) > 1:
temp2 = temp1[temp1['Name'] != "|N|NA|U|"]
new_table = pd.concat([new_table,temp2], axis = 0)
else:
new_table = pd.concat([new_table,temp1], axis = 0)
table = new_table.reset_index(drop = True).copy()
# Adjusting remaining relation = NA
for n in range(0, len(table)):
if table.loc[n,'Relation'] == '|R|NA|U|':
table.loc[n,'Relation'] = '|R|other|r|'
# Including gender on relation
for n in range(0, len(table)):
relation_temp = table.loc[n,'Relation']
relation_temp_free = re.sub('(\|R\|)|(\|r\|)','',str(relation_temp))
if relation_temp_free in male_relations_list:
gender = 'M'
elif relation_temp_free in female_relations_list:
gender = 'F'
else:
gender_letter = table.loc[n,'Name'][-2:-1]
if gender_letter == 'F':
gender = 'F'
elif gender_letter == 'M':
gender = 'M'
else:
gender = 'U'
relation = relation_temp[:-2]+gender+'|'
table.loc[n,'Relation'] = relation
    # Delete remaining prefixes and suffixes (names)
table['Name'] = table['Name'].map(lambda x: re.sub('(\|N\|)|(\|n\|)|(\|M\|)|(\|F\|)|(\|U\|)','',str(x)))
return table
def names2relations(x):
# Identifying unknown gender in relations
relations_dict = mount_dict(aux_data,'relations&genders')
relations_dict = pd.DataFrame.from_dict(relations_dict,
orient = 'index',
columns = ['gender'])
relations_dict.reset_index(inplace = True)
unknown_relations_list = list(relations_dict[relations_dict['gender'] == 'U']['index'])
record_id = x['Record_ID']
text = x['ChartValue']
# Looking available info at net_table
temp = net_table[(net_table['Record_ID'] == record_id) &
(net_table['Name'] != 'NA')].reset_index(drop = True)
    # Search for each name individually. The relation keeps its prefixes and
    # suffixes if the relation term does not indicate gender.
if len(temp) > 0:
for row in range(len(temp)):
name = temp.loc[row, 'Name']
relation01 = temp.loc[row, 'Relation']
relation02 = relation01[3:-3]
if relation02 in unknown_relations_list:
relation = relation01
else:
relation = relation02
# Substituting name per relation
text = re.sub('(?<![a-zA-Z0-9])'+name+'(?![a-zA-Z0-9])',
relation,
text)
return text
def lower_exclude(x):
# lowercasing
x = ' '.join([token.lower() for token in nltk.word_tokenize(x)])
# excluding numbers
x = re.sub('[0-9]', '', x)
    # excluding punctuation
x = ' '.join([word for word in nltk.word_tokenize(x) if word not in set(string.punctuation)])
return x
def split_data(temp):
y_raw = temp['y'].copy()
X_raw = temp['X'].copy().astype(str)
X_train_validation, X_test, y_train_validation, y_test = train_test_split(X_raw,
y_raw,
test_size = 0.10,
random_state = 42,
stratify = y_raw)
X_train, X_validation, y_train, y_validation = train_test_split(X_train_validation,
y_train_validation,
test_size = 0.10,
random_state = 42,
stratify = y_train_validation)
return X_train_validation, X_train, X_validation, X_test, y_train_validation, y_train, y_validation, y_test
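# Note on split_data proportions: with test_size=0.10 applied twice, the final
# split is roughly 81% train, 9% validation and 10% test; both splits are
# stratified on the label to preserve class balance.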
def get_params(X_train_validation, X_train, X_validation, X_test, temp):
X_train_validation_index = list(X_train_validation.reset_index()['index'])
X_train_index = list(X_train.reset_index()['index'])
X_validation_index = list(X_validation.reset_index()['index'])
X_test_index = list(X_test.reset_index()['index'])
X_train_validation_params = temp[temp.index.isin(X_train_validation_index)]['ParameterID'].reindex(index = X_train_validation.index)
X_train_params = temp[temp.index.isin(X_train_index)]['ParameterID'].reindex(index = X_train.index)
X_validation_params = temp[temp.index.isin(X_validation_index)]['ParameterID'].reindex(index = X_validation.index)
X_test_params = temp[temp.index.isin(X_test_index)]['ParameterID'].reindex(index = X_test.index)
X_train_validation_ID = temp[temp.index.isin(X_train_validation_index)]['Record_ID'].reindex(index = X_train_validation.index)
X_train_ID = temp[temp.index.isin(X_train_index)]['Record_ID'].reindex(index = X_train.index)
X_validation_ID = temp[temp.index.isin(X_validation_index)]['Record_ID'].reindex(index = X_validation.index)
X_test_ID = temp[temp.index.isin(X_test_index)]['Record_ID'].reindex(index = X_test.index)
return X_train_validation_params, X_train_params, X_validation_params, X_test_params, \
X_train_validation_ID, X_train_ID, X_validation_ID, X_test_ID
###################################################### Declaring Functions ####
### Main routine ##############################################################
# Registering initial time
a = time.time()
print("--start--")
# Open input file
data = pd.read_excel(io = input_file, sheet_name = 'Sheet1')
# Pre-processing steps, as presented in the original paper, are:
#--- Step 1 -------------------------------------------------------------------
print('Pre-Processing - Step #1...')
# Excluding duplicated data (same full record)
data = data.drop_duplicates().reset_index(drop = True)
# Ensuring that field 'ChartValue' contains only strings
data['ChartValue'] = data['ChartValue'].apply(lambda x: str(x))
# adjust string => 'abc123' => 'abc 123'
data['ChartValue'] = data['ChartValue'].apply(
lambda x: " ".join(
re.sub(r"([0-9]+(\.[0-9]+)?)",
r" \1 ",
x).strip().split()))
# adjust string => 'abc-' => 'abc -'
data['ChartValue'] = data['ChartValue'].apply(
lambda x: " ".join(
x.translate(
str.maketrans(
{key: " {0} ".format(key) for key in string.punctuation})).split()))
#------------------------------------------------------------------ Step 1 ----
#--- Step 2 -------------------------------------------------------------------
print('Pre-Processing - Step #2...')
# Loading dictionary to standardize relations. E.g.: "sis" >> "sister"
relations_dict = mount_dict(aux_data,'standardize_relations')
# standardizes relations
data['ChartValue'] = data['ChartValue'].apply(
lambda x: apply_dict(x, relations_dict))
#------------------------------------------------------------------ Step 2 ----
#--- Step 3 -------------------------------------------------------------------
print('Pre-Processing - Step #3...')
# spell checker step. In the original experiment, data was saved to an xlsx file
# and the spell checker from MS Excel was used to adjust the text.
# The following lines can be used to save and load the data.
# To save it, use:
# data.to_excel('data_spell_check.xlsx')
# To read it, use:
# data = pd.read_excel('data_spell_check.xlsx')
#------------------------------------------------------------------ Step 3 ----
#--- Step 4 -------------------------------------------------------------------
print('Pre-Processing - Step #4...')
# Loading dictionary to adjust indirect relations
indirect_relations_dict = mount_dict(aux_data,'indirect_relations_dict')
# standardizes relations
data['ChartValue'] = data['ChartValue'].apply(
lambda x: apply_dict(x, indirect_relations_dict))
#------------------------------------------------------------------ Step 4 ----
#--- Step 5 -------------------------------------------------------------------
print('Pre-Processing - Step #5...')
# lemmatization of ChartValue
data['ChartValue'] = data['ChartValue'].apply(lambda x: lemmatization(x))
#------------------------------------------------------------------ Step 5 ----
#--- Step 6 -------------------------------------------------------------------
print('Pre-Processing - Step #6...')
# Filtering to keep only notes related to parameters:
# 4957: Family Quick View Summary
# 17202: Contact Information Family
net_data = data[(data['ParameterID'] == 4957) |
(data['ParameterID'] == 17202)].reset_index(drop=True).copy()
# Lowercase terms in lowercasing_list. This helps the NER tagger correctly
# identify names in the text
lowercasing_list = mount_lowercasing_list()
net_data['ChartValue'] = net_data['ChartValue'].apply(
lambda x: lowercase(x, lowercasing_list))
# Searching for names
net_data['ChartValue'] = net_data['ChartValue'].apply(
lambda x: search4names(x))
# Searching for relations
net_data['ChartValue'] = net_data['ChartValue'].apply(
lambda x: search4relations(x))
# Remove untagged tokens
net_data['ChartValue'] = net_data['ChartValue'].apply(
lambda x: remove_untagged(x))
# Excluding records where no entities were recognized
net_data = net_data[net_data.ChartValue.apply(lambda x: len(str(x)) > 0)]
net_data = net_data.reset_index(drop = True)
# Dropping unused columns and duplicated rows
net_data = net_data[['Record_ID','ChartValue']]
net_data = net_data.drop_duplicates().reset_index(drop = True)
# Creating network table
net_table = pd.DataFrame(columns=['Record_ID','Name','Relation'])
# To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %%
# ***Important: still, need to install "crypto-news-api" package from PyPI first!
# first, activate cryptoalgowheel conda environment
# then: /anaconda3/envs/cryptoalgowheel/bin/pip install crypto-news-api
# %%
from crypto_news_api import CryptoControlAPI
import numpy as np
import pandas as pd
import json
from datetime import datetime
# %%
api = CryptoControlAPI("39b252536476db3482bab04acea666bc")
# %% [markdown]
# #### get top news (languages can be changed)
# %%
def top_news(api, language="en"):
topnews = api.getTopNews(language)
topnews = pd.DataFrame(topnews)
topnews.to_csv("./top_news.csv", index=False)
top_news(api)
# %% [markdown]
# #### get latest news (languages can be changed)
# %%
def latest_news(api, language="en"):
latest_news = api.getLatestNews(language)
latest_news = pd.DataFrame(latest_news)
latest_news.to_csv("./latest_news.csv", index=False)
latest_news(api, "ru")
# %% [markdown]
# #### get top news by category for "en" news (for "en" news there are currently 6 categories: "Analysis", "Blockchain", "Exchanges", "Government", "Mining & ICOs", plus 1 "general" category) (for languages other than "en", all news is classified as "general", so they are not covered here)
#
# %%
def top_news_by_category(api, language="en"):
topnews_by_cat = api.getTopNewsByCategory(language)
for k,v in topnews_by_cat.items():
current = pd.DataFrame(v)
current.to_csv("./top_news_"+str(k)+".csv", index=False)
top_news_by_category(api)
top_news_by_category(api, "jp")
# %% [markdown]
# #### get top news for a specific coin
# %%
def top_news_by_coin(api, coin, language="en"):
topnews_coin = api.getTopNewsByCoin(coin, language)
topnews_coin = pd.DataFrame(topnews_coin)
topnews_coin.to_csv("./top_news_"+str(coin)+".csv", index=False)
top_news_by_coin(api, "bitcoin")
# %% [markdown]
# #### get latest news for a specific coin
# %%
def latest_news_by_coin(api, coin, language="en"):
latest_news_coin = api.getLatestNewsByCoin(coin, language)
    latest_news_coin = pd.DataFrame(latest_news_coin)
# -*- coding: utf-8 -*-
#Classes and functions of XAS experiments
import datetime, numpy as np, operator, pandas as pd, matplotlib.pyplot as plt
from scipy import interpolate
# Parent Classes
class _DataFile():
"""Parent class for all XAS data files"""
filename = ""
shortname = ""
dataframe = ""
def plot(self, signal, color="red", legend=''):
if self.processed_dataframe is not None:
signal = self.processed_dataframe[signal].values
energy = self.processed_dataframe['Energy / eV'].values
elif self.dataframe is not None:
signal = self.dataframe[signal].values
energy = self.dataframe.index.values
else:
            raise Exception('No dataframe found')
if legend:
plt.plot(energy, signal, linewidth = 1, label=legend, color=color)
plt.legend(loc = 2, frameon = False).draggable(True)
elif self.shortname:
if legend is None:
plt.plot(energy, signal, linewidth = 1, color=color)
else:
plt.plot(energy, signal, linewidth = 1, label=self.shortname, color=color)
plt.legend(loc = 2, frameon = False).draggable(True)
else:
plt.plot(energy, signal, linewidth = 1, label='no_label', color=color)
plt.legend(loc = 2, frameon = False).draggable(True)
# plt.axis([np.amin(energy), np.amax(energy), 0, np.amax(signal)*1.1])
# plt.subplots_adjust(hspace=0, wspace=0)
def _ScaleRef(self, column_id, name=None, divisor=None, trim="", smooth=None):
"""
        Internal function. Scales data between 0 and 1 irrespective of wave shape. Used for STD and TEY data analysis.
"""
if name is None:
name = column_id
if trim:
new_frame = self.normalized_dataframe[column_id].iloc[trim[0]:trim[1]].copy()
else:
new_frame = self.normalized_dataframe[column_id].copy()
if divisor:
new_frame = new_frame/self.normalized_dataframe[divisor].copy()
if smooth:
new_frame = pd.Series.rolling(new_frame, window=smooth,center=True,min_periods=smooth).mean() # Updated due to future warning
#new_frame = pd.rolling_mean(new_frame, smooth, smooth, center=True) # Legacy
new_frame -= new_frame.min()
new_frame /= new_frame.max()
new_frame.columns = ['Rounded Energy / eV', name]
return new_frame
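    # _ScaleRef ends with a simple min-max normalization after the optional
    # division and rolling smoothing; e.g. a signal [2.0, 3.0, 6.0] maps to
    # [0.0, 0.25, 1.0], since (x - min) / (max - min) is applied elementwise.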
def _SumData(self):
"""
Internal function. Averages data across multiple _MDAFile objects
"""
normalized_dataframe = []
        normalized_dataframe = pd.concat([scan.dataframe for scan in self._MDAlist])
#!/usr/bin/env python
# coding: utf-8
# In[1]:
# Importing necessary libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#get_ipython().run_line_magic('matplotlib', 'inline')
import warnings
warnings.filterwarnings('ignore')
# In[2]:
#Installing pmdarima package
#get_ipython().system(' pip install pmdarima')
# In[3]:
# Importing auto_arima
from pmdarima.arima import auto_arima
# In[4]:
#Read the sales dataset
sales_data = pd.read_csv("Champagne Sales.csv")
import datetime
from datetime import timedelta
from distutils.version import LooseVersion
from io import BytesIO
import os
import re
from warnings import catch_warnings, simplefilter
import numpy as np
import pytest
from pandas.compat import is_platform_little_endian, is_platform_windows
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_categorical_dtype
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
Int64Index,
MultiIndex,
RangeIndex,
Series,
Timestamp,
bdate_range,
concat,
date_range,
isna,
timedelta_range,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
create_tempfile,
ensure_clean_path,
ensure_clean_store,
safe_close,
safe_remove,
tables,
)
import pandas.util.testing as tm
from pandas.io.pytables import (
ClosedFileError,
HDFStore,
PossibleDataLossError,
Term,
read_hdf,
)
from pandas.io import pytables as pytables # noqa: E402 isort:skip
from pandas.io.pytables import TableIterator # noqa: E402 isort:skip
_default_compressor = "blosc"
ignore_natural_naming_warning = pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
@pytest.mark.single
class TestHDFStore:
def test_format_kwarg_in_constructor(self, setup_path):
# GH 13291
with ensure_clean_path(setup_path) as path:
with pytest.raises(ValueError):
HDFStore(path, format="table")
def test_context(self, setup_path):
path = create_tempfile(setup_path)
try:
with HDFStore(path) as tbl:
raise ValueError("blah")
except ValueError:
pass
finally:
safe_remove(path)
try:
with HDFStore(path) as tbl:
tbl["a"] = tm.makeDataFrame()
with HDFStore(path) as tbl:
assert len(tbl) == 1
assert type(tbl["a"]) == DataFrame
finally:
safe_remove(path)
def test_conv_read_write(self, setup_path):
path = create_tempfile(setup_path)
try:
def roundtrip(key, obj, **kwargs):
obj.to_hdf(path, key, **kwargs)
return read_hdf(path, key)
o = tm.makeTimeSeries()
tm.assert_series_equal(o, roundtrip("series", o))
o = tm.makeStringSeries()
tm.assert_series_equal(o, roundtrip("string_series", o))
o = tm.makeDataFrame()
tm.assert_frame_equal(o, roundtrip("frame", o))
# table
df = DataFrame(dict(A=range(5), B=range(5)))
df.to_hdf(path, "table", append=True)
result = read_hdf(path, "table", where=["index>2"])
tm.assert_frame_equal(df[df.index > 2], result)
finally:
safe_remove(path)
def test_long_strings(self, setup_path):
# GH6166
df = DataFrame(
{"a": tm.rands_array(100, size=10)}, index=tm.rands_array(100, size=10)
)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=["a"])
result = store.select("df")
tm.assert_frame_equal(df, result)
def test_api(self, setup_path):
# GH4584
# API issue when to_hdf doesn't accept append AND format args
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True)
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True)
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", append=False, format="fixed")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False, format="f")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False)
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_store(setup_path) as store:
path = store._path
df = tm.makeDataFrame()
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=True, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# append to False
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# formats
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format=None)
tm.assert_frame_equal(store.select("df"), df)
with ensure_clean_path(setup_path) as path:
# Invalid.
df = tm.makeDataFrame()
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="f")
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="fixed")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=True, format="foo")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=False, format="bar")
# File path doesn't exist
path = ""
with pytest.raises(FileNotFoundError):
read_hdf(path, "df")
def test_api_default_format(self, setup_path):
# default_format option
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
_maybe_remove(store, "df")
store.put("df", df)
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
store.append("df2", df)
pd.set_option("io.hdf.default_format", "table")
_maybe_remove(store, "df")
store.put("df", df)
assert store.get_storer("df").is_table
_maybe_remove(store, "df2")
store.append("df2", df)
assert store.get_storer("df").is_table
pd.set_option("io.hdf.default_format", None)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
df.to_hdf(path, "df")
with HDFStore(path) as store:
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
df.to_hdf(path, "df2", append=True)
pd.set_option("io.hdf.default_format", "table")
df.to_hdf(path, "df3")
with HDFStore(path) as store:
assert store.get_storer("df3").is_table
df.to_hdf(path, "df4", append=True)
with HDFStore(path) as store:
assert store.get_storer("df4").is_table
pd.set_option("io.hdf.default_format", None)
def test_keys(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
assert len(store) == 3
expected = {"/a", "/b", "/c"}
assert set(store.keys()) == expected
assert set(store) == expected
def test_keys_ignore_hdf_softlink(self, setup_path):
# GH 20523
# Puts a softlink into HDF file and rereads
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A=range(5), B=range(5)))
store.put("df", df)
assert store.keys() == ["/df"]
store._handle.create_soft_link(store._handle.root, "symlink", "df")
# Should ignore the softlink
assert store.keys() == ["/df"]
def test_iter_empty(self, setup_path):
with ensure_clean_store(setup_path) as store:
# GH 12221
assert list(store) == []
def test_repr(self, setup_path):
with ensure_clean_store(setup_path) as store:
repr(store)
store.info()
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store["df"] = df
# make a random group in hdf space
store._handle.create_group(store._handle.root, "bah")
assert store.filename in repr(store)
assert store.filename in str(store)
store.info()
# storers
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df)
s = store.get_storer("df")
repr(s)
str(s)
@ignore_natural_naming_warning
def test_contains(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
store["foo/bar"] = tm.makeDataFrame()
assert "a" in store
assert "b" in store
assert "c" not in store
assert "foo/bar" in store
assert "/foo/bar" in store
assert "/foo/b" not in store
assert "bar" not in store
# gh-2694: tables.NaturalNameWarning
with catch_warnings(record=True):
store["node())"] = tm.makeDataFrame()
assert "node())" in store
def test_versioning(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
assert store.root.a._v_attrs.pandas_version == "0.15.2"
assert store.root.b._v_attrs.pandas_version == "0.15.2"
assert store.root.df1._v_attrs.pandas_version == "0.15.2"
# write a file and wipe its versioning
_maybe_remove(store, "df2")
store.append("df2", df)
# this is an error because its table_type is appendable, but no
# version info
store.get_node("df2")._v_attrs.pandas_version = None
with pytest.raises(Exception):
store.select("df2")
def test_mode(self, setup_path):
df = tm.makeTimeDataFrame()
def check(mode):
with ensure_clean_path(setup_path) as path:
# constructor
if mode in ["r", "r+"]:
with pytest.raises(IOError):
HDFStore(path, mode=mode)
else:
store = HDFStore(path, mode=mode)
assert store._handle.mode == mode
store.close()
with ensure_clean_path(setup_path) as path:
# context
if mode in ["r", "r+"]:
with pytest.raises(IOError):
with HDFStore(path, mode=mode) as store: # noqa
pass
else:
with HDFStore(path, mode=mode) as store:
assert store._handle.mode == mode
with ensure_clean_path(setup_path) as path:
# conv write
if mode in ["r", "r+"]:
with pytest.raises(IOError):
df.to_hdf(path, "df", mode=mode)
df.to_hdf(path, "df", mode="w")
else:
df.to_hdf(path, "df", mode=mode)
# conv read
if mode in ["w"]:
with pytest.raises(ValueError):
read_hdf(path, "df", mode=mode)
else:
result = read_hdf(path, "df", mode=mode)
tm.assert_frame_equal(result, df)
def check_default_mode():
# read_hdf uses default mode
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", mode="w")
result = read_hdf(path, "df")
tm.assert_frame_equal(result, df)
check("r")
check("r+")
check("a")
check("w")
check_default_mode()
def test_reopen_handle(self, setup_path):
with ensure_clean_path(setup_path) as path:
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# invalid mode change
with pytest.raises(PossibleDataLossError):
store.open("w")
store.close()
assert not store.is_open
# truncation ok here
store.open("w")
assert store.is_open
assert len(store) == 0
store.close()
assert not store.is_open
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# reopen as read
store.open("r")
assert store.is_open
assert len(store) == 1
assert store._mode == "r"
store.close()
assert not store.is_open
# reopen as append
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
# reopen as append (again)
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
def test_open_args(self, setup_path):
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
# create an in memory store
store = HDFStore(
path, mode="a", driver="H5FD_CORE", driver_core_backing_store=0
)
store["df"] = df
store.append("df2", df)
tm.assert_frame_equal(store["df"], df)
tm.assert_frame_equal(store["df2"], df)
store.close()
# the file should not have actually been written
assert not os.path.exists(path)
def test_flush(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store.flush()
store.flush(fsync=True)
def test_get(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
left = store.get("a")
right = store["a"]
tm.assert_series_equal(left, right)
left = store.get("/a")
right = store["/a"]
tm.assert_series_equal(left, right)
with pytest.raises(KeyError, match="'No object named b in the file'"):
store.get("b")
@pytest.mark.parametrize(
"where, expected",
[
(
"/",
{
"": ({"first_group", "second_group"}, set()),
"/first_group": (set(), {"df1", "df2"}),
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
(
"/second_group",
{
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
],
)
def test_walk(self, where, expected, setup_path):
# GH10143
objs = {
"df1": pd.DataFrame([1, 2, 3]),
"df2": pd.DataFrame([4, 5, 6]),
"df3": pd.DataFrame([6, 7, 8]),
"df4": pd.DataFrame([9, 10, 11]),
"s1": pd.Series([10, 9, 8]),
# Next 3 items aren't pandas objects and should be ignored
"a1": np.array([[1, 2, 3], [4, 5, 6]]),
"tb1": np.array([(1, 2, 3), (4, 5, 6)], dtype="i,i,i"),
"tb2": np.array([(7, 8, 9), (10, 11, 12)], dtype="i,i,i"),
}
with ensure_clean_store("walk_groups.hdf", mode="w") as store:
store.put("/first_group/df1", objs["df1"])
store.put("/first_group/df2", objs["df2"])
store.put("/second_group/df3", objs["df3"])
store.put("/second_group/s1", objs["s1"])
store.put("/second_group/third_group/df4", objs["df4"])
# Create non-pandas objects
store._handle.create_array("/first_group", "a1", objs["a1"])
store._handle.create_table("/first_group", "tb1", obj=objs["tb1"])
store._handle.create_table("/second_group", "tb2", obj=objs["tb2"])
assert len(list(store.walk(where=where))) == len(expected)
for path, groups, leaves in store.walk(where=where):
assert path in expected
expected_groups, expected_frames = expected[path]
assert expected_groups == set(groups)
assert expected_frames == set(leaves)
for leaf in leaves:
frame_path = "/".join([path, leaf])
obj = store.get(frame_path)
if "df" in leaf:
tm.assert_frame_equal(obj, objs[leaf])
else:
tm.assert_series_equal(obj, objs[leaf])
def test_getattr(self, setup_path):
with ensure_clean_store(setup_path) as store:
s = tm.makeTimeSeries()
store["a"] = s
# test attribute access
result = store.a
tm.assert_series_equal(result, s)
result = getattr(store, "a")
tm.assert_series_equal(result, s)
df = tm.makeTimeDataFrame()
store["df"] = df
result = store.df
tm.assert_frame_equal(result, df)
# errors
for x in ["d", "mode", "path", "handle", "complib"]:
with pytest.raises(AttributeError):
getattr(store, x)
# not stores
for x in ["mode", "path", "handle", "complib"]:
getattr(store, "_{x}".format(x=x))
def test_put(self, setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeTimeDataFrame()
store["a"] = ts
store["b"] = df[:10]
store["foo/bar/bah"] = df[:10]
store["foo"] = df[:10]
store["/foo"] = df[:10]
store.put("c", df[:10], format="table")
# not OK, not a table
with pytest.raises(ValueError):
store.put("b", df[10:], append=True)
# node does not currently exist, test _is_table_type returns False
# in this case
_maybe_remove(store, "f")
with pytest.raises(ValueError):
store.put("f", df[10:], append=True)
# can't put to a table (use append instead)
with pytest.raises(ValueError):
store.put("c", df[10:], append=True)
# overwrite table
store.put("c", df[:10], format="table", append=False)
tm.assert_frame_equal(df[:10], store["c"])
def test_put_string_index(self, setup_path):
with ensure_clean_store(setup_path) as store:
index = Index(
["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(20), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
# mixed length
index = Index(
["abcdefghijklmnopqrstuvwxyz1234567890"]
+ ["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(21), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
def test_put_compression(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
store.put("c", df, format="table", complib="zlib")
tm.assert_frame_equal(store["c"], df)
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="zlib")
@td.skip_if_windows_python_3
def test_put_compression_blosc(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="blosc")
store.put("c", df, format="table", complib="blosc")
tm.assert_frame_equal(store["c"], df)
def test_complibs_default_settings(self, setup_path):
# GH15943
df = tm.makeDataFrame()
# Set complevel and check if complib is automatically set to
# default value
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complevel=9)
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "zlib"
# Set complib and check to see if compression is disabled
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complib="zlib")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if not setting complib or complevel results in no compression
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if file-defaults can be overridden on a per table basis
with ensure_clean_path(setup_path) as tmpfile:
store = pd.HDFStore(tmpfile)
store.append("dfc", df, complevel=9, complib="blosc")
store.append("df", df)
store.close()
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
for node in h5file.walk_nodes(where="/dfc", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "blosc"
def test_complibs(self, setup_path):
# GH14478
df = tm.makeDataFrame()
# Building list of all complibs and complevels tuples
all_complibs = tables.filters.all_complibs
# Remove lzo if its not available on this platform
if not tables.which_lib_version("lzo"):
all_complibs.remove("lzo")
# Remove bzip2 if its not available on this platform
if not tables.which_lib_version("bzip2"):
all_complibs.remove("bzip2")
all_levels = range(0, 10)
all_tests = [(lib, lvl) for lib in all_complibs for lvl in all_levels]
for (lib, lvl) in all_tests:
with ensure_clean_path(setup_path) as tmpfile:
gname = "foo"
# Write and read file to see if data is consistent
df.to_hdf(tmpfile, gname, complib=lib, complevel=lvl)
result = pd.read_hdf(tmpfile, gname)
tm.assert_frame_equal(result, df)
# Open file and check metadata
# for correct amount of compression
h5table = tables.open_file(tmpfile, mode="r")
for node in h5table.walk_nodes(where="/" + gname, classname="Leaf"):
assert node.filters.complevel == lvl
if lvl == 0:
assert node.filters.complib is None
else:
assert node.filters.complib == lib
h5table.close()
def test_put_integer(self, setup_path):
# non-date, non-string index
df = DataFrame(np.random.randn(50, 100))
self._check_roundtrip(df, tm.assert_frame_equal, setup_path)
@td.xfail_non_writeable
def test_put_mixed_type(self, setup_path):
df = tm.makeTimeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
# PerformanceWarning
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store.put("df", df)
expected = store.get("df")
tm.assert_frame_equal(expected, df)
@pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
def test_append(self, setup_path):
with ensure_clean_store(setup_path) as store:
            # this is allowed but you almost always don't want to do it
# tables.NaturalNameWarning):
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
tm.assert_frame_equal(store["df1"], df)
_maybe_remove(store, "df2")
store.put("df2", df[:10], format="table")
store.append("df2", df[10:])
tm.assert_frame_equal(store["df2"], df)
_maybe_remove(store, "df3")
store.append("/df3", df[:10])
store.append("/df3", df[10:])
tm.assert_frame_equal(store["df3"], df)
                # this is allowed but you almost always don't want to do it
# tables.NaturalNameWarning
_maybe_remove(store, "/df3 foo")
store.append("/df3 foo", df[:10])
store.append("/df3 foo", df[10:])
tm.assert_frame_equal(store["df3 foo"], df)
                # dtype issues - mixed type in a single object column
df = DataFrame(data=[[1, 2], [0, 1], [1, 2], [0, 0]])
df["mixed_column"] = "testing"
df.loc[2, "mixed_column"] = np.nan
_maybe_remove(store, "df")
store.append("df", df)
tm.assert_frame_equal(store["df"], df)
# uints - test storage of uints
uint_data = DataFrame(
{
"u08": Series(
np.random.randint(0, high=255, size=5), dtype=np.uint8
),
"u16": Series(
np.random.randint(0, high=65535, size=5), dtype=np.uint16
),
"u32": Series(
np.random.randint(0, high=2 ** 30, size=5), dtype=np.uint32
),
"u64": Series(
[2 ** 58, 2 ** 59, 2 ** 60, 2 ** 61, 2 ** 62],
dtype=np.uint64,
),
},
index=np.arange(5),
)
_maybe_remove(store, "uints")
store.append("uints", uint_data)
tm.assert_frame_equal(store["uints"], uint_data)
# uints - test storage of uints in indexable columns
_maybe_remove(store, "uints")
# 64-bit indices not yet supported
store.append("uints", uint_data, data_columns=["u08", "u16", "u32"])
tm.assert_frame_equal(store["uints"], uint_data)
def test_append_series(self, setup_path):
with ensure_clean_store(setup_path) as store:
# basic
ss = tm.makeStringSeries()
ts = tm.makeTimeSeries()
ns = Series(np.arange(100))
store.append("ss", ss)
result = store["ss"]
tm.assert_series_equal(result, ss)
assert result.name is None
store.append("ts", ts)
result = store["ts"]
tm.assert_series_equal(result, ts)
assert result.name is None
ns.name = "foo"
store.append("ns", ns)
result = store["ns"]
tm.assert_series_equal(result, ns)
assert result.name == ns.name
# select on the values
expected = ns[ns > 60]
result = store.select("ns", "foo>60")
tm.assert_series_equal(result, expected)
# select on the index and values
expected = ns[(ns > 70) & (ns.index < 90)]
result = store.select("ns", "foo>70 and index<90")
tm.assert_series_equal(result, expected)
# multi-index
mi = DataFrame(np.random.randn(5, 1), columns=["A"])
mi["B"] = np.arange(len(mi))
mi["C"] = "foo"
mi.loc[3:5, "C"] = "bar"
mi.set_index(["C", "B"], inplace=True)
s = mi.stack()
s.index = s.index.droplevel(2)
store.append("mi", s)
tm.assert_series_equal(store["mi"], s)
def test_store_index_types(self, setup_path):
# GH5386
# test storing various index types
with ensure_clean_store(setup_path) as store:
def check(format, index):
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df.index = index(len(df))
_maybe_remove(store, "df")
store.put("df", df, format=format)
tm.assert_frame_equal(df, store["df"])
for index in [
tm.makeFloatIndex,
tm.makeStringIndex,
tm.makeIntIndex,
tm.makeDateIndex,
]:
check("table", index)
check("fixed", index)
# period index currently broken for table
            # see GH7796 FIXME
check("fixed", tm.makePeriodIndex)
# check('table',tm.makePeriodIndex)
# unicode
index = tm.makeUnicodeIndex
check("table", index)
check("fixed", index)
@pytest.mark.skipif(
not is_platform_little_endian(), reason="reason platform is not little endian"
)
def test_encoding(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A="foo", B="bar"), index=range(5))
df.loc[2, "A"] = np.nan
df.loc[3, "B"] = np.nan
_maybe_remove(store, "df")
store.append("df", df, encoding="ascii")
tm.assert_frame_equal(store["df"], df)
expected = df.reindex(columns=["A"])
            result = store.select("df", Term("columns=A", encoding="ascii"))
import os
import sys
import torch
import pickle
import argparse
import warnings
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sklearn as skl
import tensorflow as tf
from scipy.stats import gamma
from callbacks import RegressionCallback
from regression_data import generate_toy_data
from regression_models import GammaNormalRegression
def write_pickle(data, filename):
# Verify folder structure
is_filename_in_folder = len(filename.split('/')) > 1
if is_filename_in_folder:
assert os.path.exists(os.path.dirname(
filename)), f'The path {os.path.dirname(filename)} does not correspond to any existing path'
with open(filename, 'wb') as outfile:
pickle.dump(data, outfile)
return
class MeanVarianceLogger(object):
def __init__(self):
self.cols_data = ['Algorithm', 'Prior', 'x', 'y']
self.df_data = pd.DataFrame(columns=self.cols_data)
self.cols_eval = ['Algorithm', 'Prior', 'x', 'mean(y|x)', 'std(y|x)']
        self.df_eval = pd.DataFrame(columns=self.cols_eval)
# Import the libraries
import numpy as np
import pandas as pd
# Read the data
dades = pd.read_csv('data.csv', sep=";")
# Add the attribute for the number of agreements
dades['N_acords'] = 1
# Transform the quantitative variables for agreements involving more than one country
atribut = ['','','Loc2ISO','Loc3ISO','Loc4ISO','Loc5ISO','Loc6ISO','Loc7ISO','Loc8ISO','Loc9ISO']
for i in range(2,9):
idx = np.logical_and(pd.notna(dades[atribut[i]]), pd.isna(dades[atribut[i+1]]))
dades.loc[idx,['DevSoc','DevHum','DevInfra','IntFu','N_acords']] = \
dades.loc[idx,['DevSoc','DevHum','DevInfra','IntFu','N_acords']] / i
# Agreements with 9 countries
idx = pd.notna(dades['Loc9ISO'])
import json
import os
import pandas as pd
from sqlalchemy import create_engine
from newstrends import utils
_ROOT_DIR = os.path.abspath(__file__ + "/../../../../../")
_ENGINE = None
def get_engine():
global _ENGINE
if _ENGINE is None:
db_info_path = os.path.join(_ROOT_DIR, 'data/db_info.json')
db_info = json.loads(open(db_info_path).read())
user = db_info['USER']
password = db_info['PASSWORD']
address = db_info['ADDRESS']
port = db_info['PORT']
db = db_info['DB']
_URL = f'mysql+pymysql://{user}:{password}@{address}:{port}/{db}?charset=utf8mb4'
_ENGINE = create_engine(_URL, echo=False, encoding='utf-8', pool_recycle=3600)
return _ENGINE
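# Illustrative (hypothetical) contents of data/db_info.json expected by
# get_engine(); only the keys matter, the values below are placeholders:
#   {"USER": "newsuser", "PASSWORD": "secret", "ADDRESS": "127.0.0.1",
#    "PORT": "3306", "DB": "newstrends"}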
def create_news_table():
query = "create table if not exists news(" \
"`date` DATETIME not null, " \
"publisher VARCHAR(255), " \
"title VARCHAR(255), " \
"author VARCHAR(255), " \
"link VARCHAR(255) UNIQUE, " \
"description TEXT)"
get_engine().execute(query)
def select_all_titles(preprocess):
sql = 'select title from news where publisher != \'뉴시스\''
entries = get_engine().execute(sql)
titles = [e[0] for e in entries]
if preprocess:
titles = utils.preprocess(titles)
return titles
def select_articles(field=None, publishers=None, date_from=None):
if field is None:
field_str = '*'
elif isinstance(field, list):
field_str = ', '.join(field)
else:
field_str = field
sql = f'select {field_str} from news where publisher != \'뉴시스\''
if publishers is not None:
sql += ' and publisher in ({})'.format(
', '.join(f'\'{e}\'' for e in publishers))
if date_from is not None:
sql += ' and date >= "{}"'.format(date_from.strftime('%Y-%m-%d'))
entries = get_engine().execute(sql)
if isinstance(field, str):
return [e[0] for e in entries]
return [e for e in entries]
def update_scores(index, scores):
sql = 'update news ' \
'set pos_score = {},' \
' neu_score = {},' \
' neg_score = {} ' \
'where id = {}'.format(scores[0], scores[1], scores[2], index)
get_engine().execute(sql)
def read_publisher_links(publisher):
query = "select link from news where publisher='{}'"
fetched = get_engine().execute(query.format(publisher)).fetchall()
    df = pd.DataFrame(fetched, columns=['link'])
import pandas as pd
import path_utils
from Evolve import Evolve, replot_evo_dict_from_dir
import traceback as tb
import os, json, shutil
import numpy as np
import matplotlib.pyplot as plt
import itertools
from copy import deepcopy
import pprint as pp
from tabulate import tabulate
import seaborn as sns
import shutil
import psutil, time
import ray
'''
This is very similar to Benchmark.py, but that one was designed (when I had a
previous setup in mind) to run a set of parameters MULTIPLE times each. I.e.,
it would create an Evolve object and do evo_obj.evolve() several times to create
a distribution. Now, there's no "time dependence", so we really just want to be
able to look at separate parameter settings, but only running them once each.
I'm also getting rid of the whole "solved" aspect for now because it's based
on numbers that are hard to explain, making it a bit pointless.
run_param_dict() is the most basic function, just doing an evolution for a passed
param_dict. Other functions basically involve calling it given various inputs.
'''
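# Minimal usage sketch (illustrative only; every key besides env_name is just
# forwarded to Evolve(**params), so the names below follow the docstring
# example rather than a confirmed interface):
#   base_dir = os.path.join(path_utils.get_output_dir(), 'example_run')
#   run_param_dict({'env_name': 'CartPole-v0', 'NN': 'FFNN_multilayer'},
#                  N_gen=100, N_trials=10, base_dir=base_dir)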
################################ Statistics functions
@path_utils.timer
def run_param_dict(param_dict, N_gen, N_trials, base_dir):
'''
Pass a single params dict to run an evolve() of, including the env_name.
Also pass an output_dir, or it will use the default output folder.
This only runs each setting ONCE.
'''
# deepcopy, just to be safer
params = deepcopy(param_dict)
assert 'env_name' in params.keys(), 'Must supply an env_name!'
env_name = params['env_name']
params.pop('env_name')
params['base_dir'] = base_dir
try:
# Run a single parameters setting
e = Evolve(env_name, **params)
evo_dict = e.evolve(N_gen, N_trials=N_trials, print_gen=True)
e.save_all_evo_stats(evo_dict, save_plots=True)
return evo_dict
except:
print(f'\n\nError in evolve with params: {params}. Traceback:\n')
print(tb.format_exc())
print('\n\nAttempting to continue...\n\n')
return {}
@ray.remote
def run_param_dict_wrapper(param_dict, N_gen, N_trials, base_dir):
# If a run_fname_label is provided, use that to create a more informative dir name.
# Otherwise, just use the date.
if 'run_fname_label' in param_dict.keys():
run_fname_label = param_dict['run_fname_label']
else:
run_fname_label = 'vary_params'
# Make plots for this params set
if 'run_plot_label' in param_dict.keys():
run_plot_label = param_dict['run_plot_label']
else:
run_plot_label = run_fname_label
# Run dir for this set of params
params_dir = os.path.join(base_dir, '{}_{}'.format(run_fname_label, path_utils.get_date_str()))
os.mkdir(params_dir)
# Doing this so it just saves directly to this dir, which has a more
# informative name than Evolve.__init__() would create.
param_dict['run_dir'] = params_dir
print('\n\nNow running with params:')
pp.pprint(param_dict, width=1)
print('\n\n')
stats_dict = run_param_dict(param_dict, N_gen, N_trials, base_dir)
return stats_dict
@path_utils.timer
def run_multi_envs(env_list, **kwargs):
'''
Iterates over a list of env names you give it,
running them and recording info.
'''
N_gen = kwargs.get('N_gen', 1000)
N_trials = kwargs.get('N_trials', 1000)
# Create dir for the results of this stats set.
stats_dir = os.path.join(path_utils.get_output_dir(), 'Stats_{}'.format(path_utils.get_date_str()))
os.mkdir(stats_dir)
# Dict to hold results on timing, etc.
stats_dict = {}
for env_name in env_list:
print(f'\nGetting stats for env {env_name} now...\n')
param_dict = deepcopy(kwargs)
param_dict['env_name'] = env_name
stats_dict[env_name] = run_param_dict(param_dict, N_gen, N_trials, stats_dir)
# Save distributions to file
with open(os.path.join(stats_dir, 'multi_env_stats.json'), 'w+') as f:
json.dump(stats_dict, f, indent=4)
def run_classic_control_envs(**kwargs):
'''
Loads gym_envs_info.json. This contains info about the envs we want to analyze.
It then calls run_multi_envs() for the classic control envs.
'''
with open(os.path.join(path_utils.get_src_dir(), 'gym_envs_info.json'), 'r') as f:
envs_dict = json.load(f)
env_list = [k for k,v in envs_dict.items() if v['env_type']=='classic_control']
print(f'Getting stats for: {env_list}')
run_multi_envs(env_list, **kwargs)
def run_param_dict_list(params_dict_list, **kwargs):
'''
Pass this a list of dicts, where each has the different parameters you want
to gather stats for.
It then iterates through this list, doing a run for each dict.
Note that it modifies the passed params_dict_list to add the results to it.
'''
# Create dir for the results of this stats run if one isn't provided.
stats_dir = kwargs.get('stats_dir', None)
if stats_dir is None:
stats_dir = os.path.join(path_utils.get_output_dir(), 'Stats_{}'.format(path_utils.get_date_str()))
os.mkdir(stats_dir)
# Produce results in parallel
for d in params_dict_list:
# For non-ray use
'''d['result'] = run_param_dict_wrapper( d,
kwargs.get('N_gen', 100),
kwargs.get('N_trials', 10),
stats_dir)'''
# For use with ray
d['result_ID'] = run_param_dict_wrapper.remote( d,
kwargs.get('N_gen', 100),
kwargs.get('N_trials', 10),
stats_dir)
# Retrieve results from ID
for d in params_dict_list:
d['stats_dict'] = ray.get(d['result_ID'])
d.pop('result_ID')
#d['stats_dict'] = d['result'] # for non-ray use
#d.pop('result')
# Return passed list, which should have dicts
# modified with the results
return params_dict_list
@path_utils.timer
def run_vary_params(constant_params_dict, vary_params_dict, **kwargs):
'''
This is a convenience function to easily vary parameters for analysis.
You pass it constant_params_dict, which is a dict with the values that
you want to remain constant between runs. Then, pass it vary_params_dict,
which should have each parameter that you want to vary as a list of the values
it should take.
Example:
constant_params_dict = {
'env_name' : 'CartPole-v0',
'N_gen' : 1000,
'N_dist' : 100,
'NN' : 'FFNN_multilayer'
}
vary_params_dict = {
'N_hidden_units' : [2, 4, 8],
'act_fn' : ['tanh', 'relu']
}
This will do 3*2 = 6 runs, for each of the combinations of varying parameters.
'''
# Create informative dir name
vary_params = list(vary_params_dict.keys())
stats_dir = os.path.join(
path_utils.get_output_dir(),
'Stats_vary_{}_{}'.format('_'.join(vary_params), path_utils.get_date_str()))
print(f'\nSaving statistics run to {stats_dir}')
os.mkdir(stats_dir)
# Create runs dir
all_runs_dir = os.path.join(stats_dir, 'all_runs')
print(f'\nSaving all runs to {all_runs_dir}')
os.mkdir(all_runs_dir)
# Create dict of const and vary params, as separate items
all_params = {
'const_params' : constant_params_dict,
'vary_params' : vary_params_dict
}
other_run_params = ['N_gen', 'N_trials']
for p in other_run_params:
if p in kwargs.keys():
all_params[p] = kwargs.get(p, None)
# Save params to file
with open(os.path.join(stats_dir, 'all_params.json'), 'w+') as f:
json.dump(all_params, f, indent=4)
# Flatten list, pass to other function
flat_param_list = vary_params_cross_products(constant_params_dict, vary_params_dict)
flat_param_list = run_param_dict_list(flat_param_list, stats_dir=all_runs_dir, **kwargs)
# Parse results
for d in flat_param_list:
# For now I'll still keep vary_params_stats.csv, but I think it's not
# actually necessary.
# Get rid of this now
d.pop('stats_dict')
# Save results to csv for later parsing/plotting
df = pd.DataFrame(flat_param_list)
print(tabulate(df, headers=df.columns.values, tablefmt='psql'))
df_fname = os.path.join(stats_dir, 'vary_params_stats.csv')
df.to_csv(df_fname, index=False)
################################# Plotting functions
def plot_all_agg_stats(stats_dir):
'''
For plotting all the heatmaps/etc for a stats_dir.
'''
agg_stats_dir = os.path.join(stats_dir, 'agg_stats')
if os.path.exists(agg_stats_dir):
shutil.rmtree(agg_stats_dir)
print(f'\nSaving all aggregate stats to {agg_stats_dir}')
os.mkdir(agg_stats_dir)
all_params_fname = os.path.join(stats_dir, 'all_params.json')
with open(all_params_fname, 'r') as f:
all_params_dict = json.load(f)
# Import all scores
all_scores_fname = os.path.join(stats_dir, 'all_scores.csv')
df = pd.read_csv(all_scores_fname)
vary_params_dict = all_params_dict['vary_params']
const_params_dict = all_params_dict['const_params']
vary_params = list(vary_params_dict.keys())
N_vary_params = len(vary_params)
    # Only needed if at least 2 params were varied.
if N_vary_params >= 2:
# Get envs info to find out percent of runs that "solved" the env.
with open(os.path.join(path_utils.get_src_dir(), 'gym_envs_info.json'), 'r') as f:
envs_dict = json.load(f)
# Create heatmap plots dir
heatmap_dir = os.path.join(agg_stats_dir, 'heatmap_plots')
if os.path.exists(heatmap_dir):
shutil.rmtree(heatmap_dir)
print(f'\nSaving heatmap plots to {heatmap_dir}')
os.mkdir(heatmap_dir)
# Iterate over all unique pairs of vary params, plot heatmaps of them
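        # Example: with vary_params = ['N_hidden_units', 'act_fn', 'env_name'] the
        # pairs are (N_hidden_units, act_fn), (N_hidden_units, env_name) and
        # (act_fn, env_name); the parameter left out of each pair is swept over in
        # the inner itertools.product() loop below.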
for pair in itertools.combinations(vary_params, 2):
print(f'Making heatmaps for {pair}')
other_params_flat = [(k, v) for k,v in vary_params_dict.items() if k not in pair]
other_params = [x[0] for x in other_params_flat]
other_vals = [x[1] for x in other_params_flat]
print(f'other params: {other_params}')
# Create dir for specific pivot
pivot_name = 'vary_{}_{}'.format(*pair)
pivot_dir = os.path.join(heatmap_dir, pivot_name)
os.mkdir(pivot_dir)
# Select for each of the combos of the other params.
for other_params_set in itertools.product(*other_vals):
# This part just selects for the rows that have the correct
# params/etc.
other_sel_dict = dict(zip(other_params, other_params_set))
fname_label = path_utils.param_dict_to_fname_str(other_sel_dict)
df_sel = df.loc[(df[list(other_sel_dict)] == pd.Series(other_sel_dict)).all(axis=1)]
df_no_scores = df_sel.drop('all_scores', axis=1)
#print(df_no_scores.columns.values)
df_params_only = df_no_scores.drop_duplicates()
all_row_dfs = []
# Iterate through table, for each run label, find its corresponding dir,
# walk through it, get all its scores, create a dataframe from them,
# then concatenate all these df's into a big one, that we can plot.
for index, row in df_params_only.iterrows():
# Only get the params varied, turn them into a dict
row_dict = row[df_no_scores.columns.values].to_dict()
df_row = df_sel.loc[(df[list(row_dict)] == pd.Series(row_dict)).all(axis=1)]
row_scores = df_row['all_scores'].values
row_dict['index'] = index
row_dict['mean_score'] = np.mean(row_scores)
row_dict['best_score'] = np.max(row_scores)
solved_reward = envs_dict[row_dict['env_name']]['solved_avg_reward']
                    N_solved_scores = np.count_nonzero(row_scores >= solved_reward)
row_dict['percent_solved_scores'] = N_solved_scores/len(row_scores)
# pandas has the nice perk that if you create a df from a dict where
# some of the entries are constants and one entry is a list, it duplicates
# the constant values.
row_df = | pd.DataFrame(row_dict, index=[index]) | pandas.DataFrame |
import sys, os
import numpy as np
import pandas as pd
import scipy.io as sio
# met mast functions and utilities
sys.path.append('../')
import met_funcs as MET
# import vis as vis
import utils as utils
import pickle as pkl
import time
# paths (must mount volume smb://nrel.gov/shared/wind/WindWeb/MetData/135mData/)
towerID = 'M5'
metDataPath = '/Volumes/135mData/{}Twr/20Hz/mat/'.format(towerID)
savepath = '/Users/nhamilto/Documents/Wake_Dynamics/SiteChar/data/IEC_4'
try:
os.makedirs(savepath)
except:
pass
# setup IEC parameters for NWTC
params = MET.setup_IEC_params()
# load some threshold data
alpha_pos = np.load(
'/Users/nhamilto/Documents/Wake_Dynamics/SiteChar/data/pos_alpha_limit.npy'
)
alpha_neg = np.load(
'/Users/nhamilto/Documents/Wake_Dynamics/SiteChar/data/neg_alpha_limit.npy'
)
alpha_reference_velocity = np.load(
'/Users/nhamilto/Documents/Wake_Dynamics/SiteChar/data/alpha_reference_velocity.npy'
)
#### Detect over given date range
# time range
years = [int(a) for a in np.arange(2014, 2019, 1)] #
months = [int(a) for a in np.arange(1, 12.1, 1)]
days = [int(a) for a in np.arange(1, 31.1, 1)]
# years = [2015] #
# months = [1, 2, 3]
# days = [int(a) for a in np.arange(1, 31.1, 1)]
yearmonth = []
for year in years:
for month in months:
yearmonth.extend([(year, month)])
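# yearmonth is now a flat list of (year, month) tuples covering the requested range,
# e.g. [(2014, 1), (2014, 2), ..., (2018, 12)].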
for year, month in yearmonth:
# timing switch
start_time = time.time()
if (year == 2014) & (month in list((range(5)))):
        continue
# begin empty dataframes for events
Ve01events = pd.DataFrame()
Ve50events = pd.DataFrame()
EOGevents = | pd.DataFrame() | pandas.DataFrame |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
from pandas.api.types import CategoricalDtype
import pyspark.pandas as ps
from pyspark.testing.pandasutils import PandasOnSparkTestCase, TestUtils
class CategoricalTest(PandasOnSparkTestCase, TestUtils):
@property
def pdf(self):
return pd.DataFrame(
{
"a": | pd.Categorical([1, 2, 3, 1, 2, 3]) | pandas.Categorical |
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
import nose
import numpy as np
from numpy import nan
import pandas as pd
from distutils.version import LooseVersion
from pandas import (Index, Series, DataFrame, Panel, isnull,
date_range, period_range)
from pandas.core.index import MultiIndex
import pandas.core.common as com
from pandas.compat import range, zip
from pandas import compat
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
assert_panel_equal,
assert_equal)
import pandas.util.testing as tm
def _skip_if_no_pchip():
try:
from scipy.interpolate import pchip_interpolate # noqa
except ImportError:
raise nose.SkipTest('scipy.interpolate.pchip missing')
# ----------------------------------------------------------------------
# Generic types test cases
class Generic(object):
_multiprocess_can_split_ = True
def setUp(self):
pass
@property
def _ndim(self):
return self._typ._AXIS_LEN
def _axes(self):
""" return the axes for my object typ """
return self._typ._AXIS_ORDERS
def _construct(self, shape, value=None, dtype=None, **kwargs):
""" construct an object for the given shape
if value is specified use that if its a scalar
if value is an array, repeat it as needed """
if isinstance(shape, int):
shape = tuple([shape] * self._ndim)
if value is not None:
if np.isscalar(value):
if value == 'empty':
arr = None
# remove the info axis
kwargs.pop(self._typ._info_axis_name, None)
else:
arr = np.empty(shape, dtype=dtype)
arr.fill(value)
else:
fshape = np.prod(shape)
arr = value.ravel()
                new_shape = fshape // arr.shape[0]
if fshape % arr.shape[0] != 0:
raise Exception("invalid value passed in _construct")
arr = np.repeat(arr, new_shape).reshape(shape)
else:
arr = np.random.randn(*shape)
return self._typ(arr, dtype=dtype, **kwargs)
def _compare(self, result, expected):
self._comparator(result, expected)
def test_rename(self):
# single axis
for axis in self._axes():
kwargs = {axis: list('ABCD')}
obj = self._construct(4, **kwargs)
# no values passed
# self.assertRaises(Exception, o.rename(str.lower))
# rename a single axis
result = obj.rename(**{axis: str.lower})
expected = obj.copy()
setattr(expected, axis, list('abcd'))
self._compare(result, expected)
# multiple axes at once
def test_get_numeric_data(self):
n = 4
kwargs = {}
for i in range(self._ndim):
kwargs[self._typ._AXIS_NAMES[i]] = list(range(n))
# get the numeric data
o = self._construct(n, **kwargs)
result = o._get_numeric_data()
self._compare(result, o)
# non-inclusion
result = o._get_bool_data()
expected = self._construct(n, value='empty', **kwargs)
self._compare(result, expected)
# get the bool data
arr = np.array([True, True, False, True])
o = self._construct(n, value=arr, **kwargs)
result = o._get_numeric_data()
self._compare(result, o)
        # _get_numeric_data includes _get_bool_data, so we can't test for
        # non-inclusion
def test_get_default(self):
# GH 7725
d0 = "a", "b", "c", "d"
d1 = np.arange(4, dtype='int64')
others = "e", 10
for data, index in ((d0, d1), (d1, d0)):
s = Series(data, index=index)
for i, d in zip(index, data):
self.assertEqual(s.get(i), d)
self.assertEqual(s.get(i, d), d)
self.assertEqual(s.get(i, "z"), d)
for other in others:
self.assertEqual(s.get(other, "z"), "z")
self.assertEqual(s.get(other, other), other)
def test_nonzero(self):
# GH 4633
# look at the boolean/nonzero behavior for objects
obj = self._construct(shape=4)
self.assertRaises(ValueError, lambda: bool(obj == 0))
self.assertRaises(ValueError, lambda: bool(obj == 1))
self.assertRaises(ValueError, lambda: bool(obj))
obj = self._construct(shape=4, value=1)
self.assertRaises(ValueError, lambda: bool(obj == 0))
self.assertRaises(ValueError, lambda: bool(obj == 1))
self.assertRaises(ValueError, lambda: bool(obj))
obj = self._construct(shape=4, value=np.nan)
self.assertRaises(ValueError, lambda: bool(obj == 0))
self.assertRaises(ValueError, lambda: bool(obj == 1))
self.assertRaises(ValueError, lambda: bool(obj))
# empty
obj = self._construct(shape=0)
self.assertRaises(ValueError, lambda: bool(obj))
# invalid behaviors
obj1 = self._construct(shape=4, value=1)
obj2 = self._construct(shape=4, value=1)
def f():
if obj1:
com.pprint_thing("this works and shouldn't")
self.assertRaises(ValueError, f)
self.assertRaises(ValueError, lambda: obj1 and obj2)
self.assertRaises(ValueError, lambda: obj1 or obj2)
self.assertRaises(ValueError, lambda: not obj1)
def test_numpy_1_7_compat_numeric_methods(self):
# GH 4435
        # numpy in 1.7 tries to pass additional arguments to pandas functions
o = self._construct(shape=4)
for op in ['min', 'max', 'max', 'var', 'std', 'prod', 'sum', 'cumsum',
'cumprod', 'median', 'skew', 'kurt', 'compound', 'cummax',
'cummin', 'all', 'any']:
f = getattr(np, op, None)
if f is not None:
f(o)
def test_downcast(self):
# test close downcasting
o = self._construct(shape=4, value=9, dtype=np.int64)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
self._compare(result, o)
o = self._construct(shape=4, value=9.)
expected = o.astype(np.int64)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
self._compare(result, expected)
o = self._construct(shape=4, value=9.5)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
self._compare(result, o)
# are close
o = self._construct(shape=4, value=9.000000000005)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
expected = o.astype(np.int64)
self._compare(result, expected)
def test_constructor_compound_dtypes(self):
# GH 5191
        # compound dtypes should raise NotImplementedError
def f(dtype):
return self._construct(shape=3, dtype=dtype)
self.assertRaises(NotImplementedError, f, [("A", "datetime64[h]"),
("B", "str"),
("C", "int32")])
# these work (though results may be unexpected)
f('int64')
f('float64')
f('M8[ns]')
def check_metadata(self, x, y=None):
for m in x._metadata:
v = getattr(x, m, None)
if y is None:
self.assertIsNone(v)
else:
self.assertEqual(v, getattr(y, m, None))
def test_metadata_propagation(self):
# check that the metadata matches up on the resulting ops
o = self._construct(shape=3)
o.name = 'foo'
o2 = self._construct(shape=3)
o2.name = 'bar'
# TODO
# Once panel can do non-trivial combine operations
        # (currently there is a raise in the Panel arith_ops to prevent
# this, though it actually does work)
# can remove all of these try: except: blocks on the actual operations
# ----------
# preserving
# ----------
# simple ops with scalars
for op in ['__add__', '__sub__', '__truediv__', '__mul__']:
result = getattr(o, op)(1)
self.check_metadata(o, result)
# ops with like
for op in ['__add__', '__sub__', '__truediv__', '__mul__']:
try:
result = getattr(o, op)(o)
self.check_metadata(o, result)
except (ValueError, AttributeError):
pass
# simple boolean
for op in ['__eq__', '__le__', '__ge__']:
v1 = getattr(o, op)(o)
self.check_metadata(o, v1)
try:
self.check_metadata(o, v1 & v1)
except (ValueError):
pass
try:
self.check_metadata(o, v1 | v1)
except (ValueError):
pass
# combine_first
try:
result = o.combine_first(o2)
self.check_metadata(o, result)
except (AttributeError):
pass
# ---------------------------
# non-preserving (by default)
# ---------------------------
# add non-like
try:
result = o + o2
self.check_metadata(result)
except (ValueError, AttributeError):
pass
# simple boolean
for op in ['__eq__', '__le__', '__ge__']:
# this is a name matching op
v1 = getattr(o, op)(o)
v2 = getattr(o, op)(o2)
self.check_metadata(v2)
try:
self.check_metadata(v1 & v2)
except (ValueError):
pass
try:
self.check_metadata(v1 | v2)
except (ValueError):
pass
def test_head_tail(self):
# GH5370
o = self._construct(shape=10)
# check all index types
for index in [tm.makeFloatIndex, tm.makeIntIndex, tm.makeStringIndex,
tm.makeUnicodeIndex, tm.makeDateIndex,
tm.makePeriodIndex]:
axis = o._get_axis_name(0)
setattr(o, axis, index(len(getattr(o, axis))))
# Panel + dims
try:
o.head()
except (NotImplementedError):
raise nose.SkipTest('not implemented on {0}'.format(
o.__class__.__name__))
self._compare(o.head(), o.iloc[:5])
self._compare(o.tail(), o.iloc[-5:])
# 0-len
self._compare(o.head(0), o.iloc[0:0])
self._compare(o.tail(0), o.iloc[0:0])
# bounded
self._compare(o.head(len(o) + 1), o)
self._compare(o.tail(len(o) + 1), o)
# neg index
self._compare(o.head(-3), o.head(7))
self._compare(o.tail(-3), o.tail(7))
def test_sample(self):
# Fixes issue: 2419
o = self._construct(shape=10)
###
# Check behavior of random_state argument
###
        # Check for stability when it receives a seed or random state -- run 10
        # times.
for test in range(10):
seed = np.random.randint(0, 100)
self._compare(
o.sample(n=4, random_state=seed), o.sample(n=4,
random_state=seed))
self._compare(
o.sample(frac=0.7, random_state=seed), o.sample(
frac=0.7, random_state=seed))
self._compare(
o.sample(n=4, random_state=np.random.RandomState(test)),
o.sample(n=4, random_state=np.random.RandomState(test)))
self._compare(
o.sample(frac=0.7, random_state=np.random.RandomState(test)),
o.sample(frac=0.7, random_state=np.random.RandomState(test)))
# Check for error when random_state argument invalid.
with tm.assertRaises(ValueError):
o.sample(random_state='astring!')
###
# Check behavior of `frac` and `N`
###
# Giving both frac and N throws error
with tm.assertRaises(ValueError):
o.sample(n=3, frac=0.3)
# Check that raises right error for negative lengths
with tm.assertRaises(ValueError):
o.sample(n=-3)
with tm.assertRaises(ValueError):
o.sample(frac=-0.3)
# Make sure float values of `n` give error
with tm.assertRaises(ValueError):
o.sample(n=3.2)
# Check lengths are right
self.assertTrue(len(o.sample(n=4) == 4))
self.assertTrue(len(o.sample(frac=0.34) == 3))
self.assertTrue(len(o.sample(frac=0.36) == 4))
###
# Check weights
###
# Weight length must be right
with tm.assertRaises(ValueError):
o.sample(n=3, weights=[0, 1])
with tm.assertRaises(ValueError):
bad_weights = [0.5] * 11
o.sample(n=3, weights=bad_weights)
with tm.assertRaises(ValueError):
bad_weight_series = Series([0, 0, 0.2])
o.sample(n=4, weights=bad_weight_series)
# Check won't accept negative weights
with tm.assertRaises(ValueError):
bad_weights = [-0.1] * 10
o.sample(n=3, weights=bad_weights)
# Check inf and -inf throw errors:
with tm.assertRaises(ValueError):
weights_with_inf = [0.1] * 10
weights_with_inf[0] = np.inf
o.sample(n=3, weights=weights_with_inf)
with tm.assertRaises(ValueError):
weights_with_ninf = [0.1] * 10
weights_with_ninf[0] = -np.inf
o.sample(n=3, weights=weights_with_ninf)
# All zeros raises errors
zero_weights = [0] * 10
with tm.assertRaises(ValueError):
o.sample(n=3, weights=zero_weights)
# All missing weights
nan_weights = [np.nan] * 10
with tm.assertRaises(ValueError):
o.sample(n=3, weights=nan_weights)
# A few dataframe test with degenerate weights.
easy_weight_list = [0] * 10
easy_weight_list[5] = 1
df = pd.DataFrame({'col1': range(10, 20),
'col2': range(20, 30),
'colString': ['a'] * 10,
'easyweights': easy_weight_list})
sample1 = df.sample(n=1, weights='easyweights')
assert_frame_equal(sample1, df.iloc[5:6])
# Ensure proper error if string given as weight for Series, panel, or
# DataFrame with axis = 1.
s = Series(range(10))
with tm.assertRaises(ValueError):
s.sample(n=3, weights='weight_column')
panel = pd.Panel(items=[0, 1, 2], major_axis=[2, 3, 4],
minor_axis=[3, 4, 5])
with tm.assertRaises(ValueError):
panel.sample(n=1, weights='weight_column')
with tm.assertRaises(ValueError):
df.sample(n=1, weights='weight_column', axis=1)
# Check weighting key error
with tm.assertRaises(KeyError):
df.sample(n=3, weights='not_a_real_column_name')
# Check np.nan are replaced by zeros.
weights_with_nan = [np.nan] * 10
weights_with_nan[5] = 0.5
self._compare(
o.sample(n=1, axis=0, weights=weights_with_nan), o.iloc[5:6])
# Check None are also replaced by zeros.
weights_with_None = [None] * 10
weights_with_None[5] = 0.5
self._compare(
o.sample(n=1, axis=0, weights=weights_with_None), o.iloc[5:6])
# Check that re-normalizes weights that don't sum to one.
weights_less_than_1 = [0] * 10
weights_less_than_1[0] = 0.5
tm.assert_frame_equal(
df.sample(n=1, weights=weights_less_than_1), df.iloc[:1])
###
# Test axis argument
###
# Test axis argument
df = pd.DataFrame({'col1': range(10), 'col2': ['a'] * 10})
second_column_weight = [0, 1]
assert_frame_equal(
df.sample(n=1, axis=1, weights=second_column_weight), df[['col2']])
# Different axis arg types
assert_frame_equal(df.sample(n=1, axis='columns',
weights=second_column_weight),
df[['col2']])
weight = [0] * 10
weight[5] = 0.5
assert_frame_equal(df.sample(n=1, axis='rows', weights=weight),
df.iloc[5:6])
assert_frame_equal(df.sample(n=1, axis='index', weights=weight),
df.iloc[5:6])
# Check out of range axis values
with tm.assertRaises(ValueError):
df.sample(n=1, axis=2)
with tm.assertRaises(ValueError):
df.sample(n=1, axis='not_a_name')
with tm.assertRaises(ValueError):
s = pd.Series(range(10))
s.sample(n=1, axis=1)
# Test weight length compared to correct axis
with tm.assertRaises(ValueError):
df.sample(n=1, axis=1, weights=[0.5] * 10)
# Check weights with axis = 1
easy_weight_list = [0] * 3
easy_weight_list[2] = 1
df = pd.DataFrame({'col1': range(10, 20),
'col2': range(20, 30),
'colString': ['a'] * 10})
sample1 = df.sample(n=1, axis=1, weights=easy_weight_list)
assert_frame_equal(sample1, df[['colString']])
# Test default axes
p = pd.Panel(items=['a', 'b', 'c'], major_axis=[2, 4, 6],
minor_axis=[1, 3, 5])
assert_panel_equal(
p.sample(n=3, random_state=42), p.sample(n=3, axis=1,
random_state=42))
assert_frame_equal(
df.sample(n=3, random_state=42), df.sample(n=3, axis=0,
random_state=42))
# Test that function aligns weights with frame
df = DataFrame(
{'col1': [5, 6, 7],
'col2': ['a', 'b', 'c'], }, index=[9, 5, 3])
s = Series([1, 0, 0], index=[3, 5, 9])
assert_frame_equal(df.loc[[3]], df.sample(1, weights=s))
# Weights have index values to be dropped because not in
# sampled DataFrame
s2 = Series([0.001, 0, 10000], index=[3, 5, 10])
assert_frame_equal(df.loc[[3]], df.sample(1, weights=s2))
        # Weights have empty values to be filled with zeros
s3 = Series([0.01, 0], index=[3, 5])
assert_frame_equal(df.loc[[3]], df.sample(1, weights=s3))
# No overlap in weight and sampled DataFrame indices
s4 = Series([1, 0], index=[1, 2])
with tm.assertRaises(ValueError):
df.sample(1, weights=s4)
def test_size_compat(self):
# GH8846
# size property should be defined
o = self._construct(shape=10)
self.assertTrue(o.size == np.prod(o.shape))
self.assertTrue(o.size == 10 ** len(o.axes))
def test_split_compat(self):
# xref GH8846
o = self._construct(shape=10)
self.assertTrue(len(np.array_split(o, 5)) == 5)
self.assertTrue(len(np.array_split(o, 2)) == 2)
def test_unexpected_keyword(self): # GH8597
from pandas.util.testing import assertRaisesRegexp
df = DataFrame(np.random.randn(5, 2), columns=['jim', 'joe'])
ca = pd.Categorical([0, 0, 2, 2, 3, np.nan])
ts = df['joe'].copy()
ts[2] = np.nan
with assertRaisesRegexp(TypeError, 'unexpected keyword'):
df.drop('joe', axis=1, in_place=True)
with assertRaisesRegexp(TypeError, 'unexpected keyword'):
df.reindex([1, 0], inplace=True)
with assertRaisesRegexp(TypeError, 'unexpected keyword'):
ca.fillna(0, inplace=True)
with assertRaisesRegexp(TypeError, 'unexpected keyword'):
ts.fillna(0, in_place=True)
class TestSeries(tm.TestCase, Generic):
_typ = Series
_comparator = lambda self, x, y: assert_series_equal(x, y)
def setUp(self):
self.ts = tm.makeTimeSeries() # Was at top level in test_series
self.ts.name = 'ts'
self.series = tm.makeStringSeries()
self.series.name = 'series'
def test_rename_mi(self):
s = Series([11, 21, 31],
index=MultiIndex.from_tuples(
[("A", x) for x in ["a", "B", "c"]]))
s.rename(str.lower)
def test_get_numeric_data_preserve_dtype(self):
# get the numeric data
o = Series([1, 2, 3])
result = o._get_numeric_data()
self._compare(result, o)
o = Series([1, '2', 3.])
result = o._get_numeric_data()
expected = Series([], dtype=object, index=pd.Index([], dtype=object))
self._compare(result, expected)
o = Series([True, False, True])
result = o._get_numeric_data()
self._compare(result, o)
o = Series([True, False, True])
result = o._get_bool_data()
self._compare(result, o)
o = Series(date_range('20130101', periods=3))
result = o._get_numeric_data()
expected = Series([], dtype='M8[ns]', index=pd.Index([], dtype=object))
self._compare(result, expected)
def test_nonzero_single_element(self):
# allow single item via bool method
s = Series([True])
self.assertTrue(s.bool())
s = Series([False])
self.assertFalse(s.bool())
# single item nan to raise
for s in [Series([np.nan]), Series([pd.NaT]), Series([True]),
Series([False])]:
self.assertRaises(ValueError, lambda: bool(s))
for s in [Series([np.nan]), Series([pd.NaT])]:
self.assertRaises(ValueError, lambda: s.bool())
# multiple bool are still an error
for s in [Series([True, True]), Series([False, False])]:
self.assertRaises(ValueError, lambda: bool(s))
self.assertRaises(ValueError, lambda: s.bool())
# single non-bool are an error
for s in [Series([1]), Series([0]), Series(['a']), Series([0.0])]:
self.assertRaises(ValueError, lambda: bool(s))
self.assertRaises(ValueError, lambda: s.bool())
def test_metadata_propagation_indiv(self):
# check that the metadata matches up on the resulting ops
o = Series(range(3), range(3))
o.name = 'foo'
o2 = Series(range(3), range(3))
o2.name = 'bar'
result = o.T
self.check_metadata(o, result)
# resample
ts = Series(np.random.rand(1000),
index=date_range('20130101', periods=1000, freq='s'),
name='foo')
result = ts.resample('1T').mean()
self.check_metadata(ts, result)
result = ts.resample('1T').min()
self.check_metadata(ts, result)
result = ts.resample('1T').apply(lambda x: x.sum())
self.check_metadata(ts, result)
_metadata = Series._metadata
_finalize = Series.__finalize__
Series._metadata = ['name', 'filename']
o.filename = 'foo'
o2.filename = 'bar'
def finalize(self, other, method=None, **kwargs):
for name in self._metadata:
if method == 'concat' and name == 'filename':
value = '+'.join([getattr(
o, name) for o in other.objs if getattr(o, name, None)
])
object.__setattr__(self, name, value)
else:
object.__setattr__(self, name, getattr(other, name, None))
return self
Series.__finalize__ = finalize
result = pd.concat([o, o2])
self.assertEqual(result.filename, 'foo+bar')
self.assertIsNone(result.name)
# reset
Series._metadata = _metadata
Series.__finalize__ = _finalize
def test_interpolate(self):
ts = Series(np.arange(len(self.ts), dtype=float), self.ts.index)
ts_copy = ts.copy()
ts_copy[5:10] = np.NaN
linear_interp = ts_copy.interpolate(method='linear')
self.assert_numpy_array_equal(linear_interp, ts)
ord_ts = Series([d.toordinal() for d in self.ts.index],
index=self.ts.index).astype(float)
ord_ts_copy = ord_ts.copy()
ord_ts_copy[5:10] = np.NaN
time_interp = ord_ts_copy.interpolate(method='time')
self.assert_numpy_array_equal(time_interp, ord_ts)
# try time interpolation on a non-TimeSeries
# Only raises ValueError if there are NaNs.
non_ts = self.series.copy()
non_ts[0] = np.NaN
self.assertRaises(ValueError, non_ts.interpolate, method='time')
def test_interp_regression(self):
tm._skip_if_no_scipy()
_skip_if_no_pchip()
ser = Series(np.sort(np.random.uniform(size=100)))
# interpolate at new_index
new_index = ser.index.union(Index([49.25, 49.5, 49.75, 50.25, 50.5,
50.75]))
interp_s = ser.reindex(new_index).interpolate(method='pchip')
# does not blow up, GH5977
interp_s[49:51]
def test_interpolate_corners(self):
s = Series([np.nan, np.nan])
assert_series_equal(s.interpolate(), s)
s = Series([]).interpolate()
assert_series_equal(s.interpolate(), s)
tm._skip_if_no_scipy()
s = Series([np.nan, np.nan])
assert_series_equal(s.interpolate(method='polynomial', order=1), s)
s = Series([]).interpolate()
assert_series_equal(s.interpolate(method='polynomial', order=1), s)
def test_interpolate_index_values(self):
s = Series(np.nan, index=np.sort(np.random.rand(30)))
s[::3] = np.random.randn(10)
vals = s.index.values.astype(float)
result = s.interpolate(method='index')
expected = s.copy()
bad = isnull(expected.values)
good = ~bad
expected = Series(np.interp(vals[bad], vals[good],
s.values[good]),
index=s.index[bad])
assert_series_equal(result[bad], expected)
# 'values' is synonymous with 'index' for the method kwarg
other_result = s.interpolate(method='values')
assert_series_equal(other_result, result)
assert_series_equal(other_result[bad], expected)
def test_interpolate_non_ts(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
with tm.assertRaises(ValueError):
s.interpolate(method='time')
# New interpolation tests
def test_nan_interpolate(self):
s = Series([0, 1, np.nan, 3])
result = s.interpolate()
expected = Series([0., 1., 2., 3.])
assert_series_equal(result, expected)
tm._skip_if_no_scipy()
result = s.interpolate(method='polynomial', order=1)
assert_series_equal(result, expected)
def test_nan_irregular_index(self):
s = Series([1, 2, np.nan, 4], index=[1, 3, 5, 9])
result = s.interpolate()
expected = Series([1., 2., 3., 4.], index=[1, 3, 5, 9])
assert_series_equal(result, expected)
def test_nan_str_index(self):
s = Series([0, 1, 2, np.nan], index=list('abcd'))
result = s.interpolate()
expected = Series([0., 1., 2., 2.], index=list('abcd'))
assert_series_equal(result, expected)
def test_interp_quad(self):
tm._skip_if_no_scipy()
sq = Series([1, 4, np.nan, 16], index=[1, 2, 3, 4])
result = sq.interpolate(method='quadratic')
expected = Series([1., 4., 9., 16.], index=[1, 2, 3, 4])
assert_series_equal(result, expected)
def test_interp_scipy_basic(self):
tm._skip_if_no_scipy()
s = Series([1, 3, np.nan, 12, np.nan, 25])
# slinear
expected = Series([1., 3., 7.5, 12., 18.5, 25.])
result = s.interpolate(method='slinear')
assert_series_equal(result, expected)
result = s.interpolate(method='slinear', downcast='infer')
assert_series_equal(result, expected)
# nearest
expected = Series([1, 3, 3, 12, 12, 25])
result = s.interpolate(method='nearest')
assert_series_equal(result, expected.astype('float'))
result = s.interpolate(method='nearest', downcast='infer')
assert_series_equal(result, expected)
# zero
expected = Series([1, 3, 3, 12, 12, 25])
result = s.interpolate(method='zero')
assert_series_equal(result, expected.astype('float'))
result = s.interpolate(method='zero', downcast='infer')
assert_series_equal(result, expected)
# quadratic
expected = Series([1, 3., 6.769231, 12., 18.230769, 25.])
result = s.interpolate(method='quadratic')
assert_series_equal(result, expected)
result = s.interpolate(method='quadratic', downcast='infer')
assert_series_equal(result, expected)
# cubic
expected = Series([1., 3., 6.8, 12., 18.2, 25.])
result = s.interpolate(method='cubic')
assert_series_equal(result, expected)
def test_interp_limit(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
expected = Series([1., 3., 5., 7., np.nan, 11.])
result = s.interpolate(method='linear', limit=2)
assert_series_equal(result, expected)
def test_interp_limit_forward(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
# Provide 'forward' (the default) explicitly here.
expected = Series([1., 3., 5., 7., np.nan, 11.])
result = s.interpolate(method='linear', limit=2,
limit_direction='forward')
assert_series_equal(result, expected)
result = s.interpolate(method='linear', limit=2,
limit_direction='FORWARD')
assert_series_equal(result, expected)
def test_interp_limit_bad_direction(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
self.assertRaises(ValueError, s.interpolate, method='linear', limit=2,
limit_direction='abc')
# raises an error even if no limit is specified.
self.assertRaises(ValueError, s.interpolate, method='linear',
limit_direction='abc')
def test_interp_limit_direction(self):
# These tests are for issue #9218 -- fill NaNs in both directions.
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
expected = Series([1., 3., np.nan, 7., 9., 11.])
result = s.interpolate(method='linear', limit=2,
limit_direction='backward')
assert_series_equal(result, expected)
expected = Series([1., 3., 5., np.nan, 9., 11.])
result = s.interpolate(method='linear', limit=1,
limit_direction='both')
assert_series_equal(result, expected)
# Check that this works on a longer series of nans.
s = Series([1, 3, np.nan, np.nan, np.nan, 7, 9, np.nan, np.nan, 12,
np.nan])
expected = Series([1., 3., 4., 5., 6., 7., 9., 10., 11., 12., 12.])
result = s.interpolate(method='linear', limit=2,
limit_direction='both')
assert_series_equal(result, expected)
expected = Series([1., 3., 4., np.nan, 6., 7., 9., 10., 11., 12., 12.])
result = s.interpolate(method='linear', limit=1,
limit_direction='both')
assert_series_equal(result, expected)
def test_interp_limit_to_ends(self):
        # These tests are for issue #10420 -- flow back to beginning.
s = Series([np.nan, np.nan, 5, 7, 9, np.nan])
expected = Series([5., 5., 5., 7., 9., np.nan])
result = s.interpolate(method='linear', limit=2,
limit_direction='backward')
assert_series_equal(result, expected)
expected = Series([5., 5., 5., 7., 9., 9.])
result = s.interpolate(method='linear', limit=2,
limit_direction='both')
assert_series_equal(result, expected)
def test_interp_limit_before_ends(self):
        # These tests are for issue #11115 -- limit ends properly.
s = Series([np.nan, np.nan, 5, 7, np.nan, np.nan])
expected = Series([np.nan, np.nan, 5., 7., 7., np.nan])
result = s.interpolate(method='linear', limit=1,
limit_direction='forward')
assert_series_equal(result, expected)
expected = Series([np.nan, 5., 5., 7., np.nan, np.nan])
result = s.interpolate(method='linear', limit=1,
limit_direction='backward')
assert_series_equal(result, expected)
expected = Series([np.nan, 5., 5., 7., 7., np.nan])
result = s.interpolate(method='linear', limit=1,
limit_direction='both')
assert_series_equal(result, expected)
def test_interp_all_good(self):
# scipy
tm._skip_if_no_scipy()
s = Series([1, 2, 3])
result = s.interpolate(method='polynomial', order=1)
assert_series_equal(result, s)
# non-scipy
result = s.interpolate()
assert_series_equal(result, s)
def test_interp_multiIndex(self):
idx = MultiIndex.from_tuples([(0, 'a'), (1, 'b'), (2, 'c')])
s = Series([1, 2, np.nan], index=idx)
expected = s.copy()
expected.loc[2] = 2
result = s.interpolate()
assert_series_equal(result, expected)
tm._skip_if_no_scipy()
with tm.assertRaises(ValueError):
s.interpolate(method='polynomial', order=1)
def test_interp_nonmono_raise(self):
tm._skip_if_no_scipy()
s = Series([1, np.nan, 3], index=[0, 2, 1])
with tm.assertRaises(ValueError):
s.interpolate(method='krogh')
def test_interp_datetime64(self):
tm._skip_if_no_scipy()
df = Series([1, np.nan, 3], index=date_range('1/1/2000', periods=3))
result = df.interpolate(method='nearest')
expected = Series([1., 1., 3.],
index=date_range('1/1/2000', periods=3))
assert_series_equal(result, expected)
def test_interp_limit_no_nans(self):
# GH 7173
s = pd.Series([1., 2., 3.])
result = s.interpolate(limit=1)
expected = s
assert_series_equal(result, expected)
def test_describe(self):
self.series.describe()
self.ts.describe()
def test_describe_objects(self):
s = Series(['a', 'b', 'b', np.nan, np.nan, np.nan, 'c', 'd', 'a', 'a'])
result = s.describe()
expected = Series({'count': 7, 'unique': 4,
'top': 'a', 'freq': 3}, index=result.index)
assert_series_equal(result, expected)
dt = list(self.ts.index)
dt.append(dt[0])
ser = Series(dt)
rs = ser.describe()
min_date = min(dt)
max_date = max(dt)
xp = Series({'count': len(dt),
'unique': len(self.ts.index),
'first': min_date, 'last': max_date, 'freq': 2,
'top': min_date}, index=rs.index)
assert_series_equal(rs, xp)
def test_describe_empty(self):
result = pd.Series().describe()
self.assertEqual(result['count'], 0)
self.assertTrue(result.drop('count').isnull().all())
nanSeries = Series([np.nan])
nanSeries.name = 'NaN'
result = nanSeries.describe()
self.assertEqual(result['count'], 0)
self.assertTrue(result.drop('count').isnull().all())
def test_describe_none(self):
noneSeries = Series([None])
noneSeries.name = 'None'
expected = Series([0, 0], index=['count', 'unique'], name='None')
assert_series_equal(noneSeries.describe(), expected)
class TestDataFrame(tm.TestCase, Generic):
_typ = DataFrame
_comparator = lambda self, x, y: assert_frame_equal(x, y)
def test_rename_mi(self):
df = DataFrame([
11, 21, 31
], index=MultiIndex.from_tuples([("A", x) for x in ["a", "B", "c"]]))
df.rename(str.lower)
def test_nonzero_single_element(self):
# allow single item via bool method
df = DataFrame([[True]])
self.assertTrue(df.bool())
df = DataFrame([[False]])
self.assertFalse(df.bool())
df = DataFrame([[False, False]])
self.assertRaises(ValueError, lambda: df.bool())
self.assertRaises(ValueError, lambda: bool(df))
def test_get_numeric_data_preserve_dtype(self):
# get the numeric data
o = DataFrame({'A': [1, '2', 3.]})
result = o._get_numeric_data()
expected = | DataFrame(index=[0, 1, 2], dtype=object) | pandas.DataFrame |
import argparse
import os
import re
from pathlib import Path
import pandas as pd
import json
import matplotlib.pyplot as plt
def lastmatch_file(log, match_str):
lastmatch = None
for line in log:
if match_str in line:
lastmatch = line
return lastmatch
def load_dict(line):
json_string = re.findall('{.*}', line)[0].replace("'", "\"")
obj = json.loads(json_string)
return obj
def read_actions(log):
model_actions_match = "INFO:ml.rl.training.loss_reporter:The distribution of model actions :"
logged_actions_match = "INFO:ml.rl.training.loss_reporter:The distribution of logged actions :"
    with open(log, 'r') as log_file:
        lines = log_file.readlines()
    model_actions = load_dict(lastmatch_file(lines, model_actions_match))
    logged_actions = load_dict(lastmatch_file(lines, logged_actions_match))
return logged_actions, model_actions
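# Example (log contents hypothetical): read_actions('worker.log') returns the pair
# (logged_actions, model_actions), two dicts of action frequencies such as
# {'0': 0.48, '1': 0.52} and {'0': 0.61, '1': 0.39}, parsed from the last matching
# loss_reporter lines in the log.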
def load_model(path):
logged_act, model_act = read_actions(path)
ds = [logged_act, model_act]
data = {}
for k in logged_act.keys():
data[k] = list(ds_elem[k] for ds_elem in ds)
df = pd.DataFrame.from_dict(data, orient="index", columns=["Logged", "Model"])
return df
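# Usage sketch (path hypothetical): load_model('outputs/training.log') returns a
# DataFrame indexed by action with 'Logged' and 'Model' frequency columns, which
# actions_comparison_bar() below renders as a comparison bar chart.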
def actions_comparison_bar(path, output="/tmp", use_pkl=False):
if isinstance(path, list):
        # Multiple models
df_list = []
for num, dirname in enumerate(path):
if use_pkl:
tmp_df = | pd.read_pickle(dirname) | pandas.read_pickle |
"""Performance visualization class"""
import os
from dataclasses import dataclass, field
from typing import Dict, List
import pandas as pd
import seaborn as sns
import scikit_posthocs as sp
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib import pyplot
import matplotlib.pylab as plt
from tqdm import tqdm
from src.data import Data
@dataclass
class VizMetrics(Data):
"""Generates plots to visualize models performance.
This object generates performance plots to compare different spatial
cross-validation approaches.
Attributes
----------
root_path : str
Root path
index_col : str
The metrics csv index column name
fs_method : str
The feature selection method used
ml_method : str
The machine learning method used
"""
cv_methods: List = field(default_factory=list)
index_col: str = None
fs_method: str = None
ml_method: str = None
cv_methods_path: List = field(default_factory=list)
cv_methods_results: Dict = field(default_factory=dict)
def init_methods_path(self):
"""Initialize spatial cv folder paths"""
self.cv_methods_path = [
os.path.join(
self.root_path,
"results",
method,
"evaluations",
self.fs_method,
self.ml_method,
"metrics.csv",
)
for method in self.cv_methods
]
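        # Each entry follows this layout (method and config names are examples):
        #   <root_path>/results/Optimistic/evaluations/<fs_method>/<ml_method>/metrics.csv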
def load_cv_results(self):
"""Load metric results from each spatial cv being considered"""
for data_path, method in zip(self.cv_methods_path, self.cv_methods):
self.cv_methods_results[method] = pd.read_csv(
data_path, index_col=self.index_col
)
def generate_metric_df(self, metric):
"""Generates a dataframe for a given metric"""
index_fold = self.cv_methods_results["Optimistic"].index
metric_df = pd.DataFrame(columns=self.cv_methods, index=index_fold)
for cv_method, results in self.cv_methods_results.items():
metric_df[cv_method] = results[metric]
metric_df.index = metric_df.index.astype(str)
return metric_df
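        # The result has one row per fold (index cast to str) and one column per CV
        # method, e.g. metric_df.loc['0', 'Optimistic'] holds that method's fold-0
        # value of the requested metric (assuming the index column is the fold id).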
def generate_metric_plot(self):
"""Generates plots for the performance metrics"""
sns.set(font_scale=2.2)
sns.set_style("whitegrid", {"axes.grid": False})
rmse = self.generate_metric_df(metric="RMSE")
features = self.generate_metric_df(metric="N_FEATURES")
instances = self.generate_metric_df(metric="TRAIN_SIZE")
metrics = {"rmse": rmse, "features": features, "instances": instances}
with PdfPages(os.path.join(self.cur_dir, "metrics.pdf")) as pdf_pages:
for metric_name, metric in tqdm(metrics.items(), desc="Generating plots"):
fig, fig_ax = pyplot.subplots(figsize=(20, 5))
plt.xticks(rotation=45)
fig_ax.set(ylabel="")
fig_ax.set_title(metric_name.upper())
sns.lineplot(
data=metric,
markers=True,
err_style="bars",
ax=fig_ax,
dashes=True,
linewidth=5,
palette=[
"#16b004",
"#6e1703",
"#6e1703",
"#6e1703",
"#6e1703",
"#6e1703",
"#6e1703",
"#f8ff0a",
],
# palette="Set1"
)
fig_ax.legend(
bbox_to_anchor=(0.5, -1.0),
loc="lower center",
ncol=4,
borderaxespad=0.0,
)
pdf_pages.savefig(fig, bbox_inches="tight")
def generate_mean_table(self):
"""Generates the mean performance for each spatial cv approache"""
self.logger_info("Generating mean table.")
columns = self.cv_methods_results["Optimistic"].columns.values.tolist()
columns_std = [f"{col}_std" for col in columns]
columns = columns + columns_std
        columns = sorted(columns)
mean_df = pd.DataFrame(columns=columns, index=self.cv_methods)
for method, results in self.cv_methods_results.items():
describe_df = results.describe()
for col in results.columns:
mean_df.loc[
method, col
] = f"{describe_df.loc['mean', col]} ({describe_df.loc['std', col]})"
mean_df.T.to_csv(os.path.join(self.cur_dir, "mean_metrics.csv"))
def tukey_post_hoc_test(self, metric):
"""Generate post hoc statistics"""
metric_df = | pd.DataFrame(columns=self.cv_methods) | pandas.DataFrame |
import pandas as pd
import pytest
import helpers.unit_test as hut
import im.common.data.types as icdtyp
import im.kibot.data.load as vkdloa
class TestKibotS3DataLoader(hut.TestCase):
def setUp(self) -> None:
super().setUp()
self._s3_data_loader = vkdloa.KibotS3DataLoader()
@pytest.mark.slow
def test1(self) -> None:
df = self._s3_data_loader._read_data(
symbol="XG",
asset_class=icdtyp.AssetClass.Futures,
frequency=icdtyp.Frequency.Daily,
contract_type=icdtyp.ContractType.Continuous,
)
self.check_string(df.head(10).to_string())
@pytest.mark.skip(reason="Not implemented yet")
def test_read_data_with_start_end_ts(self) -> None:
"""
Test correctness of hourly ES data loading.
"""
# Load data.
data = self._s3_data_loader._read_data(
symbol="XG",
asset_class=icdtyp.AssetClass.Futures,
frequency=icdtyp.Frequency.Daily,
contract_type=icdtyp.ContractType.Continuous,
start_ts= | pd.to_datetime("1990-12-28 00:00:00") | pandas.to_datetime |
from os import listdir
from os.path import isfile, join
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, Input, Concatenate, Lambda, Average, Dropout
# Relation Network functions.
def get_dense(n, units):
r = []
for k in range(n):
r.append(Dense(units, activation='relu'))
return r
def get_MLP(n, denses):
def g(x):
d = x
for k in range(n):
d = denses[k](d)
return d
return g
def dropout_dense(x, units):
y = Dense(units, activation='relu')(x)
y = Dropout(0.5)(y)
return y
def build_tag(conv):
d = K.int_shape(conv)[2]
tag = np.zeros((d,d,2))
for i in range(d):
for j in range(d):
tag[i,j,0] = float(int(i%d))/(d-1)*2-1
tag[i,j,1] = float(int(j%d))/(d-1)*2-1
tag = K.variable(tag)
tag = K.expand_dims(tag, axis=0)
batch_size = K.shape(conv)[0]
tag = K.tile(tag, [batch_size,1,1,1])
return tag
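# The tag adds two channels of (row, column) coordinates normalised to [-1, 1] for
# every spatial cell. For example, with d = 4 the per-axis values are
# -1.0, -1/3, 1/3 and 1.0 (i.e. i / (d - 1) * 2 - 1 for i = 0..3).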
def slice_1(t):
return t[:, 0, :, :]
def slice_2(t):
return t[:, 1:, :, :]
def slice_3(t):
return t[:, 0, :]
def slice_4(t):
return t[:, 1:, :]
def make_ResNet50_relnet(dataset='SVRT',
resnet_layer='last_size_8',
trainable=False):
# Inputs.
image = Input((128, 128, 3))
if dataset=='sort-of-clevr':
question = Input((11,))
elif dataset=='SVRT':
question = Input((2,)) # same-different ([1, 0]) or relative position ([0, 1]).
else:
raise ValueError('dataset not supported!')
# Get CNN features.
base_model = ResNet50(weights='imagenet', include_top=False, input_tensor=image)
if resnet_layer=='last_size_4':
layer_name = 'conv5_block3_out' # shape: (None, 4, 4, 2048)
elif resnet_layer=='last_size_8':
layer_name = 'conv4_block6_out' # shape: (None, 8, 8, 1024)
else:
raise ValueError('layer not supported!')
cnn_features = base_model.get_layer(layer_name).output
# Freeze the base_model.
base_model.trainable = trainable
# Make tag and append to cnn features.
tag = build_tag(cnn_features)
cnn_features = Concatenate()([cnn_features, tag])
# Make list with objects.
shapes = cnn_features.shape
w, h = shapes[1], shapes[2]
slice_layer1 = Lambda(slice_1)
slice_layer2 = Lambda(slice_2)
slice_layer3 = Lambda(slice_3)
slice_layer4 = Lambda(slice_4)
features = []
for k1 in range(w):
features1 = slice_layer1(cnn_features)
cnn_features = slice_layer2(cnn_features)
for k2 in range(h):
features2 = slice_layer3(features1)
features1 = slice_layer4(features1)
features.append(features2)
# Make list with all combinations of objects.
relations = []
concat = Concatenate()
for feature1 in features:
for feature2 in features:
relations.append(concat([feature1, feature2, question]))
# g function.
g_MLP = get_MLP(4, get_dense(4, units=512))
mid_relations = []
for r in relations:
mid_relations.append(g_MLP(r))
combined_relation = Average()(mid_relations)
# f function.
rn = Dense(512, activation='relu')(combined_relation)
rn = dropout_dense(rn, units=512)
# Output.
if dataset == 'sort-of-clevr':
output_units = 10
answer = Dense(output_units, activation='softmax')(rn)
elif dataset == 'SVRT':
output_units = 1
answer = Dense(output_units, activation='sigmoid')(rn)
model = Model(inputs=[image, question], outputs=answer)
return base_model, model
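# Minimal usage sketch (shapes follow the definitions above; hyperparameters are
# illustrative only):
#   base, relnet = make_ResNet50_relnet(dataset='SVRT', resnet_layer='last_size_8')
#   relnet.compile(optimizer=tf.keras.optimizers.Adam(0.0003),
#                  loss='binary_crossentropy', metrics=['accuracy'])
#   # inputs: a (batch, 128, 128, 3) image tensor and a (batch, 2) question vector.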
def fine_tune_relnet(train_ds,
val_ds,
save_name,
resnet_layer,
epochs_top,
epochs,
steps_per_epoch,
validation_steps,
n=10,
lr=0.0001):
    # Repeat training n times (10 by default).
for i in range(n):
# Define model.
base_model, model = make_ResNet50_relnet(
dataset='SVRT',
resnet_layer=resnet_layer,
trainable=False)
# Compile.
model.compile(
optimizer=tf.keras.optimizers.Adam(0.0003),
loss='binary_crossentropy',
metrics=['accuracy'])
# Train.
model.fit(
train_ds,
epochs=epochs_top,
steps_per_epoch=steps_per_epoch,
validation_data=val_ds,
validation_steps=validation_steps)
# Unfreeze Resnet50.
base_model.trainable = True
# Re-compile.
model.compile(
optimizer=tf.keras.optimizers.Adam(lr),
loss='binary_crossentropy',
metrics=['accuracy'])
# Train.
model.fit(
train_ds,
epochs=epochs,
steps_per_epoch=steps_per_epoch,
validation_data=val_ds,
validation_steps=validation_steps)
# Save weights.
weights_name = save_name + 'model_' + str(i) + '.hdf5'
model.save_weights(weights_name)
return
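# Example call (dataset objects, save path and epoch counts are hypothetical):
#   fine_tune_relnet(train_ds, val_ds, save_name='simulation_1/RN_8x8/',
#                    resnet_layer='last_size_8', epochs_top=5, epochs=10,
#                    steps_per_epoch=100, validation_steps=20)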
def get_dataset(batch_size, tfrecord_dir, is_training=True, process_img=True, relnet=False):
# Load dataset.
if type(tfrecord_dir) == list:
raw_image_dataset = tf.data.TFRecordDataset(tfrecord_dir, num_parallel_reads=2)
else:
raw_image_dataset = tf.data.TFRecordDataset(tfrecord_dir)
# Define example reading function.
def read_tfrecord(serialized_example):
# Create a dictionary describing the features.
feature_description = {
'label': tf.io.FixedLenFeature([], tf.int64),
'image_raw': tf.io.FixedLenFeature([], tf.string),
'coordinates': tf.io.FixedLenFeature([], tf.string),
'relative_position': tf.io.FixedLenFeature([], tf.int64)}
# Parse example.
example = tf.io.parse_single_example(serialized_example, feature_description)
# Cast label to int64
label = example['label']
label = tf.cast(label, tf.int64)
# Get image.
image = tf.image.decode_png(example['image_raw'])
# Ensure shape dimensions are constant.
image = tf.reshape(image, [128, 128, 3])
# Process image.
if process_img:
image = tf.cast(image, tf.float64)
image /= 255.0
# Sample-wise center image.
mean = tf.reduce_mean(image)
image -= mean
# Sample-wise std normalization.
std = tf.math.reduce_std(image)
image /= std
# Get coordinates.
b_coors = example['coordinates']
coors = tf.io.parse_tensor(b_coors, out_type=tf.float64) # restore 2D array from byte string
coors = tf.reshape(coors, [4])
# Cast relative position to int64
rel_pos = example['relative_position']
rel_pos = tf.cast(rel_pos, tf.int64)
if relnet:
question = tf.constant([1, 0], dtype=tf.int64)
return (image, question), label
else:
return image, label
# Parse dataset.
dataset = raw_image_dataset.map(read_tfrecord)
# Always shuffle for simplicity.
dataset = dataset.shuffle(5600)
if is_training:
# Infinite dataset to avoid the potential last partial batch in each epoch.
dataset = dataset.repeat()
dataset = dataset.batch(batch_size)
return dataset
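# Usage sketch (TFRecord path hypothetical): with relnet=True each element is
# ((image, question), label), matching the two-input relation network above:
#   train_ds = get_dataset(64, 'data/svrt_train.tfrecords', is_training=True, relnet=True)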
def test_relnet_auc(ds, weights_dir, resnet_layer, model_name, condition):
"""Tests 10 versions of a single relation net model in a single condition using the area under the ROC curve.
Args:
ds: dataset with cases from the 'same' and 'different' classes.
weights_dir: directory of models weights to test.
resnet_layer: layer of ResNet50 to use.
model_name: model name. String.
condition: condition name. String.
Returns:
A list with test data: model_name, condition, area under the ROC curve.
"""
# Define model architecture.
base_model, model = make_ResNet50_relnet(
dataset='SVRT',
resnet_layer=resnet_layer,
trainable=False)
# Compile: set metric to auc (default is area under the ROC curve).
model.compile(
optimizer=tf.keras.optimizers.Adam(0.0003),
loss='binary_crossentropy',
metrics=[tf.keras.metrics.AUC()])
# Get list of weights from path.
weights_paths = [join(weights_dir, f) for f in listdir(weights_dir) if isfile(join(weights_dir, f))]
# Test each model.
models_data = []
for w_path in weights_paths:
model.load_weights(w_path)
metrics = model.evaluate(ds)
models_data.append([model_name, condition, metrics[1]])
return models_data
def test_all_relnets_auc(ds, condition):
results = []
# last_size_4.
last_size_4_data = test_relnet_auc(
ds=ds,
weights_dir='simulation_1/RN_4x4',
resnet_layer='last_size_4',
model_name='RN_4x4',
condition=condition)
results.extend(last_size_4_data)
# last_size_8.
last_size_8_data = test_relnet_auc(
ds=ds,
weights_dir='simulation_1/RN_8x8',
resnet_layer='last_size_8',
model_name='RN_8x8',
condition=condition)
results.extend(last_size_8_data)
# Make pandas dataframe and return.
return | pd.DataFrame(results, columns=['Model', 'Condition', 'AUC']) | pandas.DataFrame |
from django.http import (
HttpResponse,
JsonResponse
)
from django.http.response import (
HttpResponseRedirect,
HttpResponseForbidden,
Http404
)
from django.shortcuts import (
get_object_or_404,
render_to_response,
render,
redirect
)
from django.core.files.storage import FileSystemStorage
from django.core.files.move import file_move_safe
from django.contrib.auth.models import User
from django.contrib import messages
from django.apps import apps
from fnmatch import fnmatch
from whatisit.settings import (
MEDIA_ROOT,
MEDIA_URL
)
from whatisit.apps.wordfish.utils import (
get_collection_annotators,
get_report_collection,
get_report_set,
get_reportset_annotations
)
from whatisit.apps.wordfish.models import ReportSet
import pandas
import errno
import itertools
import os
import tempfile
############################################################################
# Exporting Data
############################################################################
def download_annotation_set_json(request,sid,uid):
'''a wrapper view for download annotation_set, setting return_json to True
:param sid: the report set id
:param uid: the user id to download
'''
return download_annotation_set(request,sid,uid,return_json=True)
def download_annotation_set(request,sid,uid,return_json=False):
'''download annotation set will download annotations for a particular user
    and report set. By default it returns a text/csv file; if return_json is True,
    it returns a JSON response instead.
:param sid: the report set id
:param uid: the user id to download
:param return_json: return a Json response instead (default is False)
'''
report_set = get_report_set(sid)
collection = report_set.collection
# Does the user have permission to edit?
requester = request.user
if requester == collection.owner or requester in collection.contributors.all():
user = User.objects.get(id=uid)
df = get_reportset_annotations(report_set,user)
if not return_json:
response = HttpResponse(df.to_csv(sep="\t"), content_type='text/csv')
export_name = "%s_%s_annotations.tsv" %(report_set.id,user.username)
response['Content-Disposition'] = 'attachment; filename="%s"' %(export_name)
return response
return JsonResponse(df.to_json(orient="records"))
messages.info(request,"You do not have permission to perform this action.")
return redirect('report_collection_details',cid=collection.id)
def download_data(request,cid):
'''download data returns a general view for a collection to download data,
meaning all reports, reports for a set, or annotations for a set
:param cid: the collection id
'''
collection = get_report_collection(cid)
# Does the user have permission to edit?
requester = request.user
if requester == collection.owner or requester in collection.contributors.all():
context = {"collection":collection,
"annotators":get_collection_annotators(collection),
"report_sets":ReportSet.objects.filter(collection=collection)}
return render(request, 'export/download_data.html', context)
messages.info(request,"You do not have permission to perform this action.")
return redirect('report_collection_details',cid=collection.id)
def download_reports_json(request,cid,sid=None):
'''download_reports_json is a wrapper for download_reports, ensuring
that a json response is returned
:param cid: the collection id
:param sid: the report set if, if provided use that report set.
'''
return download_reports(request,cid,sid=sid,return_json=True)
def download_reports(request,cid,sid=None,return_json=False):
'''download reports returns a tsv download for an entire collection of reports (not recommended)
    or for reports within a single report set (recommended)
:param cid: the collection id
    :param sid: the report set id; if provided, use that report set.
:param return_json: return a Json response instead (default is False)
'''
if sid != None:
report_set = get_report_set(sid)
collection = report_set.collection
reports = report_set.reports.all()
export_name = "%s_reports_set.tsv" %(report_set.id)
else:
collection = get_report_collection(cid)
reports = collection.report_set.all()
export_name = "%s_reports.tsv" %(collection.id)
# Does the user have permission to edit?
requester = request.user
if requester == collection.owner or requester in collection.contributors.all():
df = | pandas.DataFrame(columns=["report_id","report_text"]) | pandas.DataFrame |
import pytest
import numpy as np
import pandas as pd
from pandas import DataFrame
from pandas import Series
from pandas.util.testing import assert_frame_equal
from pandas.util.testing import assert_series_equal
from wreckognize.sensitive_dataframe import SensitiveFrame
from wreckognize.sensitive_dataframe import SensitiveSeries
@pytest.fixture
def sample_df():
return DataFrame(
[
['crusher', 'commander', 'woman', 'human'],
['worf', 'lieutenant', 'man', 'klingon']
],
columns=['name', 'rank', 'gender', 'species']
)
@pytest.fixture
def sample_sf():
return SensitiveFrame(
[
['riker', 'commander', 'man', 'human'],
['ro', 'ensign', 'woman', 'bejoran']
],
columns=['name', 'rank', 'gender', 'species'],
quasi_identifiers=['rank', 'gender'],
sensitive_data=['species']
)
@pytest.fixture
def sample_sf_two():
return SensitiveFrame(
[['starfleet', 2335], ['starfleet', 2340]],
columns=['organization', 'born'],
quasi_identifiers=['born'],
sensitive_data=['born']
)
@pytest.fixture
def sample_sf_three():
return SensitiveFrame(
[['william', 'red'], ['laren', 'red']],
columns=['given_name', 'uniform'],
quasi_identifiers=['uniform'],
sensitive_data=['given_name']
)
@pytest.fixture
def sample_right_sf():
return SensitiveFrame(
[['riker', 'starfleet', 2335],['ro', 'starfleet', 2340]],
columns=['name', 'organization', 'born'],
quasi_identifiers=['name', 'born'],
sensitive_data=['born']
)
class TestSensitiveSeries:
def test_init_sets_boolean_metadata(self):
test_ss = SensitiveSeries(name='a', is_quasi_identifier=True)
assert test_ss.is_quasi_identifier
assert not test_ss.is_sensitive_data
def test_metadata_properties(self):
test_ss = SensitiveSeries(name='a', is_quasi_identifier=True)
assert test_ss.quasi_identifiers == ['a']
assert test_ss.sensitive_data == []
class TestSensitiveFrame:
def test_init_sets_quasi_identifiers(self):
test_sf = SensitiveFrame(
columns=['a', 'b'],
quasi_identifiers=['a', 'b']
)
assert test_sf.quasi_identifiers == ['a', 'b']
def test_init_sets_sensitive_data(self):
test_sf = SensitiveFrame(
columns=['c', 'd'],
sensitive_data=['c', 'd']
)
assert test_sf.sensitive_data == ['c', 'd']
def test_init_sets_multiple_metadata(self):
test_sf = SensitiveFrame(
columns=['a', 'b'],
sensitive_data=['a'],
quasi_identifiers=['b']
)
assert test_sf.sensitive_data == ['a']
assert test_sf.quasi_identifiers == ['b']
def test_init_selects_metadata(self):
test_sf = SensitiveFrame(
columns=['a'],
quasi_identifiers=['a', 'b']
)
assert test_sf.quasi_identifiers == ['a']
def test_init_with_df_keeps_data(self, sample_df):
test_sf = SensitiveFrame(sample_df)
expected_df = sample_df
assert_frame_equal(test_sf, expected_df)
def test_init_cleans_sf_metadata(self, sample_sf):
test_sf = SensitiveFrame(sample_sf)
assert test_sf.quasi_identifiers == []
assert test_sf.sensitive_data == []
def test_simple_join(self, sample_sf, sample_sf_two):
test_sf = sample_sf.join(sample_sf_two)
expected_df = DataFrame(
[
['riker', 'commander', 'man', 'human', 'starfleet', 2335],
['ro', 'ensign', 'woman', 'bejoran', 'starfleet', 2340]
],
columns=['name', 'rank', 'gender', 'species', 'organization', 'born']
)
assert test_sf.quasi_identifiers == ['rank', 'gender', 'born']
assert test_sf.sensitive_data == ['species', 'born']
assert_frame_equal(test_sf, expected_df)
def test_join_list(self, sample_sf, sample_sf_two, sample_sf_three):
test_sf = sample_sf.join([sample_sf_two, sample_sf_three])
expected_df = DataFrame(
[
['riker', 'commander', 'man', 'human', 'starfleet', 2335, 'william', 'red'],
['ro', 'ensign', 'woman', 'bejoran', 'starfleet', 2340, 'laren', 'red']
],
columns=['name', 'rank', 'gender', 'species', 'organization', 'born', 'given_name', 'uniform']
)
assert test_sf.quasi_identifiers == ['rank', 'gender', 'born', 'uniform']
assert test_sf.sensitive_data == ['species','born','given_name']
assert_frame_equal(test_sf, expected_df)
def test_mixed_join(self, sample_sf_three, sample_df, sample_sf_two):
test_sf = sample_sf_three.join([sample_df, sample_sf_two])
expected_df = DataFrame(
[
['william', 'red', 'crusher', 'commander', 'woman', 'human', 'starfleet', 2335],
['laren', 'red', 'worf', 'lieutenant', 'man', 'klingon', 'starfleet', 2340]
],
columns=['given_name', 'uniform', 'name', 'rank', 'gender', 'species', 'organization', 'born']
)
assert_frame_equal(test_sf, expected_df)
assert test_sf.quasi_identifiers == ['uniform', 'born']
assert test_sf.sensitive_data == ['given_name', 'born']
def test_join_non_metadata(self):
non_sf = SensitiveFrame(columns=['empty'])
test_sf = SensitiveFrame(columns=['void']).join(non_sf)
assert test_sf.quasi_identifiers == []
assert test_sf.sensitive_data == []
def test_rename_columns(self, sample_sf_three):
test_sf = sample_sf_three.rename(columns={'given_name': 'first_name', 'uniform': 'specialty'})
assert test_sf.sensitive_data == ['first_name']
assert test_sf.quasi_identifiers == ['specialty']
def test_rename_inplace(self, sample_sf_three):
sample_sf_three.rename(columns={'given_name': 'first_name'}, inplace=True)
assert sample_sf_three.columns.tolist() == ['first_name', 'uniform']
assert sample_sf_three.sensitive_data == ['first_name']
def test_rename_non_metadata(self):
non_sf = SensitiveFrame(columns=['empty'])
test_sf = non_sf.rename(columns={'empty': 'void'})
assert test_sf.columns.tolist() == ['void']
assert test_sf.quasi_identifiers == []
assert test_sf.sensitive_data == []
def test_rename_double_metadata(self):
non_sf = SensitiveFrame(
columns=['empty'],
sensitive_data=['empty'],
quasi_identifiers=['empty']
)
test_sf = non_sf.rename(columns={'empty': 'void'})
assert test_sf.columns.tolist() == ['void']
assert test_sf.quasi_identifiers == ['void']
assert test_sf.sensitive_data == ['void']
def test_simple_drop(self, sample_sf):
test_sf = sample_sf.drop('rank', axis=1)
assert test_sf.columns.tolist() == ['name', 'gender', 'species']
assert test_sf.quasi_identifiers == ['gender']
assert test_sf.sensitive_data == ['species']
def test_drop_list(self, sample_sf):
        test_sf = sample_sf.drop(['rank', 'species'], axis=1)
assert test_sf.columns.tolist() == ['name', 'gender']
assert test_sf.quasi_identifiers == ['gender']
assert test_sf.sensitive_data == []
def test_drop_inplace(self, sample_sf):
sample_sf.drop('rank', axis=1, inplace=True)
assert sample_sf.columns.tolist() == ['name', 'gender', 'species']
assert sample_sf.quasi_identifiers == ['gender']
assert sample_sf.sensitive_data == ['species']
def test_simple_get(self, sample_sf):
test_sf = sample_sf['rank']
expected_df = Series(['commander', 'ensign'], name='rank')
assert_series_equal(test_sf, expected_df)
assert test_sf.quasi_identifiers == ['rank']
def test_get_list(self, sample_sf):
test_sf = sample_sf[['name', 'rank', 'species']]
        expected_df = DataFrame([['riker', 'commander', 'human'], ['ro', 'ensign', 'bejoran']],
columns=['name', 'rank', 'species']
)
assert_frame_equal(test_sf, expected_df)
assert test_sf.quasi_identifiers == ['rank']
assert test_sf.sensitive_data == ['species']
def test_get_non_metadata(self, sample_sf):
test_sf = sample_sf['name']
expected_df = Series(['riker', 'ro'], name='name')
assert_series_equal(test_sf, expected_df)
assert test_sf.quasi_identifiers == []
def test_get_double_metadata(self, sample_sf_two):
test_sf = sample_sf_two['born']
assert test_sf.quasi_identifiers == ['born']
assert test_sf.sensitive_data == ['born']
def test_get_and_set_metadata(self, sample_sf):
test_sf = SensitiveFrame()
test_sf['grade'] = sample_sf['rank']
expected_df = DataFrame(['commander', 'ensign'], columns=['grade'])
assert_frame_equal(test_sf, expected_df)
assert test_sf.quasi_identifiers == ['grade']
def test_get_and_set_non_metadata(self, sample_sf):
test_sf = SensitiveFrame()
test_sf['person'] = sample_sf['name']
expected_df = DataFrame(['riker', 'ro'], columns=['person'])
assert_frame_equal(test_sf, expected_df)
assert test_sf.sensitive_data == []
def test_get_and_set_from_df(self, sample_df):
test_sf = SensitiveFrame()
test_sf['grade'] = sample_df['rank']
expected_df = DataFrame(['commander', 'lieutenant'], columns=['grade'])
assert_frame_equal(test_sf, expected_df)
assert test_sf.quasi_identifiers == []
def test_get_and_set_double_metadata(self, sample_sf_two):
test_sf = SensitiveFrame()
test_sf['birth_year'] = sample_sf_two['born']
expected_df = | DataFrame([2335, 2340], columns=['birth_year']) | pandas.DataFrame |
from datetime import (
datetime,
time,
)
import numpy as np
import pytest
from pandas._libs.tslibs import timezones
import pandas.util._test_decorators as td
from pandas import (
DataFrame,
Series,
date_range,
)
import pandas._testing as tm
class TestBetweenTime:
@td.skip_if_has_locale
def test_between_time_formats(self, frame_or_series):
# GH#11818
rng = date_range("1/1/2000", "1/5/2000", freq="5min")
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
if frame_or_series is Series:
ts = ts[0]
strings = [
("2:00", "2:30"),
("0200", "0230"),
("2:00am", "2:30am"),
("0200am", "0230am"),
("2:00:00", "2:30:00"),
("020000", "023000"),
("2:00:00am", "2:30:00am"),
("020000am", "023000am"),
]
expected_length = 28
for time_string in strings:
assert len(ts.between_time(*time_string)) == expected_length
@pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
def test_localized_between_time(self, tzstr, frame_or_series):
tz = timezones.maybe_get_tz(tzstr)
rng = date_range("4/16/2012", "5/1/2012", freq="H")
ts = Series(np.random.randn(len(rng)), index=rng)
if frame_or_series is DataFrame:
ts = ts.to_frame()
ts_local = ts.tz_localize(tzstr)
t1, t2 = time(10, 0), time(11, 0)
result = ts_local.between_time(t1, t2)
expected = ts.between_time(t1, t2).tz_localize(tzstr)
tm.assert_equal(result, expected)
assert timezones.tz_compare(result.index.tz, tz)
def test_between_time_types(self, frame_or_series):
# GH11818
rng = date_range("1/1/2000", "1/5/2000", freq="5min")
obj = DataFrame({"A": 0}, index=rng)
if frame_or_series is Series:
obj = obj["A"]
msg = r"Cannot convert arg \[datetime\.datetime\(2010, 1, 2, 1, 0\)\] to a time"
with pytest.raises(ValueError, match=msg):
obj.between_time(datetime(2010, 1, 2, 1), datetime(2010, 1, 2, 5))
def test_between_time(self, close_open_fixture, frame_or_series):
rng = date_range("1/1/2000", "1/5/2000", freq="5min")
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
if frame_or_series is not DataFrame:
ts = ts[0]
stime = time(0, 0)
etime = time(1, 0)
inc_start, inc_end = close_open_fixture
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
assert len(filtered) == exp_len
for rs in filtered.index:
t = rs.time()
if inc_start:
assert t >= stime
else:
assert t > stime
if inc_end:
assert t <= etime
else:
assert t < etime
result = ts.between_time("00:00", "01:00")
expected = ts.between_time(stime, etime)
tm.assert_equal(result, expected)
# across midnight
rng = date_range("1/1/2000", "1/5/2000", freq="5min")
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
if frame_or_series is not DataFrame:
ts = ts[0]
stime = time(22, 0)
etime = time(9, 0)
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
assert len(filtered) == exp_len
for rs in filtered.index:
t = rs.time()
if inc_start:
assert (t >= stime) or (t <= etime)
else:
assert (t > stime) or (t <= etime)
if inc_end:
assert (t <= etime) or (t >= stime)
else:
assert (t < etime) or (t >= stime)
def test_between_time_raises(self, frame_or_series):
# GH#20725
obj = DataFrame([[1, 2, 3], [4, 5, 6]])
if frame_or_series is not DataFrame:
obj = obj[0]
msg = "Index must be DatetimeIndex"
with pytest.raises(TypeError, match=msg): # index is not a DatetimeIndex
obj.between_time(start_time="00:00", end_time="12:00")
def test_between_time_axis(self, frame_or_series):
# GH#8839
rng = date_range("1/1/2000", periods=100, freq="10min")
ts = Series(np.random.randn(len(rng)), index=rng)
if frame_or_series is DataFrame:
ts = ts.to_frame()
stime, etime = ("08:00:00", "09:00:00")
expected_length = 7
assert len(ts.between_time(stime, etime)) == expected_length
assert len(ts.between_time(stime, etime, axis=0)) == expected_length
msg = f"No axis named {ts.ndim} for object type {type(ts).__name__}"
with pytest.raises(ValueError, match=msg):
ts.between_time(stime, etime, axis=ts.ndim)
def test_between_time_axis_aliases(self, axis):
# GH#8839
rng = | date_range("1/1/2000", periods=100, freq="10min") | pandas.date_range |
import numpy as np
import pandas as pd
import boto3
from io import BytesIO
import librosa
from botocore.exceptions import ClientError
def call_s3(s3_client, bucket_name, fname, folder='audio_train/'):
"""Call S3 instance to retrieve data from .wav file(or other format).
Assumes file is in folder name path"""
try:
path = folder + fname
except TypeError:
return None
try:
response = s3_client.get_object(Bucket=bucket_name, Key=path)
except ClientError as ex:
if ex.response['Error']['Code'] == 'NoSuchKey':
return dict()
data = BytesIO(response['Body'].read())
return data
def audio_vectorize(fname, data):
"""
Analyze audio data in order to extract features for model development.
Parameters:
fname: (str)
data: (_io.BytesIO)
Returns:
Feature dictionary
"""
try:
        y, sr = librosa.load(data, mono=True, duration=10, offset=0.5)
except RuntimeError:
return pd.Series()
chroma_stft = np.mean(librosa.feature.chroma_stft(y, sr))
spec_cent = np.mean(librosa.feature.spectral_centroid(y=y, sr=sr))
spec_bw = np.mean(librosa.feature.spectral_bandwidth(y=y, sr=sr))
rolloff = np.mean(librosa.feature.spectral_rolloff(y=y, sr=sr))
zcr = np.mean(librosa.feature.zero_crossing_rate(y))
mfccs = librosa.feature.mfcc(y=y, sr=sr)
mfccs = np.mean(mfccs, axis=1)
    vector_dict = {'fname': fname, 'chroma_stft': chroma_stft, 'spectral_centroid': spec_cent,
                   'spectral_bandwidth': spec_bw, 'rolloff': rolloff, 'zero_crossing_rate': zcr}
for i, mfcc in enumerate(mfccs):
vector_dict['mfccs_{}'.format(i)] = mfcc
return pd.Series(vector_dict)
def vector_merge(df, s3_client, bucket_name, s3_folder = 'audio_train/', sample_size=200, outpath=None):
"""
    Helper that extracts feature vectors for multiple files in an S3 bucket (the number
    is set by sample_size) and merges them onto the labelled dataframe by file name.
"""
vectors = []
for i, fname in enumerate(df.loc[:sample_size-1, 'fname']):
data = call_s3(s3_client, bucket_name, fname, s3_folder)
vec = audio_vectorize(fname, data)
vectors.append(vec)
print(i, ' ', fname)
vec_df = | pd.concat(vectors, axis=1, sort=True) | pandas.concat |
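# Hedged usage sketch (illustrative only): the bucket name and CSV path below are
# hypothetical placeholders, not values taken from the original project, and it is
# assumed that vector_merge() ultimately returns the merged feature DataFrame
# (its remaining lines are not shown in this excerpt).
if __name__ == '__main__':
    s3 = boto3.client('s3')                       # standard boto3 client constructor
    labels_df = pd.read_csv('train_labels.csv')   # assumed to contain an 'fname' column
    feature_df = vector_merge(labels_df, s3, bucket_name='my-audio-bucket',
                              s3_folder='audio_train/', sample_size=50)
    print(feature_df.shape)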
# -*- coding: utf-8 -*-
"""
This is the main class for the abacra model
"""
# enable for python2 execution
# from __future__ import print_function, division, absolute_import
import matplotlib.pylab as plt
import networkx as nx
import numpy as np
import pandas as pd
import time
import os
import pickle
import abacra.network_creation
# setting for printout of larger tables
| pd.set_option("display.max_columns",200) | pandas.set_option |
__all__ = [
"str_c",
"str_count",
"str_detect",
"str_extract",
"str_locate",
"str_replace",
"str_replace_all",
"str_sub",
"str_split",
"str_which",
"str_to_lower",
"str_to_upper",
"str_to_snake",
]
import re
from grama import make_symbolic, pipe, valid_dist, param_dist, dfdelegate
from pandas import Series
from numpy import NaN, isnan
# ------------------------------------------------------------------------------
# String helpers
# - a straight port of stringr
# ------------------------------------------------------------------------------
## Vectorized Concatenate
# --------------------------------------------------
def _vec_len(x):
try:
if isinstance(x, str):
raise TypeError
return len(x)
except TypeError:
return 1
def _ensure_series(x, l, length):
if (l == 1) and (not isinstance(x, Series)):
return Series([x] * length).astype(str)
return Series(x).astype(str).reset_index(drop=True)
@make_symbolic
def str_c(*args, sep=""):
"""
Concatenate strings
"""
## Pass-through a single arg
if len(args) < 2:
return | Series(args[0]) | pandas.Series |
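# Hedged usage sketch: with more than one argument, str_c is expected to perform an
# element-wise, stringr-style concatenation (the remainder of the function body is
# not shown in this excerpt). The expected output noted below is an assumption, not
# captured library output.
if __name__ == '__main__':
    s1 = Series(["a", "b"])
    s2 = Series(["x", "y"])
    print(str_c(s1, s2, sep="-"))   # expected (assumed): a-x, b-y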
# -*- coding: utf-8 -*-
"""
Originally created on Tue Feb 17 2015
This script has been repurposed to provide summary counts from class files. May 2020
This script will grab the biovolume feature data from extracted feature files
for all images in an automated class file.
Can bin data by category or leave each image separate.
@author: <NAME>
"""
# script to extract the biovolume estimates from IFCB V2 feature files
# and sum them per category for class files
# this will read a directory of class files and search the feature path for
# those files, pulling the biovolume from them
# 06/13/2017 DWH
# this script is a modified version of the biovolume grabber script
# it takes the biovolume value for each cell, converts it to units of carbon
# following the formulas from Menden-Deuer and Lessard (2000), and then sums
# the total carbon per category
from scipy.io import loadmat
import os
import pandas as pd
import numpy as np
from datetime import datetime
__author__ = 'dhenrichs'
# path to the feature files
#feature_path = '/data4/processed_data/extracted_features/2014/'
#feature_path = '/data4/Cruise_data/HRR_cruise/processed_data/extracted_features/'
# path to the class files for binning the biovolumes into categories
#class_path = '/data4/manual_classify_results/temp_alyssa_manual/' #processed_data/class_files/2014/'
#class_path = '/data4/Cruise_data/HRR_cruise/manual_corrections_cruise_data_31Jan2019/'
#class_path = '/data4/Cruise_data/HRR_cruise/class_files_cruise_data_CNN/'
# path to where the outfiles with biovolume will be located
#outpath = '/data4/test_biovolume/'
# limit files to one particular month/day IMPORTANT: if you don't want to limit the date, just put None
date_limiter = None # a string (e.g. 'D20170404') or None (you literally have to type None)
#are you using automated class files or manually corrected mat files?
#automated_or_manual = 'automated' #can be 'automated' or 'manual'
#are these CNN results
#CNN = True
def grab_biovolume(in_feature, in_class, automated):
    '''Return the total summed carbon per category.
    This does NOT return the carbon for each individual image.'''
feature_data = load_feature_file(in_feature)
if automated == 'automated':
feature_data['Biovolume'] = convert_biovolume_pixels_to_microns(feature_data['Biovolume'])
category_list, class_data = load_class_file_automated(in_class)
if 'unclassified' not in category_list:
category_list.append('unclassified')
outdata = pd.DataFrame([0]*len(category_list), index=category_list, columns=['Biovolume']).T
for image_cat, feat_size in zip(class_data, feature_data['Biovolume']):
carbon_value = calculate_carbon_from_biovolume(feat_size, image_cat)
outdata[image_cat] += carbon_value
return outdata.T
elif automated == 'manual':
category_list, class_data, roinums = load_class_file_manual(in_class)
converted_data = | pd.DataFrame(index=roinums) | pandas.DataFrame |
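# Hedged sketch (not part of this excerpt): one plausible shape for the
# calculate_carbon_from_biovolume() helper referenced above, based on the
# Menden-Deuer and Lessard (2000) power-law relationships. The coefficients and
# the way diatom categories are detected are assumptions and should be checked
# against the paper and the original helper before use.
def _carbon_from_biovolume_sketch(biovolume_um3, category):
    """Convert a biovolume in cubic microns to carbon (pg C) per cell."""
    if 'diatom' in category.lower():
        # assumed large-diatom relationship: pg C = 0.288 * V ** 0.811
        return 0.288 * biovolume_um3 ** 0.811
    # assumed general protist relationship: pg C = 0.216 * V ** 0.939
    return 0.216 * biovolume_um3 ** 0.939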
# -*- coding: utf-8 -*-
import re
import warnings
from datetime import timedelta
from itertools import product
import pytest
import numpy as np
import pandas as pd
from pandas import (CategoricalIndex, DataFrame, Index, MultiIndex,
compat, date_range, period_range)
from pandas.compat import PY3, long, lrange, lzip, range, u, PYPY
from pandas.errors import PerformanceWarning, UnsortedIndexError
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.indexes.base import InvalidIndexError
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas._libs.tslib import Timestamp
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal, assert_copy
from .common import Base
class TestMultiIndex(Base):
_holder = MultiIndex
_compat_props = ['shape', 'ndim', 'size']
def setup_method(self, method):
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
self.index_names = ['first', 'second']
self.indices = dict(index=MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels
], names=self.index_names,
verify_integrity=False))
self.setup_indices()
def create_index(self):
return self.index
def test_can_hold_identifiers(self):
idx = self.create_index()
key = idx[0]
assert idx._can_hold_identifiers_and_holds_name(key) is True
def test_boolean_context_compat2(self):
# boolean context compat
# GH7897
i1 = MultiIndex.from_tuples([('A', 1), ('A', 2)])
i2 = MultiIndex.from_tuples([('A', 1), ('A', 3)])
common = i1.intersection(i2)
def f():
if common:
pass
tm.assert_raises_regex(ValueError, 'The truth value of a', f)
def test_labels_dtypes(self):
# GH 8456
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
assert i.labels[0].dtype == 'int8'
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(40)])
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(400)])
assert i.labels[1].dtype == 'int16'
i = MultiIndex.from_product([['a'], range(40000)])
assert i.labels[1].dtype == 'int32'
i = pd.MultiIndex.from_product([['a'], range(1000)])
assert (i.labels[0] >= 0).all()
assert (i.labels[1] >= 0).all()
def test_where(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
def f():
i.where(True)
pytest.raises(NotImplementedError, f)
def test_where_array_like(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
klasses = [list, tuple, np.array, pd.Series]
cond = [False, True]
for klass in klasses:
def f():
return i.where(klass(cond))
pytest.raises(NotImplementedError, f)
def test_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(m.repeat(reps), expected)
with tm.assert_produces_warning(FutureWarning):
result = m.repeat(n=reps)
tm.assert_index_equal(result, expected)
def test_numpy_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(np.repeat(m, reps), expected)
msg = "the 'axis' parameter is not supported"
tm.assert_raises_regex(
ValueError, msg, np.repeat, m, reps, axis=1)
def test_set_name_methods(self):
# so long as these are synonyms, we don't need to test set_names
assert self.index.rename == self.index.set_names
new_names = [name + "SUFFIX" for name in self.index_names]
ind = self.index.set_names(new_names)
assert self.index.names == self.index_names
assert ind.names == new_names
with tm.assert_raises_regex(ValueError, "^Length"):
ind.set_names(new_names + new_names)
new_names2 = [name + "SUFFIX2" for name in new_names]
res = ind.set_names(new_names2, inplace=True)
assert res is None
assert ind.names == new_names2
# set names for specific level (# GH7792)
ind = self.index.set_names(new_names[0], level=0)
assert self.index.names == self.index_names
assert ind.names == [new_names[0], self.index_names[1]]
res = ind.set_names(new_names2[0], level=0, inplace=True)
assert res is None
assert ind.names == [new_names2[0], self.index_names[1]]
# set names for multiple levels
ind = self.index.set_names(new_names, level=[0, 1])
assert self.index.names == self.index_names
assert ind.names == new_names
res = ind.set_names(new_names2, level=[0, 1], inplace=True)
assert res is None
assert ind.names == new_names2
@pytest.mark.parametrize('inplace', [True, False])
def test_set_names_with_nlevel_1(self, inplace):
# GH 21149
# Ensure that .set_names for MultiIndex with
# nlevels == 1 does not raise any errors
expected = pd.MultiIndex(levels=[[0, 1]],
labels=[[0, 1]],
names=['first'])
m = pd.MultiIndex.from_product([[0, 1]])
result = m.set_names('first', level=0, inplace=inplace)
if inplace:
result = m
tm.assert_index_equal(result, expected)
def test_set_levels_labels_directly(self):
# setting levels/labels directly raises AttributeError
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
with pytest.raises(AttributeError):
self.index.levels = new_levels
with pytest.raises(AttributeError):
self.index.labels = new_labels
def test_set_levels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
def assert_matching(actual, expected, check_dtype=False):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
tm.assert_numpy_array_equal(act, exp, check_dtype=check_dtype)
# level changing [w/o mutation]
ind2 = self.index.set_levels(new_levels)
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
# level changing specific level [w/o mutation]
ind2 = self.index.set_levels(new_levels[0], level=0)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.set_levels(new_levels[1], level=1)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/o mutation]
ind2 = self.index.set_levels(new_levels, level=[0, 1])
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# illegal level changing should not change levels
# GH 13754
original_index = self.index.copy()
for inplace in [True, False]:
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_levels(['c'], level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_labels([0, 1, 2, 3, 4, 5], level=0,
inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Levels"):
self.index.set_levels('c', level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Labels"):
self.index.set_labels(1, level=0, inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
def test_set_labels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
def assert_matching(actual, expected):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp, dtype=np.int8)
tm.assert_numpy_array_equal(act, exp)
# label changing [w/o mutation]
ind2 = self.index.set_labels(new_labels)
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
# label changing specific level [w/o mutation]
ind2 = self.index.set_labels(new_labels[0], level=0)
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.set_labels(new_labels[1], level=1)
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/o mutation]
ind2 = self.index.set_labels(new_labels, level=[0, 1])
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing for levels of different magnitude of categories
ind = pd.MultiIndex.from_tuples([(0, i) for i in range(130)])
new_labels = range(129, -1, -1)
expected = pd.MultiIndex.from_tuples(
[(0, i) for i in new_labels])
# [w/o mutation]
result = ind.set_labels(labels=new_labels, level=1)
assert result.equals(expected)
# [w/ mutation]
result = ind.copy()
result.set_labels(labels=new_labels, level=1, inplace=True)
assert result.equals(expected)
def test_set_levels_labels_names_bad_input(self):
levels, labels = self.index.levels, self.index.labels
names = self.index.names
with tm.assert_raises_regex(ValueError, 'Length of levels'):
self.index.set_levels([levels[0]])
with tm.assert_raises_regex(ValueError, 'Length of labels'):
self.index.set_labels([labels[0]])
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names([names[0]])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_names(names[0])
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_levels(levels, level=0)
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_labels(labels, level=0)
# should have equal lengths
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names(names[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'string'):
self.index.set_names(names, level=0)
def test_set_levels_categorical(self):
# GH13854
index = MultiIndex.from_arrays([list("xyzx"), [0, 1, 2, 3]])
for ordered in [False, True]:
cidx = CategoricalIndex(list("bac"), ordered=ordered)
result = index.set_levels(cidx, 0)
expected = MultiIndex(levels=[cidx, [0, 1, 2, 3]],
labels=index.labels)
tm.assert_index_equal(result, expected)
result_lvl = result.get_level_values(0)
expected_lvl = CategoricalIndex(list("bacb"),
categories=cidx.categories,
ordered=cidx.ordered)
tm.assert_index_equal(result_lvl, expected_lvl)
def test_metadata_immutable(self):
levels, labels = self.index.levels, self.index.labels
# shouldn't be able to set at either the top level or base level
mutable_regex = re.compile('does not support mutable operations')
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0] = levels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0][0] = levels[0][0]
# ditto for labels
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0] = labels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0][0] = labels[0][0]
# and for names
names = self.index.names
with tm.assert_raises_regex(TypeError, mutable_regex):
names[0] = names[0]
def test_inplace_mutation_resets_values(self):
levels = [['a', 'b', 'c'], [4]]
levels2 = [[1, 2, 3], ['a']]
labels = [[0, 1, 0, 2, 2, 0], [0, 0, 0, 0, 0, 0]]
mi1 = MultiIndex(levels=levels, labels=labels)
mi2 = MultiIndex(levels=levels2, labels=labels)
vals = mi1.values.copy()
vals2 = mi2.values.copy()
assert mi1._tuples is not None
# Make sure level setting works
new_vals = mi1.set_levels(levels2).values
tm.assert_almost_equal(vals2, new_vals)
# Non-inplace doesn't kill _tuples [implementation detail]
tm.assert_almost_equal(mi1._tuples, vals)
# ...and values is still same too
tm.assert_almost_equal(mi1.values, vals)
# Inplace should kill _tuples
mi1.set_levels(levels2, inplace=True)
tm.assert_almost_equal(mi1.values, vals2)
# Make sure label setting works too
labels2 = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]
exp_values = np.empty((6,), dtype=object)
exp_values[:] = [(long(1), 'a')] * 6
# Must be 1d array of tuples
assert exp_values.shape == (6,)
new_values = mi2.set_labels(labels2).values
# Not inplace shouldn't change
tm.assert_almost_equal(mi2._tuples, vals2)
# Should have correct values
tm.assert_almost_equal(exp_values, new_values)
# ...and again setting inplace should kill _tuples, etc
mi2.set_labels(labels2, inplace=True)
tm.assert_almost_equal(mi2.values, new_values)
def test_copy_in_constructor(self):
levels = np.array(["a", "b", "c"])
labels = np.array([1, 1, 2, 0, 0, 1, 1])
val = labels[0]
mi = MultiIndex(levels=[levels, levels], labels=[labels, labels],
copy=True)
assert mi.labels[0][0] == val
labels[0] = 15
assert mi.labels[0][0] == val
val = levels[0]
levels[0] = "PANDA"
assert mi.levels[0][0] == val
def test_set_value_keeps_names(self):
# motivating example from #3742
lev1 = ['hans', 'hans', 'hans', 'grethe', 'grethe', 'grethe']
lev2 = ['1', '2', '3'] * 2
idx = pd.MultiIndex.from_arrays([lev1, lev2], names=['Name', 'Number'])
df = pd.DataFrame(
np.random.randn(6, 4),
columns=['one', 'two', 'three', 'four'],
index=idx)
df = df.sort_index()
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
df.at[('grethe', '4'), 'one'] = 99.34
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
def test_copy_names(self):
# Check that adding a "names" parameter to the copy is honored
# GH14302
multi_idx = pd.Index([(1, 2), (3, 4)], names=['MyName1', 'MyName2'])
multi_idx1 = multi_idx.copy()
assert multi_idx.equals(multi_idx1)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx1.names == ['MyName1', 'MyName2']
multi_idx2 = multi_idx.copy(names=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx2)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx2.names == ['NewName1', 'NewName2']
multi_idx3 = multi_idx.copy(name=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx3)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx3.names == ['NewName1', 'NewName2']
def test_names(self):
# names are assigned in setup
names = self.index_names
level_names = [level.name for level in self.index.levels]
assert names == level_names
# setting bad names on existing
index = self.index
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names",
list(index.names) + ["third"])
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names", [])
# initializing with bad names (should always be equivalent)
major_axis, minor_axis = self.index.levels
major_labels, minor_labels = self.index.labels
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first'])
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first', 'second', 'third'])
# names are assigned
index.names = ["a", "b"]
ind_names = list(index.names)
level_names = [level.name for level in index.levels]
assert ind_names == level_names
def test_astype(self):
expected = self.index.copy()
actual = self.index.astype('O')
assert_copy(actual.levels, expected.levels)
assert_copy(actual.labels, expected.labels)
self.check_level_names(actual, expected.names)
with tm.assert_raises_regex(TypeError, "^Setting.*dtype.*object"):
self.index.astype(np.dtype(int))
@pytest.mark.parametrize('ordered', [True, False])
def test_astype_category(self, ordered):
# GH 18630
msg = '> 1 ndim Categorical are not supported at this time'
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype(CategoricalDtype(ordered=ordered))
if ordered is False:
# dtype='category' defaults to ordered=False, so only test once
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype('category')
def test_constructor_single_level(self):
result = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]], names=['first'])
assert isinstance(result, MultiIndex)
expected = Index(['foo', 'bar', 'baz', 'qux'], name='first')
tm.assert_index_equal(result.levels[0], expected)
assert result.names == ['first']
def test_constructor_no_levels(self):
tm.assert_raises_regex(ValueError, "non-zero number "
"of levels/labels",
MultiIndex, levels=[], labels=[])
both_re = re.compile('Must pass both levels and labels')
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(levels=[])
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(labels=[])
def test_constructor_mismatched_label_levels(self):
labels = [np.array([1]), np.array([2]), np.array([3])]
levels = ["a"]
tm.assert_raises_regex(ValueError, "Length of levels and labels "
"must be the same", MultiIndex,
levels=levels, labels=labels)
length_error = re.compile('>= length of level')
label_error = re.compile(r'Unequal label lengths: \[4, 2\]')
# important to check that it's looking at the right thing.
with tm.assert_raises_regex(ValueError, length_error):
MultiIndex(levels=[['a'], ['b']],
labels=[[0, 1, 2, 3], [0, 3, 4, 1]])
with tm.assert_raises_regex(ValueError, label_error):
MultiIndex(levels=[['a'], ['b']], labels=[[0, 0, 0, 0], [0, 0]])
# external API
with tm.assert_raises_regex(ValueError, length_error):
self.index.copy().set_levels([['a'], ['b']])
with tm.assert_raises_regex(ValueError, label_error):
self.index.copy().set_labels([[0, 0, 0, 0], [0, 0]])
def test_constructor_nonhashable_names(self):
# GH 20527
levels = [[1, 2], [u'one', u'two']]
labels = [[0, 0, 1, 1], [0, 1, 0, 1]]
names = ((['foo'], ['bar']))
message = "MultiIndex.name must be a hashable type"
tm.assert_raises_regex(TypeError, message,
MultiIndex, levels=levels,
labels=labels, names=names)
# With .rename()
mi = MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=('foo', 'bar'))
renamed = [['foor'], ['barr']]
tm.assert_raises_regex(TypeError, message, mi.rename, names=renamed)
# With .set_names()
tm.assert_raises_regex(TypeError, message, mi.set_names, names=renamed)
@pytest.mark.parametrize('names', [['a', 'b', 'a'], ['1', '1', '2'],
['1', 'a', '1']])
def test_duplicate_level_names(self, names):
# GH18872
pytest.raises(ValueError, pd.MultiIndex.from_product,
[[0, 1]] * 3, names=names)
# With .rename()
mi = pd.MultiIndex.from_product([[0, 1]] * 3)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names)
# With .rename(., level=)
mi.rename(names[0], level=1, inplace=True)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names[:2], level=[0, 2])
def assert_multiindex_copied(self, copy, original):
# Levels should be (at least, shallow copied)
tm.assert_copy(copy.levels, original.levels)
tm.assert_almost_equal(copy.labels, original.labels)
# Labels doesn't matter which way copied
tm.assert_almost_equal(copy.labels, original.labels)
assert copy.labels is not original.labels
# Names doesn't matter which way copied
assert copy.names == original.names
assert copy.names is not original.names
# Sort order should be copied
assert copy.sortorder == original.sortorder
def test_copy(self):
i_copy = self.index.copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_shallow_copy(self):
i_copy = self.index._shallow_copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_view(self):
i_view = self.index.view()
self.assert_multiindex_copied(i_view, self.index)
def check_level_names(self, index, names):
assert [level.name for level in index.levels] == list(names)
def test_changing_names(self):
# names should be applied to levels
level_names = [level.name for level in self.index.levels]
self.check_level_names(self.index, self.index.names)
view = self.index.view()
copy = self.index.copy()
shallow_copy = self.index._shallow_copy()
# changing names should change level names on object
new_names = [name + "a" for name in self.index.names]
self.index.names = new_names
self.check_level_names(self.index, new_names)
# but not on copies
self.check_level_names(view, level_names)
self.check_level_names(copy, level_names)
self.check_level_names(shallow_copy, level_names)
# and copies shouldn't change original
shallow_copy.names = [name + "c" for name in shallow_copy.names]
self.check_level_names(self.index, new_names)
def test_get_level_number_integer(self):
self.index.names = [1, 0]
assert self.index._get_level_number(1) == 0
assert self.index._get_level_number(0) == 1
pytest.raises(IndexError, self.index._get_level_number, 2)
tm.assert_raises_regex(KeyError, 'Level fourth not found',
self.index._get_level_number, 'fourth')
def test_from_arrays(self):
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# list of arrays as input
result = MultiIndex.from_arrays(arrays, names=self.index.names)
tm.assert_index_equal(result, self.index)
# infer correctly
result = MultiIndex.from_arrays([[pd.NaT, Timestamp('20130101')],
['a', 'b']])
assert result.levels[0].equals(Index([Timestamp('20130101')]))
assert result.levels[1].equals(Index(['a', 'b']))
def test_from_arrays_iterator(self):
# GH 18434
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# iterator as input
result = MultiIndex.from_arrays(iter(arrays), names=self.index.names)
tm.assert_index_equal(result, self.index)
# invalid iterator input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of array-likes."):
MultiIndex.from_arrays(0)
def test_from_arrays_index_series_datetimetz(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3,
tz='Asia/Tokyo')
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_timedelta(self):
idx1 = pd.timedelta_range('1 days', freq='D', periods=3)
idx2 = pd.timedelta_range('2 hours', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_period(self):
idx1 = pd.period_range('2011-01-01', freq='D', periods=3)
idx2 = pd.period_range('2015-01-01', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_datetimelike_mixed(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3)
idx3 = pd.timedelta_range('1 days', freq='D', periods=3)
idx4 = pd.period_range('2011-01-01', freq='D', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2, idx3, idx4])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
tm.assert_index_equal(result.get_level_values(2), idx3)
tm.assert_index_equal(result.get_level_values(3), idx4)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1),
pd.Series(idx2),
pd.Series(idx3),
pd.Series(idx4)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result2.get_level_values(2), idx3)
tm.assert_index_equal(result2.get_level_values(3), idx4)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_categorical(self):
# GH13743
idx1 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=False)
idx2 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=True)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
result3 = pd.MultiIndex.from_arrays([idx1.values, idx2.values])
tm.assert_index_equal(result3.get_level_values(0), idx1)
tm.assert_index_equal(result3.get_level_values(1), idx2)
def test_from_arrays_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_arrays(arrays=[])
# 1 level
result = MultiIndex.from_arrays(arrays=[[]], names=['A'])
assert isinstance(result, MultiIndex)
expected = Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# N levels
for N in [2, 3]:
arrays = [[]] * N
names = list('ABC')[:N]
result = MultiIndex.from_arrays(arrays=arrays, names=names)
expected = MultiIndex(levels=[[]] * N, labels=[[]] * N,
names=names)
tm.assert_index_equal(result, expected)
def test_from_arrays_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_arrays, arrays=i)
def test_from_arrays_different_lengths(self):
# see gh-13599
idx1 = [1, 2, 3]
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = []
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = [1, 2, 3]
idx2 = []
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
def test_from_product(self):
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
result = MultiIndex.from_product([first, second], names=names)
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
tm.assert_index_equal(result, expected)
def test_from_product_iterator(self):
# GH 18434
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
# iterator as input
result = MultiIndex.from_product(iter([first, second]), names=names)
tm.assert_index_equal(result, expected)
# Invalid non-iterable input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of iterables."):
MultiIndex.from_product(0)
def test_from_product_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_product([])
# 1 level
result = MultiIndex.from_product([[]], names=['A'])
expected = pd.Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# 2 levels
l1 = [[], ['foo', 'bar', 'baz'], []]
l2 = [[], [], ['a', 'b', 'c']]
names = ['A', 'B']
for first, second in zip(l1, l2):
result = MultiIndex.from_product([first, second], names=names)
expected = MultiIndex(levels=[first, second],
labels=[[], []], names=names)
tm.assert_index_equal(result, expected)
# GH12258
names = ['A', 'B', 'C']
for N in range(4):
lvl2 = lrange(N)
result = MultiIndex.from_product([[], lvl2, []], names=names)
expected = MultiIndex(levels=[[], lvl2, []],
labels=[[], [], []], names=names)
tm.assert_index_equal(result, expected)
def test_from_product_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_product, iterables=i)
def test_from_product_datetimeindex(self):
dt_index = date_range('2000-01-01', periods=2)
mi = pd.MultiIndex.from_product([[1, 2], dt_index])
etalon = construct_1d_object_array_from_listlike([(1, pd.Timestamp(
'2000-01-01')), (1, pd.Timestamp('2000-01-02')), (2, pd.Timestamp(
'2000-01-01')), (2, pd.Timestamp('2000-01-02'))])
tm.assert_numpy_array_equal(mi.values, etalon)
def test_from_product_index_series_categorical(self):
# GH13743
first = ['foo', 'bar']
for ordered in [False, True]:
idx = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=ordered)
expected = pd.CategoricalIndex(list("abcaab") + list("abcaab"),
categories=list("bac"),
ordered=ordered)
for arr in [idx, pd.Series(idx), idx.values]:
result = pd.MultiIndex.from_product([first, arr])
tm.assert_index_equal(result.get_level_values(1), expected)
def test_values_boxed(self):
tuples = [(1, pd.Timestamp('2000-01-01')), (2, pd.NaT),
(3, pd.Timestamp('2000-01-03')),
(1, pd.Timestamp('2000-01-04')),
(2, pd.Timestamp('2000-01-02')),
(3, pd.Timestamp('2000-01-03'))]
result = pd.MultiIndex.from_tuples(tuples)
expected = construct_1d_object_array_from_listlike(tuples)
tm.assert_numpy_array_equal(result.values, expected)
# Check that code branches for boxed values produce identical results
tm.assert_numpy_array_equal(result.values[:4], result[:4].values)
def test_values_multiindex_datetimeindex(self):
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(10 ** 18, 10 ** 18 + 5)
naive = pd.DatetimeIndex(ints)
aware = pd.DatetimeIndex(ints, tz='US/Central')
idx = pd.MultiIndex.from_arrays([naive, aware])
result = idx.values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive)
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware)
# n_lev > n_lab
result = idx[:2].values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive[:2])
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware[:2])
def test_values_multiindex_periodindex(self):
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(2007, 2012)
pidx = pd.PeriodIndex(ints, freq='D')
idx = pd.MultiIndex.from_arrays([ints, pidx])
result = idx.values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx)
# n_lev > n_lab
result = idx[:2].values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints[:2]))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx[:2])
def test_append(self):
result = self.index[:3].append(self.index[3:])
assert result.equals(self.index)
foos = [self.index[:1], self.index[1:3], self.index[3:]]
result = foos[0].append(foos[1:])
assert result.equals(self.index)
# empty
result = self.index.append([])
assert result.equals(self.index)
def test_append_mixed_dtypes(self):
# GH 13660
dti = date_range('2011-01-01', freq='M', periods=3, )
dti_tz = date_range('2011-01-01', freq='M', periods=3, tz='US/Eastern')
pi = period_range('2011-01', freq='M', periods=3)
mi = MultiIndex.from_arrays([[1, 2, 3],
[1.1, np.nan, 3.3],
['a', 'b', 'c'],
dti, dti_tz, pi])
assert mi.nlevels == 6
res = mi.append(mi)
exp = MultiIndex.from_arrays([[1, 2, 3, 1, 2, 3],
[1.1, np.nan, 3.3, 1.1, np.nan, 3.3],
['a', 'b', 'c', 'a', 'b', 'c'],
dti.append(dti),
dti_tz.append(dti_tz),
pi.append(pi)])
tm.assert_index_equal(res, exp)
other = MultiIndex.from_arrays([['x', 'y', 'z'], ['x', 'y', 'z'],
['x', 'y', 'z'], ['x', 'y', 'z'],
['x', 'y', 'z'], ['x', 'y', 'z']])
res = mi.append(other)
exp = MultiIndex.from_arrays([[1, 2, 3, 'x', 'y', 'z'],
[1.1, np.nan, 3.3, 'x', 'y', 'z'],
['a', 'b', 'c', 'x', 'y', 'z'],
dti.append(pd.Index(['x', 'y', 'z'])),
dti_tz.append(pd.Index(['x', 'y', 'z'])),
pi.append(pd.Index(['x', 'y', 'z']))])
tm.assert_index_equal(res, exp)
def test_get_level_values(self):
result = self.index.get_level_values(0)
expected = Index(['foo', 'foo', 'bar', 'baz', 'qux', 'qux'],
name='first')
tm.assert_index_equal(result, expected)
assert result.name == 'first'
result = self.index.get_level_values('first')
expected = self.index.get_level_values(0)
tm.assert_index_equal(result, expected)
# GH 10460
index = MultiIndex(
levels=[CategoricalIndex(['A', 'B']),
CategoricalIndex([1, 2, 3])],
labels=[np.array([0, 0, 0, 1, 1, 1]),
np.array([0, 1, 2, 0, 1, 2])])
exp = CategoricalIndex(['A', 'A', 'A', 'B', 'B', 'B'])
tm.assert_index_equal(index.get_level_values(0), exp)
exp = CategoricalIndex([1, 2, 3, 1, 2, 3])
tm.assert_index_equal(index.get_level_values(1), exp)
def test_get_level_values_int_with_na(self):
# GH 17924
arrays = [['a', 'b', 'b'], [1, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([1, np.nan, 2])
tm.assert_index_equal(result, expected)
arrays = [['a', 'b', 'b'], [np.nan, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([np.nan, np.nan, 2])
tm.assert_index_equal(result, expected)
def test_get_level_values_na(self):
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([np.nan, np.nan, np.nan])
tm.assert_index_equal(result, expected)
result = index.get_level_values(1)
expected = pd.Index(['a', np.nan, 1])
tm.assert_index_equal(result, expected)
arrays = [['a', 'b', 'b'], pd.DatetimeIndex([0, 1, pd.NaT])]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = pd.DatetimeIndex([0, 1, pd.NaT])
tm.assert_index_equal(result, expected)
arrays = [[], []]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([], dtype=object)
tm.assert_index_equal(result, expected)
def test_get_level_values_all_na(self):
# GH 17924 when level entirely consists of nan
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([np.nan, np.nan, np.nan], dtype=np.float64)
tm.assert_index_equal(result, expected)
result = index.get_level_values(1)
expected = pd.Index(['a', np.nan, 1], dtype=object)
tm.assert_index_equal(result, expected)
def test_reorder_levels(self):
# this blows up
tm.assert_raises_regex(IndexError, '^Too many levels',
self.index.reorder_levels, [2, 1, 0])
def test_nlevels(self):
assert self.index.nlevels == 2
def test_iter(self):
result = list(self.index)
expected = [('foo', 'one'), ('foo', 'two'), ('bar', 'one'),
('baz', 'two'), ('qux', 'one'), ('qux', 'two')]
assert result == expected
def test_legacy_pickle(self):
if PY3:
pytest.skip("testing for legacy pickles not "
"support on py3")
path = tm.get_data_path('multiindex_v1.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
assert obj.equals(obj2)
res = obj.get_indexer(obj)
exp = np.arange(len(obj), dtype=np.intp)
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_legacy_v2_unpickle(self):
# 0.7.3 -> 0.8.0 format manage
path = tm.get_data_path('mindex_073.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
assert obj.equals(obj2)
res = obj.get_indexer(obj)
exp = np.arange(len(obj), dtype=np.intp)
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_roundtrip_pickle_with_tz(self):
# GH 8367
# round-trip of timezone
index = MultiIndex.from_product(
[[1, 2], ['a', 'b'], date_range('20130101', periods=3,
tz='US/Eastern')
], names=['one', 'two', 'three'])
unpickled = tm.round_trip_pickle(index)
assert index.equal_levels(unpickled)
def test_from_tuples_index_values(self):
result = MultiIndex.from_tuples(self.index)
assert (result.values == self.index.values).all()
def test_contains(self):
assert ('foo', 'two') in self.index
assert ('bar', 'two') not in self.index
assert None not in self.index
def test_contains_top_level(self):
midx = MultiIndex.from_product([['A', 'B'], [1, 2]])
assert 'A' in midx
assert 'A' not in midx._engine
def test_contains_with_nat(self):
# MI with a NaT
mi = MultiIndex(levels=[['C'],
pd.date_range('2012-01-01', periods=5)],
labels=[[0, 0, 0, 0, 0, 0], [-1, 0, 1, 2, 3, 4]],
names=[None, 'B'])
assert ('C', pd.Timestamp('2012-01-01')) in mi
for val in mi.values:
assert val in mi
def test_is_all_dates(self):
assert not self.index.is_all_dates
def test_is_numeric(self):
# MultiIndex is never numeric
assert not self.index.is_numeric()
def test_getitem(self):
# scalar
assert self.index[2] == ('bar', 'one')
# slice
result = self.index[2:5]
expected = self.index[[2, 3, 4]]
assert result.equals(expected)
# boolean
result = self.index[[True, False, True, False, True, True]]
result2 = self.index[np.array([True, False, True, False, True, True])]
expected = self.index[[0, 2, 4, 5]]
assert result.equals(expected)
assert result2.equals(expected)
def test_getitem_group_select(self):
sorted_idx, _ = self.index.sortlevel(0)
assert sorted_idx.get_loc('baz') == slice(3, 4)
assert sorted_idx.get_loc('foo') == slice(0, 2)
def test_get_loc(self):
assert self.index.get_loc(('foo', 'two')) == 1
assert self.index.get_loc(('baz', 'two')) == 3
pytest.raises(KeyError, self.index.get_loc, ('bar', 'two'))
pytest.raises(KeyError, self.index.get_loc, 'quux')
pytest.raises(NotImplementedError, self.index.get_loc, 'foo',
method='nearest')
# 3 levels
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
pytest.raises(KeyError, index.get_loc, (1, 1))
assert index.get_loc((2, 0)) == slice(3, 5)
def test_get_loc_duplicates(self):
index = Index([2, 2, 2, 2])
result = index.get_loc(2)
expected = slice(0, 4)
assert result == expected
# pytest.raises(Exception, index.get_loc, 2)
index = Index(['c', 'a', 'a', 'b', 'b'])
rs = index.get_loc('c')
xp = 0
assert rs == xp
def test_get_value_duplicates(self):
index = MultiIndex(levels=[['D', 'B', 'C'],
[0, 26, 27, 37, 57, 67, 75, 82]],
labels=[[0, 0, 0, 1, 2, 2, 2, 2, 2, 2],
[1, 3, 4, 6, 0, 2, 2, 3, 5, 7]],
names=['tag', 'day'])
assert index.get_loc('D') == slice(0, 3)
with pytest.raises(KeyError):
index._engine.get_value(np.array([]), 'D')
def test_get_loc_level(self):
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
loc, new_index = index.get_loc_level((0, 1))
expected = slice(1, 2)
exp_index = index[expected].droplevel(0).droplevel(0)
assert loc == expected
assert new_index.equals(exp_index)
loc, new_index = index.get_loc_level((0, 1, 0))
expected = 1
assert loc == expected
assert new_index is None
pytest.raises(KeyError, index.get_loc_level, (2, 2))
index = MultiIndex(levels=[[2000], lrange(4)], labels=[np.array(
[0, 0, 0, 0]), np.array([0, 1, 2, 3])])
result, new_index = index.get_loc_level((2000, slice(None, None)))
expected = slice(None, None)
assert result == expected
assert new_index.equals(index.droplevel(0))
@pytest.mark.parametrize('level', [0, 1])
@pytest.mark.parametrize('null_val', [np.nan, pd.NaT, None])
def test_get_loc_nan(self, level, null_val):
# GH 18485 : NaN in MultiIndex
levels = [['a', 'b'], ['c', 'd']]
key = ['b', 'd']
levels[level] = np.array([0, null_val], dtype=type(null_val))
key[level] = null_val
idx = MultiIndex.from_product(levels)
assert idx.get_loc(tuple(key)) == 3
def test_get_loc_missing_nan(self):
# GH 8569
idx = MultiIndex.from_arrays([[1.0, 2.0], [3.0, 4.0]])
assert isinstance(idx.get_loc(1), slice)
pytest.raises(KeyError, idx.get_loc, 3)
pytest.raises(KeyError, idx.get_loc, np.nan)
pytest.raises(KeyError, idx.get_loc, [np.nan])
@pytest.mark.parametrize('dtype1', [int, float, bool, str])
@pytest.mark.parametrize('dtype2', [int, float, bool, str])
def test_get_loc_multiple_dtypes(self, dtype1, dtype2):
# GH 18520
levels = [np.array([0, 1]).astype(dtype1),
np.array([0, 1]).astype(dtype2)]
idx = pd.MultiIndex.from_product(levels)
assert idx.get_loc(idx[2]) == 2
@pytest.mark.parametrize('level', [0, 1])
@pytest.mark.parametrize('dtypes', [[int, float], [float, int]])
def test_get_loc_implicit_cast(self, level, dtypes):
# GH 18818, GH 15994 : as flat index, cast int to float and vice-versa
levels = [['a', 'b'], ['c', 'd']]
key = ['b', 'd']
lev_dtype, key_dtype = dtypes
levels[level] = np.array([0, 1], dtype=lev_dtype)
key[level] = key_dtype(1)
idx = MultiIndex.from_product(levels)
assert idx.get_loc(tuple(key)) == 3
def test_get_loc_cast_bool(self):
# GH 19086 : int is casted to bool, but not vice-versa
levels = [[False, True], np.arange(2, dtype='int64')]
idx = MultiIndex.from_product(levels)
assert idx.get_loc((0, 1)) == 1
assert idx.get_loc((1, 0)) == 2
pytest.raises(KeyError, idx.get_loc, (False, True))
pytest.raises(KeyError, idx.get_loc, (True, False))
def test_slice_locs(self):
df = tm.makeTimeDataFrame()
stacked = df.stack()
idx = stacked.index
slob = slice(*idx.slice_locs(df.index[5], df.index[15]))
sliced = stacked[slob]
expected = df[5:16].stack()
tm.assert_almost_equal(sliced.values, expected.values)
slob = slice(*idx.slice_locs(df.index[5] + timedelta(seconds=30),
df.index[15] - timedelta(seconds=30)))
sliced = stacked[slob]
expected = df[6:15].stack()
tm.assert_almost_equal(sliced.values, expected.values)
def test_slice_locs_with_type_mismatch(self):
df = tm.makeTimeDataFrame()
stacked = df.stack()
idx = stacked.index
tm.assert_raises_regex(TypeError, '^Level type mismatch',
idx.slice_locs, (1, 3))
tm.assert_raises_regex(TypeError, '^Level type mismatch',
idx.slice_locs,
df.index[5] + timedelta(
seconds=30), (5, 2))
df = tm.makeCustomDataframe(5, 5)
stacked = df.stack()
idx = stacked.index
with tm.assert_raises_regex(TypeError, '^Level type mismatch'):
idx.slice_locs(timedelta(seconds=30))
# TODO: Try creating a UnicodeDecodeError in exception message
with tm.assert_raises_regex(TypeError, '^Level type mismatch'):
idx.slice_locs(df.index[1], (16, "a"))
def test_slice_locs_not_sorted(self):
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
tm.assert_raises_regex(KeyError, "[Kk]ey length.*greater than "
"MultiIndex lexsort depth",
index.slice_locs, (1, 0, 1), (2, 1, 0))
# works
sorted_index, _ = index.sortlevel(0)
# should there be a test case here???
sorted_index.slice_locs((1, 0, 1), (2, 1, 0))
def test_slice_locs_partial(self):
sorted_idx, _ = self.index.sortlevel(0)
result = sorted_idx.slice_locs(('foo', 'two'), ('qux', 'one'))
assert result == (1, 5)
result = sorted_idx.slice_locs(None, ('qux', 'one'))
assert result == (0, 5)
result = sorted_idx.slice_locs(('foo', 'two'), None)
assert result == (1, len(sorted_idx))
result = sorted_idx.slice_locs('bar', 'baz')
assert result == (2, 4)
def test_slice_locs_not_contained(self):
# some searchsorted action
index = MultiIndex(levels=[[0, 2, 4, 6], [0, 2, 4]],
labels=[[0, 0, 0, 1, 1, 2, 3, 3, 3],
[0, 1, 2, 1, 2, 2, 0, 1, 2]], sortorder=0)
result = index.slice_locs((1, 0), (5, 2))
assert result == (3, 6)
result = index.slice_locs(1, 5)
assert result == (3, 6)
result = index.slice_locs((2, 2), (5, 2))
assert result == (3, 6)
result = index.slice_locs(2, 5)
assert result == (3, 6)
result = index.slice_locs((1, 0), (6, 3))
assert result == (3, 8)
result = index.slice_locs(-1, 10)
assert result == (0, len(index))
def test_consistency(self):
# need to construct an overflow
major_axis = lrange(70000)
minor_axis = lrange(10)
major_labels = np.arange(70000)
minor_labels = np.repeat(lrange(10), 7000)
        # the fact that it works means it's consistent
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
# inconsistent
major_labels = np.array([0, 0, 1, 1, 1, 2, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not index.is_unique
def test_truncate(self):
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
result = index.truncate(before=1)
assert 'foo' not in result.levels[0]
assert 1 in result.levels[0]
result = index.truncate(after=1)
assert 2 not in result.levels[0]
assert 1 in result.levels[0]
result = index.truncate(before=1, after=2)
assert len(result.levels[0]) == 2
# after < before
pytest.raises(ValueError, index.truncate, 3, 1)
def test_get_indexer(self):
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 2, 3, 3], dtype=np.intp)
minor_labels = np.array([0, 1, 0, 0, 1, 0, 1], dtype=np.intp)
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
idx1 = index[:5]
idx2 = index[[1, 3, 5]]
r1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, np.array([1, 3, -1], dtype=np.intp))
r1 = idx2.get_indexer(idx1, method='pad')
e1 = np.array([-1, 0, 0, 1, 1], dtype=np.intp)
assert_almost_equal(r1, e1)
r2 = idx2.get_indexer(idx1[::-1], method='pad')
assert_almost_equal(r2, e1[::-1])
rffill1 = idx2.get_indexer(idx1, method='ffill')
assert_almost_equal(r1, rffill1)
r1 = idx2.get_indexer(idx1, method='backfill')
e1 = np.array([0, 0, 1, 1, 2], dtype=np.intp)
assert_almost_equal(r1, e1)
r2 = idx2.get_indexer(idx1[::-1], method='backfill')
assert_almost_equal(r2, e1[::-1])
rbfill1 = idx2.get_indexer(idx1, method='bfill')
assert_almost_equal(r1, rbfill1)
# pass non-MultiIndex
r1 = idx1.get_indexer(idx2.values)
rexp1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, rexp1)
r1 = idx1.get_indexer([1, 2, 3])
assert (r1 == [-1, -1, -1]).all()
# create index with duplicates
idx1 = Index(lrange(10) + lrange(10))
idx2 = Index(lrange(20))
msg = "Reindexing only valid with uniquely valued Index objects"
with tm.assert_raises_regex(InvalidIndexError, msg):
idx1.get_indexer(idx2)
def test_get_indexer_nearest(self):
midx = MultiIndex.from_tuples([('a', 1), ('b', 2)])
with pytest.raises(NotImplementedError):
midx.get_indexer(['a'], method='nearest')
with pytest.raises(NotImplementedError):
midx.get_indexer(['a'], method='pad', tolerance=2)
def test_hash_collisions(self):
# non-smoke test that we don't get hash collisions
index = MultiIndex.from_product([np.arange(1000), np.arange(1000)],
names=['one', 'two'])
result = index.get_indexer(index.values)
tm.assert_numpy_array_equal(result, np.arange(
len(index), dtype='intp'))
for i in [0, 1, len(index) - 2, len(index) - 1]:
result = index.get_loc(index[i])
assert result == i
def test_format(self):
self.index.format()
self.index[:0].format()
def test_format_integer_names(self):
index = MultiIndex(levels=[[0, 1], [0, 1]],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]], names=[0, 1])
index.format(names=True)
def test_format_sparse_display(self):
index = MultiIndex(levels=[[0, 1], [0, 1], [0, 1], [0]],
labels=[[0, 0, 0, 1, 1, 1], [0, 0, 1, 0, 0, 1],
[0, 1, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0]])
result = index.format()
assert result[3] == '1 0 0 0'
def test_format_sparse_config(self):
warn_filters = warnings.filters
warnings.filterwarnings('ignore', category=FutureWarning,
module=".*format")
# GH1538
pd.set_option('display.multi_sparse', False)
result = self.index.format()
assert result[1] == 'foo two'
tm.reset_display_options()
warnings.filters = warn_filters
def test_to_frame(self):
tuples = [(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')]
index = MultiIndex.from_tuples(tuples)
result = index.to_frame(index=False)
expected = DataFrame(tuples)
tm.assert_frame_equal(result, expected)
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
tuples = [(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')]
index = MultiIndex.from_tuples(tuples, names=['first', 'second'])
result = index.to_frame(index=False)
expected = DataFrame(tuples)
expected.columns = ['first', 'second']
tm.assert_frame_equal(result, expected)
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_product([range(5),
pd.date_range('20130101', periods=3)])
result = index.to_frame(index=False)
expected = DataFrame(
{0: np.repeat(np.arange(5, dtype='int64'), 3),
1: np.tile(pd.date_range('20130101', periods=3), 5)})
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_product([range(5),
pd.date_range('20130101', periods=3)])
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
def test_to_hierarchical(self):
index = MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (
2, 'two')])
result = index.to_hierarchical(3)
expected = MultiIndex(levels=[[1, 2], ['one', 'two']],
labels=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1]])
tm.assert_index_equal(result, expected)
assert result.names == index.names
# K > 1
result = index.to_hierarchical(3, 2)
expected = MultiIndex(levels=[[1, 2], ['one', 'two']],
labels=[[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]])
tm.assert_index_equal(result, expected)
assert result.names == index.names
# non-sorted
index = MultiIndex.from_tuples([(2, 'c'), (1, 'b'),
(2, 'a'), (2, 'b')],
names=['N1', 'N2'])
result = index.to_hierarchical(2)
expected = MultiIndex.from_tuples([(2, 'c'), (2, 'c'), (1, 'b'),
(1, 'b'),
(2, 'a'), (2, 'a'),
(2, 'b'), (2, 'b')],
names=['N1', 'N2'])
tm.assert_index_equal(result, expected)
assert result.names == index.names
def test_bounds(self):
self.index._bounds
def test_equals_multi(self):
assert self.index.equals(self.index)
assert not self.index.equals(self.index.values)
assert self.index.equals(Index(self.index.values))
assert self.index.equal_levels(self.index)
assert not self.index.equals(self.index[:-1])
assert not self.index.equals(self.index[-1])
# different number of levels
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
index2 = MultiIndex(levels=index.levels[:-1], labels=index.labels[:-1])
assert not index.equals(index2)
assert not index.equal_levels(index2)
# levels are different
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 2, 3])
minor_labels = np.array([0, 1, 0, 0, 1, 0])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not self.index.equals(index)
assert not self.index.equal_levels(index)
# some of the labels are different
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 2, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not self.index.equals(index)
def test_equals_missing_values(self):
# make sure take is not using -1
i = pd.MultiIndex.from_tuples([(0, pd.NaT),
(0, pd.Timestamp('20130101'))])
result = i[0:1].equals(i[0])
assert not result
result = i[1:2].equals(i[1])
assert not result
def test_identical(self):
mi = self.index.copy()
mi2 = self.index.copy()
assert mi.identical(mi2)
mi = mi.set_names(['new1', 'new2'])
assert mi.equals(mi2)
assert not mi.identical(mi2)
mi2 = mi2.set_names(['new1', 'new2'])
assert mi.identical(mi2)
mi3 = Index(mi.tolist(), names=mi.names)
mi4 = Index(mi.tolist(), names=mi.names, tupleize_cols=False)
assert mi.identical(mi3)
assert not mi.identical(mi4)
assert mi.equals(mi4)
def test_is_(self):
mi = MultiIndex.from_tuples(lzip(range(10), range(10)))
assert mi.is_(mi)
assert mi.is_(mi.view())
assert mi.is_(mi.view().view().view().view())
mi2 = mi.view()
# names are metadata, they don't change id
mi2.names = ["A", "B"]
assert mi2.is_(mi)
assert mi.is_(mi2)
assert mi.is_(mi.set_names(["C", "D"]))
mi2 = mi.view()
mi2.set_names(["E", "F"], inplace=True)
assert mi.is_(mi2)
# levels are inherent properties, they change identity
mi3 = mi2.set_levels([lrange(10), lrange(10)])
assert not mi3.is_(mi2)
# shouldn't change
assert mi2.is_(mi)
mi4 = mi3.view()
# GH 17464 - Remove duplicate MultiIndex levels
mi4.set_levels([lrange(10), lrange(10)], inplace=True)
assert not mi4.is_(mi3)
mi5 = mi.view()
mi5.set_levels(mi5.levels, inplace=True)
assert not mi5.is_(mi)
def test_union(self):
piece1 = self.index[:5][::-1]
piece2 = self.index[3:]
the_union = piece1 | piece2
tups = sorted(self.index.values)
expected = MultiIndex.from_tuples(tups)
assert the_union.equals(expected)
# corner case, pass self or empty thing:
the_union = self.index.union(self.index)
assert the_union is self.index
the_union = self.index.union(self.index[:0])
assert the_union is self.index
# won't work in python 3
# tuples = self.index.values
# result = self.index[:4] | tuples[4:]
# assert result.equals(tuples)
# not valid for python 3
# def test_union_with_regular_index(self):
# other = Index(['A', 'B', 'C'])
# result = other.union(self.index)
# assert ('foo', 'one') in result
# assert 'B' in result
# result2 = self.index.union(other)
# assert result.equals(result2)
def test_intersection(self):
piece1 = self.index[:5][::-1]
piece2 = self.index[3:]
the_int = piece1 & piece2
tups = sorted(self.index[3:5].values)
expected = MultiIndex.from_tuples(tups)
assert the_int.equals(expected)
# corner case, pass self
the_int = self.index.intersection(self.index)
assert the_int is self.index
# empty intersection: disjoint
empty = self.index[:2] & self.index[2:]
expected = self.index[:0]
assert empty.equals(expected)
# can't do in python 3
# tuples = self.index.values
# result = self.index & tuples
# assert result.equals(tuples)
def test_sub(self):
first = self.index
# - now raises (previously was set op difference)
with pytest.raises(TypeError):
first - self.index[-3:]
with pytest.raises(TypeError):
self.index[-3:] - first
with pytest.raises(TypeError):
self.index[-3:] - first.tolist()
with pytest.raises(TypeError):
first.tolist() - self.index[-3:]
def test_difference(self):
first = self.index
result = first.difference(self.index[-3:])
expected = MultiIndex.from_tuples(sorted(self.index[:-3].values),
sortorder=0,
names=self.index.names)
assert isinstance(result, MultiIndex)
assert result.equals(expected)
assert result.names == self.index.names
# empty difference: reflexive
result = self.index.difference(self.index)
expected = self.index[:0]
assert result.equals(expected)
assert result.names == self.index.names
# empty difference: superset
result = self.index[-3:].difference(self.index)
expected = self.index[:0]
assert result.equals(expected)
assert result.names == self.index.names
# empty difference: degenerate
result = self.index[:0].difference(self.index)
expected = self.index[:0]
assert result.equals(expected)
assert result.names == self.index.names
# names not the same
chunklet = self.index[-3:]
chunklet.names = ['foo', 'baz']
result = first.difference(chunklet)
assert result.names == (None, None)
# empty, but non-equal
result = self.index.difference(self.index.sortlevel(1)[0])
assert len(result) == 0
# raise Exception called with non-MultiIndex
result = first.difference(first.values)
assert result.equals(first[:0])
# name from empty array
result = first.difference([])
assert first.equals(result)
assert first.names == result.names
# name from non-empty array
result = first.difference([('foo', 'one')])
expected = pd.MultiIndex.from_tuples([('bar', 'one'), ('baz', 'two'), (
'foo', 'two'), ('qux', 'one'), ('qux', 'two')])
expected.names = first.names
assert first.names == result.names
tm.assert_raises_regex(TypeError, "other must be a MultiIndex "
"or a list of tuples",
first.difference, [1, 2, 3, 4, 5])
def test_from_tuples(self):
tm.assert_raises_regex(TypeError, 'Cannot infer number of levels '
'from empty list',
MultiIndex.from_tuples, [])
expected = MultiIndex(levels=[[1, 3], [2, 4]],
labels=[[0, 1], [0, 1]],
names=['a', 'b'])
# input tuples
result = MultiIndex.from_tuples(((1, 2), (3, 4)), names=['a', 'b'])
tm.assert_index_equal(result, expected)
def test_from_tuples_iterator(self):
# GH 18434
# input iterator for tuples
expected = MultiIndex(levels=[[1, 3], [2, 4]],
labels=[[0, 1], [0, 1]],
names=['a', 'b'])
result = MultiIndex.from_tuples(zip([1, 3], [2, 4]), names=['a', 'b'])
tm.assert_index_equal(result, expected)
# input non-iterables
with tm.assert_raises_regex(
TypeError, 'Input must be a list / sequence of tuple-likes.'):
MultiIndex.from_tuples(0)
def test_from_tuples_empty(self):
# GH 16777
result = MultiIndex.from_tuples([], names=['a', 'b'])
expected = MultiIndex.from_arrays(arrays=[[], []],
names=['a', 'b'])
tm.assert_index_equal(result, expected)
def test_argsort(self):
result = self.index.argsort()
expected = self.index.values.argsort()
tm.assert_numpy_array_equal(result, expected)
def test_sortlevel(self):
import random
tuples = list(self.index)
random.shuffle(tuples)
index = MultiIndex.from_tuples(tuples)
sorted_idx, _ = index.sortlevel(0)
expected = MultiIndex.from_tuples(sorted(tuples))
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(0, ascending=False)
assert sorted_idx.equals(expected[::-1])
sorted_idx, _ = index.sortlevel(1)
by1 = sorted(tuples, key=lambda x: (x[1], x[0]))
expected = MultiIndex.from_tuples(by1)
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(1, ascending=False)
assert sorted_idx.equals(expected[::-1])
def test_sortlevel_not_sort_remaining(self):
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))
sorted_idx, _ = mi.sortlevel('A', sort_remaining=False)
assert sorted_idx.equals(mi)
def test_sortlevel_deterministic(self):
tuples = [('bar', 'one'), ('foo', 'two'), ('qux', 'two'),
('foo', 'one'), ('baz', 'two'), ('qux', 'one')]
index = MultiIndex.from_tuples(tuples)
sorted_idx, _ = index.sortlevel(0)
expected = MultiIndex.from_tuples(sorted(tuples))
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(0, ascending=False)
assert sorted_idx.equals(expected[::-1])
sorted_idx, _ = index.sortlevel(1)
by1 = sorted(tuples, key=lambda x: (x[1], x[0]))
expected = MultiIndex.from_tuples(by1)
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(1, ascending=False)
assert sorted_idx.equals(expected[::-1])
def test_dims(self):
pass
def test_drop(self):
dropped = self.index.drop([('foo', 'two'), ('qux', 'one')])
index = MultiIndex.from_tuples([('foo', 'two'), ('qux', 'one')])
dropped2 = self.index.drop(index)
expected = self.index[[0, 2, 3, 5]]
tm.assert_index_equal(dropped, expected)
tm.assert_index_equal(dropped2, expected)
dropped = self.index.drop(['bar'])
expected = self.index[[0, 1, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
dropped = self.index.drop('foo')
expected = self.index[[2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
index = MultiIndex.from_tuples([('bar', 'two')])
pytest.raises(KeyError, self.index.drop, [('bar', 'two')])
pytest.raises(KeyError, self.index.drop, index)
pytest.raises(KeyError, self.index.drop, ['foo', 'two'])
# partially correct argument
mixed_index = MultiIndex.from_tuples([('qux', 'one'), ('bar', 'two')])
pytest.raises(KeyError, self.index.drop, mixed_index)
# error='ignore'
dropped = self.index.drop(index, errors='ignore')
expected = self.index[[0, 1, 2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
dropped = self.index.drop(mixed_index, errors='ignore')
expected = self.index[[0, 1, 2, 3, 5]]
tm.assert_index_equal(dropped, expected)
dropped = self.index.drop(['foo', 'two'], errors='ignore')
expected = self.index[[2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
# mixed partial / full drop
dropped = self.index.drop(['foo', ('qux', 'one')])
expected = self.index[[2, 3, 5]]
tm.assert_index_equal(dropped, expected)
# mixed partial / full drop / error='ignore'
mixed_index = ['foo', ('qux', 'one'), 'two']
pytest.raises(KeyError, self.index.drop, mixed_index)
dropped = self.index.drop(mixed_index, errors='ignore')
expected = self.index[[2, 3, 5]]
tm.assert_index_equal(dropped, expected)
def test_droplevel_with_names(self):
index = self.index[self.index.get_loc('foo')]
dropped = index.droplevel(0)
assert dropped.name == 'second'
index = MultiIndex(
levels=[Index(lrange(4)), Index(lrange(4)), Index(lrange(4))],
labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])],
names=['one', 'two', 'three'])
dropped = index.droplevel(0)
assert dropped.names == ('two', 'three')
dropped = index.droplevel('two')
expected = index.droplevel(1)
assert dropped.equals(expected)
def test_droplevel_list(self):
index = MultiIndex(
levels=[Index(lrange(4)), Index(lrange(4)), Index(lrange(4))],
labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])],
names=['one', 'two', 'three'])
dropped = index[:2].droplevel(['three', 'one'])
expected = index[:2].droplevel(2).droplevel(0)
assert dropped.equals(expected)
dropped = index[:2].droplevel([])
expected = index[:2]
assert dropped.equals(expected)
with pytest.raises(ValueError):
index[:2].droplevel(['one', 'two', 'three'])
with pytest.raises(KeyError):
index[:2].droplevel(['one', 'four'])
def test_drop_not_lexsorted(self):
# GH 12078
# define the lexsorted version of the multi-index
tuples = [('a', ''), ('b1', 'c1'), ('b2', 'c2')]
lexsorted_mi = MultiIndex.from_tuples(tuples, names=['b', 'c'])
assert lexsorted_mi.is_lexsorted()
# and the not-lexsorted version
df = pd.DataFrame(columns=['a', 'b', 'c', 'd'],
data=[[1, 'b1', 'c1', 3], [1, 'b2', 'c2', 4]])
df = df.pivot_table(index='a', columns=['b', 'c'], values='d')
df = df.reset_index()
not_lexsorted_mi = df.columns
assert not not_lexsorted_mi.is_lexsorted()
# compare the results
tm.assert_index_equal(lexsorted_mi, not_lexsorted_mi)
with tm.assert_produces_warning(PerformanceWarning):
tm.assert_index_equal(lexsorted_mi.drop('a'),
not_lexsorted_mi.drop('a'))
def test_insert(self):
# key contained in all levels
new_index = self.index.insert(0, ('bar', 'two'))
assert new_index.equal_levels(self.index)
assert new_index[0] == ('bar', 'two')
# key not contained in all levels
new_index = self.index.insert(0, ('abc', 'three'))
exp0 = Index(list(self.index.levels[0]) + ['abc'], name='first')
tm.assert_index_equal(new_index.levels[0], exp0)
exp1 = Index(list(self.index.levels[1]) + ['three'], name='second')
tm.assert_index_equal(new_index.levels[1], exp1)
assert new_index[0] == ('abc', 'three')
# key wrong length
msg = "Item must have length equal to number of levels"
with tm.assert_raises_regex(ValueError, msg):
self.index.insert(0, ('foo2',))
left = pd.DataFrame([['a', 'b', 0], ['b', 'd', 1]],
columns=['1st', '2nd', '3rd'])
left.set_index(['1st', '2nd'], inplace=True)
ts = left['3rd'].copy(deep=True)
left.loc[('b', 'x'), '3rd'] = 2
left.loc[('b', 'a'), '3rd'] = -1
left.loc[('b', 'b'), '3rd'] = 3
left.loc[('a', 'x'), '3rd'] = 4
left.loc[('a', 'w'), '3rd'] = 5
left.loc[('a', 'a'), '3rd'] = 6
ts.loc[('b', 'x')] = 2
ts.loc['b', 'a'] = -1
ts.loc[('b', 'b')] = 3
ts.loc['a', 'x'] = 4
ts.loc[('a', 'w')] = 5
ts.loc['a', 'a'] = 6
right = pd.DataFrame([['a', 'b', 0], ['b', 'd', 1], ['b', 'x', 2],
['b', 'a', -1], ['b', 'b', 3], ['a', 'x', 4],
['a', 'w', 5], ['a', 'a', 6]],
columns=['1st', '2nd', '3rd'])
right.set_index(['1st', '2nd'], inplace=True)
        # FIXME: data types change to float because
        # of intermediate nan insertion;
tm.assert_frame_equal(left, right, check_dtype=False)
tm.assert_series_equal(ts, right['3rd'])
# GH9250
idx = [('test1', i) for i in range(5)] + \
[('test2', i) for i in range(6)] + \
[('test', 17), ('test', 18)]
left = pd.Series(np.linspace(0, 10, 11),
pd.MultiIndex.from_tuples(idx[:-2]))
left.loc[('test', 17)] = 11
left.loc[('test', 18)] = 12
right = pd.Series(np.linspace(0, 12, 13),
pd.MultiIndex.from_tuples(idx))
tm.assert_series_equal(left, right)
def test_take_preserve_name(self):
taken = self.index.take([3, 0, 1])
assert taken.names == self.index.names
def test_take_fill_value(self):
# GH 12631
vals = [['A', 'B'],
[pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')]]
idx = pd.MultiIndex.from_product(vals, names=['str', 'dt'])
result = idx.take(np.array([1, 0, -1]))
exp_vals = [('A', pd.Timestamp('2011-01-02')),
('A', pd.Timestamp('2011-01-01')),
('B', pd.Timestamp('2011-01-02'))]
expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt'])
tm.assert_index_equal(result, expected)
# fill_value
result = idx.take(np.array([1, 0, -1]), fill_value=True)
exp_vals = [('A', pd.Timestamp('2011-01-02')),
('A', pd.Timestamp('2011-01-01')),
(np.nan, pd.NaT)]
expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt'])
tm.assert_index_equal(result, expected)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False,
fill_value=True)
exp_vals = [('A', pd.Timestamp('2011-01-02')),
('A', pd.Timestamp('2011-01-01')),
('B', pd.Timestamp('2011-01-02'))]
expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt'])
tm.assert_index_equal(result, expected)
msg = ('When allow_fill=True and fill_value is not None, '
'all indices must be >= -1')
with tm.assert_raises_regex(ValueError, msg):
idx.take(np.array([1, 0, -2]), fill_value=True)
with tm.assert_raises_regex(ValueError, msg):
idx.take(np.array([1, 0, -5]), fill_value=True)
with pytest.raises(IndexError):
idx.take(np.array([1, -5]))
    def test_take_invalid_kwargs(self):
vals = [['A', 'B'],
[pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')]]
idx = pd.MultiIndex.from_product(vals, names=['str', 'dt'])
indices = [1, 2]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assert_raises_regex(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, idx.take,
indices, mode='clip')
@pytest.mark.parametrize('other',
[Index(['three', 'one', 'two']),
Index(['one']),
Index(['one', 'three'])])
def test_join_level(self, other, join_type):
join_index, lidx, ridx = other.join(self.index, how=join_type,
level='second',
return_indexers=True)
exp_level = other.join(self.index.levels[1], how=join_type)
assert join_index.levels[0].equals(self.index.levels[0])
assert join_index.levels[1].equals(exp_level)
# pare down levels
mask = np.array(
[x[1] in exp_level for x in self.index], dtype=bool)
exp_values = self.index.values[mask]
tm.assert_numpy_array_equal(join_index.values, exp_values)
if join_type in ('outer', 'inner'):
join_index2, ridx2, lidx2 = \
self.index.join(other, how=join_type, level='second',
return_indexers=True)
assert join_index.equals(join_index2)
tm.assert_numpy_array_equal(lidx, lidx2)
tm.assert_numpy_array_equal(ridx, ridx2)
tm.assert_numpy_array_equal(join_index2.values, exp_values)
def test_join_level_corner_case(self):
# some corner cases
idx = Index(['three', 'one', 'two'])
result = idx.join(self.index, level='second')
assert isinstance(result, MultiIndex)
tm.assert_raises_regex(TypeError, "Join.*MultiIndex.*ambiguous",
self.index.join, self.index, level=1)
def test_join_self(self, join_type):
res = self.index
joined = res.join(res, how=join_type)
assert res is joined
def test_join_multi(self):
# GH 10665
midx = pd.MultiIndex.from_product(
[np.arange(4), np.arange(4)], names=['a', 'b'])
idx = pd.Index([1, 2, 5], name='b')
# inner
jidx, lidx, ridx = midx.join(idx, how='inner', return_indexers=True)
exp_idx = pd.MultiIndex.from_product(
[np.arange(4), [1, 2]], names=['a', 'b'])
exp_lidx = np.array([1, 2, 5, 6, 9, 10, 13, 14], dtype=np.intp)
exp_ridx = np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=np.intp)
tm.assert_index_equal(jidx, exp_idx)
tm.assert_numpy_array_equal(lidx, exp_lidx)
tm.assert_numpy_array_equal(ridx, exp_ridx)
# flip
jidx, ridx, lidx = idx.join(midx, how='inner', return_indexers=True)
tm.assert_index_equal(jidx, exp_idx)
tm.assert_numpy_array_equal(lidx, exp_lidx)
tm.assert_numpy_array_equal(ridx, exp_ridx)
# keep MultiIndex
jidx, lidx, ridx = midx.join(idx, how='left', return_indexers=True)
exp_ridx = np.array([-1, 0, 1, -1, -1, 0, 1, -1, -1, 0, 1, -1, -1, 0,
1, -1], dtype=np.intp)
tm.assert_index_equal(jidx, midx)
assert lidx is None
tm.assert_numpy_array_equal(ridx, exp_ridx)
# flip
jidx, ridx, lidx = idx.join(midx, how='right', return_indexers=True)
tm.assert_index_equal(jidx, midx)
assert lidx is None
tm.assert_numpy_array_equal(ridx, exp_ridx)
def test_reindex(self):
result, indexer = self.index.reindex(list(self.index[:4]))
assert isinstance(result, MultiIndex)
self.check_level_names(result, self.index[:4].names)
result, indexer = self.index.reindex(list(self.index))
assert isinstance(result, MultiIndex)
assert indexer is None
self.check_level_names(result, self.index.names)
def test_reindex_level(self):
idx = Index(['one'])
target, indexer = self.index.reindex(idx, level='second')
target2, indexer2 = idx.reindex(self.index, level='second')
exp_index = self.index.join(idx, level='second', how='right')
exp_index2 = self.index.join(idx, level='second', how='left')
assert target.equals(exp_index)
exp_indexer = np.array([0, 2, 4])
tm.assert_numpy_array_equal(indexer, exp_indexer, check_dtype=False)
assert target2.equals(exp_index2)
exp_indexer2 = np.array([0, -1, 0, -1, 0, -1])
tm.assert_numpy_array_equal(indexer2, exp_indexer2, check_dtype=False)
tm.assert_raises_regex(TypeError, "Fill method not supported",
self.index.reindex, self.index,
method='pad', level='second')
tm.assert_raises_regex(TypeError, "Fill method not supported",
idx.reindex, idx, method='bfill',
level='first')
def test_duplicates(self):
assert not self.index.has_duplicates
assert self.index.append(self.index).has_duplicates
index = MultiIndex(levels=[[0, 1], [0, 1, 2]], labels=[
[0, 0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 0, 1, 2]])
assert index.has_duplicates
# GH 9075
t = [(u('x'), u('out'), u('z'), 5, u('y'), u('in'), u('z'), 169),
(u('x'), u('out'), u('z'), 7, u('y'), u('in'), u('z'), 119),
(u('x'), u('out'), u('z'), 9, u('y'), u('in'), u('z'), 135),
(u('x'), u('out'), u('z'), 13, u('y'), u('in'), u('z'), 145),
(u('x'), u('out'), u('z'), 14, u('y'), u('in'), u('z'), 158),
(u('x'), u('out'), u('z'), 16, u('y'), u('in'), u('z'), 122),
(u('x'), u('out'), u('z'), 17, u('y'), u('in'), u('z'), 160),
(u('x'), u('out'), u('z'), 18, u('y'), u('in'), u('z'), 180),
(u('x'), u('out'), u('z'), 20, u('y'), u('in'), u('z'), 143),
(u('x'), u('out'), u('z'), 21, u('y'), u('in'), u('z'), 128),
(u('x'), u('out'), u('z'), 22, u('y'), u('in'), u('z'), 129),
(u('x'), u('out'), u('z'), 25, u('y'), u('in'), u('z'), 111),
(u('x'), u('out'), u('z'), 28, u('y'), u('in'), u('z'), 114),
(u('x'), u('out'), u('z'), 29, u('y'), u('in'), u('z'), 121),
(u('x'), u('out'), u('z'), 31, u('y'), u('in'), u('z'), 126),
(u('x'), u('out'), u('z'), 32, u('y'), u('in'), u('z'), 155),
(u('x'), u('out'), u('z'), 33, u('y'), u('in'), u('z'), 123),
(u('x'), u('out'), u('z'), 12, u('y'), u('in'), u('z'), 144)]
index = pd.MultiIndex.from_tuples(t)
assert not index.has_duplicates
# handle int64 overflow if possible
def check(nlevels, with_nulls):
labels = np.tile(np.arange(500), 2)
level = np.arange(500)
if with_nulls: # inject some null values
labels[500] = -1 # common nan value
labels = [labels.copy() for i in range(nlevels)]
for i in range(nlevels):
labels[i][500 + i - nlevels // 2] = -1
labels += [np.array([-1, 1]).repeat(500)]
else:
labels = [labels] * nlevels + [np.arange(2).repeat(500)]
levels = [level] * nlevels + [[0, 1]]
# no dups
index = MultiIndex(levels=levels, labels=labels)
assert not index.has_duplicates
# with a dup
if with_nulls:
def f(a):
return np.insert(a, 1000, a[0])
labels = list(map(f, labels))
index = MultiIndex(levels=levels, labels=labels)
else:
values = index.values.tolist()
index = MultiIndex.from_tuples(values + [values[0]])
assert index.has_duplicates
# no overflow
check(4, False)
check(4, True)
# overflow possible
check(8, False)
check(8, True)
# GH 9125
n, k = 200, 5000
levels = [np.arange(n), tm.makeStringIndex(n), 1000 + np.arange(n)]
labels = [np.random.choice(n, k * n) for lev in levels]
mi = MultiIndex(levels=levels, labels=labels)
for keep in ['first', 'last', False]:
left = mi.duplicated(keep=keep)
right = pd._libs.hashtable.duplicated_object(mi.values, keep=keep)
tm.assert_numpy_array_equal(left, right)
# GH5873
for a in [101, 102]:
mi = MultiIndex.from_arrays([[101, a], [3.5, np.nan]])
assert not mi.has_duplicates
with warnings.catch_warnings(record=True):
# Deprecated - see GH20239
assert mi.get_duplicates().equals(MultiIndex.from_arrays(
[[], []]))
tm.assert_numpy_array_equal(mi.duplicated(), np.zeros(
2, dtype='bool'))
for n in range(1, 6): # 1st level shape
for m in range(1, 5): # 2nd level shape
# all possible unique combinations, including nan
lab = product(range(-1, n), range(-1, m))
mi = MultiIndex(levels=[list('abcde')[:n], list('WXYZ')[:m]],
labels=np.random.permutation(list(lab)).T)
assert len(mi) == (n + 1) * (m + 1)
assert not mi.has_duplicates
with warnings.catch_warnings(record=True):
# Deprecated - see GH20239
assert mi.get_duplicates().equals(MultiIndex.from_arrays(
[[], []]))
tm.assert_numpy_array_equal(mi.duplicated(), np.zeros(
len(mi), dtype='bool'))
def test_duplicate_meta_data(self):
# GH 10115
index = MultiIndex(
levels=[[0, 1], [0, 1, 2]],
labels=[[0, 0, 0, 0, 1, 1, 1],
[0, 1, 2, 0, 0, 1, 2]])
for idx in [index,
index.set_names([None, None]),
index.set_names([None, 'Num']),
index.set_names(['Upper', 'Num']), ]:
assert idx.has_duplicates
assert idx.drop_duplicates().names == idx.names
def test_get_unique_index(self):
idx = self.index[[0, 1, 0, 1, 1, 0, 0]]
expected = self.index._shallow_copy(idx[[0, 1]])
for dropna in [False, True]:
result = idx._get_unique_index(dropna=dropna)
assert result.unique
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('names', [None, ['first', 'second']])
def test_unique(self, names):
mi = pd.MultiIndex.from_arrays([[1, 2, 1, 2], [1, 1, 1, 2]],
names=names)
res = mi.unique()
exp = pd.MultiIndex.from_arrays([[1, 2, 2], [1, 1, 2]], names=mi.names)
tm.assert_index_equal(res, exp)
mi = pd.MultiIndex.from_arrays([list('aaaa'), list('abab')],
names=names)
res = mi.unique()
exp = pd.MultiIndex.from_arrays([list('aa'), list('ab')],
names=mi.names)
tm.assert_index_equal(res, exp)
mi = pd.MultiIndex.from_arrays([list('aaaa'), list('aaaa')],
names=names)
res = mi.unique()
exp = pd.MultiIndex.from_arrays([['a'], ['a']], names=mi.names)
tm.assert_index_equal(res, exp)
# GH #20568 - empty MI
mi = pd.MultiIndex.from_arrays([[], []], names=names)
res = mi.unique()
tm.assert_index_equal(mi, res)
@pytest.mark.parametrize('level', [0, 'first', 1, 'second'])
def test_unique_level(self, level):
# GH #17896 - with level= argument
result = self.index.unique(level=level)
expected = self.index.get_level_values(level).unique()
tm.assert_index_equal(result, expected)
# With already unique level
mi = pd.MultiIndex.from_arrays([[1, 3, 2, 4], [1, 3, 2, 5]],
names=['first', 'second'])
result = mi.unique(level=level)
expected = mi.get_level_values(level)
tm.assert_index_equal(result, expected)
# With empty MI
mi = pd.MultiIndex.from_arrays([[], []], names=['first', 'second'])
result = mi.unique(level=level)
        expected = mi.get_level_values(level)
        tm.assert_index_equal(result, expected)
def test_unique_datetimelike(self):
idx1 = pd.DatetimeIndex(['2015-01-01', '2015-01-01', '2015-01-01',
'2015-01-01', 'NaT', 'NaT'])
idx2 = pd.DatetimeIndex(['2015-01-01', '2015-01-01', '2015-01-02',
'2015-01-02', 'NaT', '2015-01-01'],
tz='Asia/Tokyo')
result = pd.MultiIndex.from_arrays([idx1, idx2]).unique()
eidx1 = pd.DatetimeIndex(['2015-01-01', '2015-01-01', 'NaT', 'NaT'])
eidx2 = pd.DatetimeIndex(['2015-01-01', '2015-01-02',
'NaT', '2015-01-01'],
tz='Asia/Tokyo')
exp = pd.MultiIndex.from_arrays([eidx1, eidx2])
tm.assert_index_equal(result, exp)
def test_tolist(self):
result = self.index.tolist()
exp = list(self.index.values)
assert result == exp
def test_repr_with_unicode_data(self):
with pd.core.config.option_context("display.encoding", 'UTF-8'):
d = {"a": [u("\u05d0"), 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}
index = pd.DataFrame(d).set_index(["a", "b"]).index
assert "\\u" not in repr(index) # we don't want unicode-escaped
def test_repr_roundtrip(self):
mi = MultiIndex.from_product([list('ab'), range(3)],
names=['first', 'second'])
str(mi)
if PY3:
tm.assert_index_equal(eval(repr(mi)), mi, exact=True)
else:
result = eval(repr(mi))
# string coerces to unicode
tm.assert_index_equal(result, mi, exact=False)
assert mi.get_level_values('first').inferred_type == 'string'
assert result.get_level_values('first').inferred_type == 'unicode'
mi_u = MultiIndex.from_product(
[list(u'ab'), range(3)], names=['first', 'second'])
result = eval(repr(mi_u))
tm.assert_index_equal(result, mi_u, exact=True)
# formatting
if PY3:
str(mi)
else:
compat.text_type(mi)
# long format
mi = MultiIndex.from_product([list('abcdefg'), range(10)],
names=['first', 'second'])
if PY3:
tm.assert_index_equal(eval(repr(mi)), mi, exact=True)
else:
result = eval(repr(mi))
# string coerces to unicode
tm.assert_index_equal(result, mi, exact=False)
assert mi.get_level_values('first').inferred_type == 'string'
assert result.get_level_values('first').inferred_type == 'unicode'
result = eval(repr(mi_u))
tm.assert_index_equal(result, mi_u, exact=True)
def test_str(self):
# tested elsewhere
pass
def test_unicode_string_with_unicode(self):
d = {"a": [u("\u05d0"), 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}
idx = pd.DataFrame(d).set_index(["a", "b"]).index
if PY3:
str(idx)
else:
compat.text_type(idx)
def test_bytestring_with_unicode(self):
d = {"a": [u("\u05d0"), 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}
idx = pd.DataFrame(d).set_index(["a", "b"]).index
if PY3:
bytes(idx)
else:
str(idx)
def test_slice_keep_name(self):
x = MultiIndex.from_tuples([('a', 'b'), (1, 2), ('c', 'd')],
names=['x', 'y'])
assert x[1:].names == x.names
def test_isna_behavior(self):
# should not segfault GH5123
# NOTE: if MI representation changes, may make sense to allow
# isna(MI)
with pytest.raises(NotImplementedError):
pd.isna(self.index)
def test_level_setting_resets_attributes(self):
ind = pd.MultiIndex.from_arrays([
['A', 'A', 'B', 'B', 'B'], [1, 2, 1, 2, 3]
])
assert ind.is_monotonic
ind.set_levels([['A', 'B'], [1, 3, 2]], inplace=True)
# if this fails, probably didn't reset the cache correctly.
assert not ind.is_monotonic
def test_is_monotonic_increasing(self):
i = MultiIndex.from_product([np.arange(10),
np.arange(10)], names=['one', 'two'])
assert i.is_monotonic
assert i._is_strictly_monotonic_increasing
assert Index(i.values).is_monotonic
assert i._is_strictly_monotonic_increasing
i = MultiIndex.from_product([np.arange(10, 0, -1),
np.arange(10)], names=['one', 'two'])
assert not i.is_monotonic
assert not i._is_strictly_monotonic_increasing
assert not Index(i.values).is_monotonic
assert not Index(i.values)._is_strictly_monotonic_increasing
i = MultiIndex.from_product([np.arange(10),
np.arange(10, 0, -1)],
names=['one', 'two'])
assert not i.is_monotonic
assert not i._is_strictly_monotonic_increasing
assert not Index(i.values).is_monotonic
assert not Index(i.values)._is_strictly_monotonic_increasing
i = MultiIndex.from_product([[1.0, np.nan, 2.0], ['a', 'b', 'c']])
assert not i.is_monotonic
assert not i._is_strictly_monotonic_increasing
assert not Index(i.values).is_monotonic
assert not Index(i.values)._is_strictly_monotonic_increasing
# string ordering
i = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
assert not i.is_monotonic
assert not Index(i.values).is_monotonic
assert not i._is_strictly_monotonic_increasing
assert not Index(i.values)._is_strictly_monotonic_increasing
i = MultiIndex(levels=[['bar', 'baz', 'foo', 'qux'],
['mom', 'next', 'zenith']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
assert i.is_monotonic
assert Index(i.values).is_monotonic
assert i._is_strictly_monotonic_increasing
assert Index(i.values)._is_strictly_monotonic_increasing
# mixed levels, hits the TypeError
i = MultiIndex(
levels=[[1, 2, 3, 4], ['gb00b03mlx29', 'lu0197800237',
'nl0000289783',
'nl0000289965', 'nl0000301109']],
labels=[[0, 1, 1, 2, 2, 2, 3], [4, 2, 0, 0, 1, 3, -1]],
names=['household_id', 'asset_id'])
assert not i.is_monotonic
assert not i._is_strictly_monotonic_increasing
# empty
i = MultiIndex.from_arrays([[], []])
assert i.is_monotonic
assert Index(i.values).is_monotonic
assert i._is_strictly_monotonic_increasing
assert Index(i.values)._is_strictly_monotonic_increasing
def test_is_monotonic_decreasing(self):
i = MultiIndex.from_product([np.arange(9, -1, -1),
np.arange(9, -1, -1)],
names=['one', 'two'])
assert i.is_monotonic_decreasing
assert i._is_strictly_monotonic_decreasing
assert Index(i.values).is_monotonic_decreasing
assert i._is_strictly_monotonic_decreasing
i = MultiIndex.from_product([np.arange(10),
np.arange(10, 0, -1)],
names=['one', 'two'])
assert not i.is_monotonic_decreasing
assert not i._is_strictly_monotonic_decreasing
assert not Index(i.values).is_monotonic_decreasing
assert not Index(i.values)._is_strictly_monotonic_decreasing
i = MultiIndex.from_product([np.arange(10, 0, -1),
np.arange(10)], names=['one', 'two'])
assert not i.is_monotonic_decreasing
assert not i._is_strictly_monotonic_decreasing
assert not Index(i.values).is_monotonic_decreasing
assert not Index(i.values)._is_strictly_monotonic_decreasing
i = MultiIndex.from_product([[2.0, np.nan, 1.0], ['c', 'b', 'a']])
assert not i.is_monotonic_decreasing
assert not i._is_strictly_monotonic_decreasing
assert not Index(i.values).is_monotonic_decreasing
assert not Index(i.values)._is_strictly_monotonic_decreasing
# string ordering
i = MultiIndex(levels=[['qux', 'foo', 'baz', 'bar'],
['three', 'two', 'one']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
assert not i.is_monotonic_decreasing
assert not Index(i.values).is_monotonic_decreasing
assert not i._is_strictly_monotonic_decreasing
assert not Index(i.values)._is_strictly_monotonic_decreasing
i = MultiIndex(levels=[['qux', 'foo', 'baz', 'bar'],
['zenith', 'next', 'mom']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
assert i.is_monotonic_decreasing
assert Index(i.values).is_monotonic_decreasing
assert i._is_strictly_monotonic_decreasing
assert Index(i.values)._is_strictly_monotonic_decreasing
# mixed levels, hits the TypeError
i = MultiIndex(
levels=[[4, 3, 2, 1], ['nl0000301109', 'nl0000289965',
'nl0000289783', 'lu0197800237',
'gb00b03mlx29']],
labels=[[0, 1, 1, 2, 2, 2, 3], [4, 2, 0, 0, 1, 3, -1]],
names=['household_id', 'asset_id'])
assert not i.is_monotonic_decreasing
assert not i._is_strictly_monotonic_decreasing
# empty
i = MultiIndex.from_arrays([[], []])
assert i.is_monotonic_decreasing
assert Index(i.values).is_monotonic_decreasing
assert i._is_strictly_monotonic_decreasing
assert Index(i.values)._is_strictly_monotonic_decreasing
def test_is_strictly_monotonic_increasing(self):
idx = pd.MultiIndex(levels=[['bar', 'baz'], ['mom', 'next']],
labels=[[0, 0, 1, 1], [0, 0, 0, 1]])
assert idx.is_monotonic_increasing
assert not idx._is_strictly_monotonic_increasing
def test_is_strictly_monotonic_decreasing(self):
idx = pd.MultiIndex(levels=[['baz', 'bar'], ['next', 'mom']],
labels=[[0, 0, 1, 1], [0, 0, 0, 1]])
assert idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
def test_reconstruct_sort(self):
# starts off lexsorted & monotonic
mi = MultiIndex.from_arrays([
['A', 'A', 'B', 'B', 'B'], [1, 2, 1, 2, 3]
])
assert mi.is_lexsorted()
assert mi.is_monotonic
recons = mi._sort_levels_monotonic()
assert recons.is_lexsorted()
assert recons.is_monotonic
assert mi is recons
assert mi.equals(recons)
        assert Index(mi.values).equals(Index(recons.values))
##### file path
### input
# data_set keys and labels
path_df_part_1_uic_label = "df_part_1_uic_label.csv"
path_df_part_2_uic_label = "df_part_2_uic_label.csv"
path_df_part_3_uic = "df_part_3_uic.csv"
# data_set features
path_df_part_1_U = "df_part_1_U.csv"
path_df_part_1_I = "df_part_1_I.csv"
path_df_part_1_C = "df_part_1_C.csv"
path_df_part_1_IC = "df_part_1_IC.csv"
path_df_part_1_UI = "df_part_1_UI.csv"
path_df_part_1_UC = "df_part_1_UC.csv"
path_df_part_2_U = "df_part_2_U.csv"
path_df_part_2_I = "df_part_2_I.csv"
path_df_part_2_C = "df_part_2_C.csv"
path_df_part_2_IC = "df_part_2_IC.csv"
path_df_part_2_UI = "df_part_2_UI.csv"
path_df_part_2_UC = "df_part_2_UC.csv"
path_df_part_3_U = "df_part_3_U.csv"
path_df_part_3_I = "df_part_3_I.csv"
path_df_part_3_C = "df_part_3_C.csv"
path_df_part_3_IC = "df_part_3_IC.csv"
path_df_part_3_UI = "df_part_3_UI.csv"
path_df_part_3_UC = "df_part_3_UC.csv"
### out file
### intermediate file
# data partition with different labels
path_df_part_1_uic_label_0 = "df_part_1_uic_label_0.csv"
path_df_part_1_uic_label_1 = "df_part_1_uic_label_1.csv"
path_df_part_2_uic_label_0 = "df_part_2_uic_label_0.csv"
path_df_part_2_uic_label_1 = "df_part_2_uic_label_1.csv"
# training set keys uic-label with k_means clusters' label
path_df_part_1_uic_label_cluster = "df_part_1_uic_label_cluster.csv"
path_df_part_2_uic_label_cluster = "df_part_2_uic_label_cluster.csv"
# scalers for data standardization, stored as python pickles
# for each part's features
path_df_part_1_scaler = "df_part_1_scaler"
path_df_part_2_scaler = "df_part_2_scaler"
import pandas as pd
import numpy as np
def df_read(path, mode='r'):
    '''load the csv file at path into a dataframe
'''
path_df = open(path, mode)
try:
df = pd.read_csv(path_df, index_col=False)
finally:
path_df.close()
return df
def subsample(df, sub_size):
    '''randomly sub-sample a dataframe down to at most sub_size rows
@param df: dataframe
@param sub_size: sub_sample set size
@return sub-dataframe with the same formation of df
'''
if sub_size >= len(df):
return df
else:
return df.sample(n=sub_size)
########################################################################
'''Step 1: split the samples into positive and negative sub-sets by their u-i-c-label keys
N.B. we first generate the u-i-c keys, then merge in the features and process the data
chunk by chunk; this somewhat roundabout workflow keeps memory usage low on a small PC.
'''
df_part_1_uic_label = df_read(path_df_part_1_uic_label) # loading total keys
df_part_2_uic_label = df_read(path_df_part_2_uic_label)
df_part_1_uic_label_0 = df_part_1_uic_label[df_part_1_uic_label['label'] == 0]
df_part_1_uic_label_1 = df_part_1_uic_label[df_part_1_uic_label['label'] == 1]
df_part_2_uic_label_0 = df_part_2_uic_label[df_part_2_uic_label['label'] == 0]
df_part_2_uic_label_1 = df_part_2_uic_label[df_part_2_uic_label['label'] == 1]
df_part_1_uic_label_0.to_csv(path_df_part_1_uic_label_0, index=False)
df_part_1_uic_label_1.to_csv(path_df_part_1_uic_label_1, index=False)
df_part_2_uic_label_0.to_csv(path_df_part_2_uic_label_0, index=False)
df_part_2_uic_label_1.to_csv(path_df_part_2_uic_label_1, index=False)
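# illustration only: the subsample() helper defined above is not used in the code shown
# here, but it could be applied at this point to shrink the large negative-label set,
# e.g. (the size 100000 below is an arbitrary example value, not from the original script):
# df_part_1_neg_small = subsample(df_part_1_uic_label_0, 100000)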
#######################################################################
'''Step 2: cluster the negative sub-set
number of clusters ~ 35, using mini-batch k-means
'''
# clustering based on sklearn
from sklearn import preprocessing
from sklearn.cluster import MiniBatchKMeans
import pickle
##### part_1 #####
# loading features
df_part_1_U = df_read(path_df_part_1_U)
df_part_1_I = df_read(path_df_part_1_I)
df_part_1_C = df_read(path_df_part_1_C)
df_part_1_IC = df_read(path_df_part_1_IC)
df_part_1_UI = df_read(path_df_part_1_UI)
df_part_1_UC = df_read(path_df_part_1_UC)
# process the data chunk by chunk because the full set of u-i pairs is too large to fit in memory
# and fit the scaling transform incrementally over the whole data set
scaler_1 = preprocessing.StandardScaler()
batch = 0
for df_part_1_uic_label_0 in pd.read_csv(open(path_df_part_1_uic_label_0, 'r'), chunksize=150000):
try:
# construct of part_1's sub-training set
train_data_df_part_1 = pd.merge(df_part_1_uic_label_0, df_part_1_U, how='left', on=['user_id'])
train_data_df_part_1 = pd.merge(train_data_df_part_1, df_part_1_I, how='left', on=['item_id'])
train_data_df_part_1 = pd.merge(train_data_df_part_1, df_part_1_C, how='left', on=['item_category'])
train_data_df_part_1 = pd.merge(train_data_df_part_1, df_part_1_IC, how='left', on=['item_id', 'item_category'])
train_data_df_part_1 = pd.merge(train_data_df_part_1, df_part_1_UI, how='left',
on=['user_id', 'item_id', 'item_category', 'label'])
train_data_df_part_1 = | pd.merge(train_data_df_part_1, df_part_1_UC, how='left', on=['user_id', 'item_category']) | pandas.merge |
import csv
import pandas
def readCSV():
with open("weather_data.csv") as file:
data = csv.reader(file)
temperatures = []
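        # walk the csv rows; the header row carries the literal string 'temp' in
        # column 1, so it is skipped by the check below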
for row in data:
temperature = row[1]
if temperature != 'temp':
temperatures.append(int(temperature))
else:
pass
print(temperatures)
data = pandas.read_csv('weather_data.csv')
average = data['temp'].mean()
print(average)
max_temp = data['temp'].max()
print(max_temp)
print(data.condition)
print(data[data.day == 'Monday'])
print(data[data.temp == data.temp.max()])
monday = data[data.day == 'Monday']
print(monday.temp)
# use a descriptive name instead of shadowing the built-in `dict`
student_dict = {
    'students': ['ankit', 'garima', 'rohit'],
    'marks': [40, 50, 30],
}
dataframe = pandas.DataFrame(student_dict)
import pandas as pd
counties = ['Antrim','Armagh','Carlow','Cavan','Clare','Cork','Derry','Donegal','Down','Dublin','Fermanagh','Galway',
'Kerry','Kildare','Kilkenny','Laois','Leitrim','Limerick','Longford','Louth','Mayo','Meath','Monaghan',
'Offaly','Roscommon','Sligo','Tipperary','Tyrone','Waterford','Westmeath','Wexford','Wicklow']
# creating a string holding the country name
country = 'IRELAND'
# building a dictionary of country and counties
ireland = {'Country': country, 'County': counties}
# creating a dataframe from the dictionary
df = | pd.DataFrame(ireland) | pandas.DataFrame |
# Copyright (c) 2018-2021, NVIDIA CORPORATION.
import array as arr
import datetime
import io
import operator
import random
import re
import string
import textwrap
from copy import copy
import cupy
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from numba import cuda
import cudf
from cudf.core._compat import PANDAS_GE_110, PANDAS_GE_120
from cudf.core.column import column
from cudf.tests import utils
from cudf.tests.utils import (
ALL_TYPES,
DATETIME_TYPES,
NUMERIC_TYPES,
assert_eq,
assert_exceptions_equal,
does_not_raise,
gen_rand,
)
def test_init_via_list_of_tuples():
data = [
(5, "cats", "jump", np.nan),
(2, "dogs", "dig", 7.5),
(3, "cows", "moo", -2.1, "occasionally"),
]
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(pdf, gdf)
def _dataframe_na_data():
return [
pd.DataFrame(
{
"a": [0, 1, 2, np.nan, 4, None, 6],
"b": [np.nan, None, "u", "h", "d", "a", "m"],
},
index=["q", "w", "e", "r", "t", "y", "u"],
),
pd.DataFrame({"a": [0, 1, 2, 3, 4], "b": ["a", "b", "u", "h", "d"]}),
pd.DataFrame(
{
"a": [None, None, np.nan, None],
"b": [np.nan, None, np.nan, None],
}
),
pd.DataFrame({"a": []}),
pd.DataFrame({"a": [np.nan], "b": [None]}),
pd.DataFrame({"a": ["a", "b", "c", None, "e"]}),
pd.DataFrame({"a": ["a", "b", "c", "d", "e"]}),
]
@pytest.mark.parametrize("rows", [0, 1, 2, 100])
def test_init_via_list_of_empty_tuples(rows):
data = [()] * rows
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(
pdf,
gdf,
check_like=True,
check_column_type=False,
check_index_type=False,
)
@pytest.mark.parametrize(
"dict_of_series",
[
{"a": pd.Series([1.0, 2.0, 3.0])},
{"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 4.0], index=[1, 2, 3]),
},
{"a": [1, 2, 3], "b": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=["a", "b", "c"]),
"b": pd.Series([1.0, 2.0, 4.0], index=["c", "d", "e"]),
},
{
"a": pd.Series(
["a", "b", "c"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
"b": pd.Series(
["a", " b", "d"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
},
],
)
def test_init_from_series_align(dict_of_series):
pdf = pd.DataFrame(dict_of_series)
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
for key in dict_of_series:
if isinstance(dict_of_series[key], pd.Series):
dict_of_series[key] = cudf.Series(dict_of_series[key])
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
("dict_of_series", "expectation"),
[
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 5, 6]),
},
pytest.raises(
ValueError, match="Cannot align indices with non-unique values"
),
),
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
},
does_not_raise(),
),
],
)
def test_init_from_series_align_nonunique(dict_of_series, expectation):
with expectation:
gdf = cudf.DataFrame(dict_of_series)
if expectation == does_not_raise():
pdf = pd.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
def test_init_unaligned_with_index():
pdf = pd.DataFrame(
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
gdf = cudf.DataFrame(
{
"a": cudf.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": cudf.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
assert_eq(pdf, gdf, check_dtype=False)
def test_series_basic():
# Make series from buffer
a1 = np.arange(10, dtype=np.float64)
series = cudf.Series(a1)
assert len(series) == 10
np.testing.assert_equal(series.to_array(), np.hstack([a1]))
def test_series_from_cupy_scalars():
data = [0.1, 0.2, 0.3]
data_np = np.array(data)
data_cp = cupy.array(data)
s_np = cudf.Series([data_np[0], data_np[2]])
s_cp = cudf.Series([data_cp[0], data_cp[2]])
assert_eq(s_np, s_cp)
@pytest.mark.parametrize("a", [[1, 2, 3], [1, 10, 30]])
@pytest.mark.parametrize("b", [[4, 5, 6], [-11, -100, 30]])
def test_append_index(a, b):
df = pd.DataFrame()
df["a"] = a
df["b"] = b
gdf = cudf.DataFrame()
gdf["a"] = a
gdf["b"] = b
# Check the default index after appending two columns(Series)
expected = df.a.append(df.b)
actual = gdf.a.append(gdf.b)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
expected = df.a.append(df.b, ignore_index=True)
actual = gdf.a.append(gdf.b, ignore_index=True)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
def test_series_init_none():
# test for creating empty series
# 1: without initializing
sr1 = cudf.Series()
got = sr1.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
# 2: Using `None` as an initializer
sr2 = cudf.Series(None)
got = sr2.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_basic():
np.random.seed(0)
df = cudf.DataFrame()
# Populate with cuda memory
df["keys"] = np.arange(10, dtype=np.float64)
np.testing.assert_equal(df["keys"].to_array(), np.arange(10))
assert len(df) == 10
# Populate with numpy array
rnd_vals = np.random.random(10)
df["vals"] = rnd_vals
np.testing.assert_equal(df["vals"].to_array(), rnd_vals)
assert len(df) == 10
assert tuple(df.columns) == ("keys", "vals")
# Make another dataframe
df2 = cudf.DataFrame()
df2["keys"] = np.array([123], dtype=np.float64)
df2["vals"] = np.array([321], dtype=np.float64)
# Concat
df = cudf.concat([df, df2])
assert len(df) == 11
hkeys = np.asarray(np.arange(10, dtype=np.float64).tolist() + [123])
hvals = np.asarray(rnd_vals.tolist() + [321])
np.testing.assert_equal(df["keys"].to_array(), hkeys)
np.testing.assert_equal(df["vals"].to_array(), hvals)
# As matrix
mat = df.as_matrix()
expect = np.vstack([hkeys, hvals]).T
np.testing.assert_equal(mat, expect)
# test dataframe with tuple name
df_tup = cudf.DataFrame()
data = np.arange(10)
df_tup[(1, "foobar")] = data
np.testing.assert_equal(data, df_tup[(1, "foobar")].to_array())
df = cudf.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
pdf = pd.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
assert_eq(df, pdf)
gdf = cudf.DataFrame({"id": [0, 1], "val": [None, None]})
gdf["val"] = gdf["val"].astype("int")
assert gdf["val"].isnull().all()
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"columns", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_columns(pdf, columns, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(columns=columns, inplace=inplace)
actual = gdf.drop(columns=columns, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_0(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=0, inplace=inplace)
actual = gdf.drop(labels=labels, axis=0, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"index",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_index(pdf, index, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace)
actual = gdf.drop(index=index, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5},
index=pd.MultiIndex(
levels=[
["lama", "cow", "falcon"],
["speed", "weight", "length"],
],
codes=[
[0, 0, 0, 1, 1, 1, 2, 2, 2, 1],
[0, 1, 2, 0, 1, 2, 0, 1, 2, 1],
],
),
)
],
)
@pytest.mark.parametrize(
"index,level",
[
("cow", 0),
("lama", 0),
("falcon", 0),
("speed", 1),
("weight", 1),
("length", 1),
pytest.param(
"cow",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"lama",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"falcon",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_multiindex(pdf, index, level, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace, level=level)
actual = gdf.drop(index=index, inplace=inplace, level=level)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_1(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=1, inplace=inplace)
actual = gdf.drop(labels=labels, axis=1, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
def test_dataframe_drop_error():
df = cudf.DataFrame({"a": [1], "b": [2], "c": [3]})
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "d"}),
rfunc_args_and_kwargs=([], {"columns": "d"}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
rfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
rfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
expected_error_message="Cannot specify both",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"axis": 1}),
rfunc_args_and_kwargs=([], {"axis": 1}),
expected_error_message="Need to specify at least",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([[2, 0]],),
rfunc_args_and_kwargs=([[2, 0]],),
expected_error_message="One or more values not found in axis",
)
def test_dataframe_drop_raises():
df = cudf.DataFrame(
{"a": [1, 2, 3], "c": [10, 20, 30]}, index=["x", "y", "z"]
)
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["p"],),
rfunc_args_and_kwargs=(["p"],),
expected_error_message="One or more values not found in axis",
)
# label dtype mismatch
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([3],),
rfunc_args_and_kwargs=([3],),
expected_error_message="One or more values not found in axis",
)
expect = pdf.drop("p", errors="ignore")
actual = df.drop("p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "p"}),
rfunc_args_and_kwargs=([], {"columns": "p"}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(columns="p", errors="ignore")
actual = df.drop(columns="p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
rfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(labels="p", axis=1, errors="ignore")
actual = df.drop(labels="p", axis=1, errors="ignore")
assert_eq(actual, expect)
def test_dataframe_column_add_drop_via_setitem():
df = cudf.DataFrame()
data = np.asarray(range(10))
df["a"] = data
df["b"] = data
assert tuple(df.columns) == ("a", "b")
del df["a"]
assert tuple(df.columns) == ("b",)
df["c"] = data
assert tuple(df.columns) == ("b", "c")
df["a"] = data
assert tuple(df.columns) == ("b", "c", "a")
def test_dataframe_column_set_via_attr():
data_0 = np.asarray([0, 2, 4, 5])
data_1 = np.asarray([1, 4, 2, 3])
data_2 = np.asarray([2, 0, 3, 0])
df = cudf.DataFrame({"a": data_0, "b": data_1, "c": data_2})
for i in range(10):
df.c = df.a
assert assert_eq(df.c, df.a, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
df.c = df.b
assert assert_eq(df.c, df.b, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
def test_dataframe_column_drop_via_attr():
df = cudf.DataFrame({"a": []})
with pytest.raises(AttributeError):
del df.a
assert tuple(df.columns) == tuple("a")
@pytest.mark.parametrize("axis", [0, "index"])
def test_dataframe_index_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper={1: 5, 2: 6}, axis=axis)
got = gdf.rename(mapper={1: 5, 2: 6}, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(index={1: 5, 2: 6})
got = gdf.rename(index={1: 5, 2: 6})
assert_eq(expect, got)
expect = pdf.rename({1: 5, 2: 6})
got = gdf.rename({1: 5, 2: 6})
assert_eq(expect, got)
# `pandas` can support indexes with mixed values. We throw a
# `NotImplementedError`.
with pytest.raises(NotImplementedError):
gdf.rename(mapper={1: "x", 2: "y"}, axis=axis)
def test_dataframe_MI_rename():
gdf = cudf.DataFrame(
{"a": np.arange(10), "b": np.arange(10), "c": np.arange(10)}
)
gdg = gdf.groupby(["a", "b"]).count()
pdg = gdg.to_pandas()
expect = pdg.rename(mapper={1: 5, 2: 6}, axis=0)
got = gdg.rename(mapper={1: 5, 2: 6}, axis=0)
assert_eq(expect, got)
@pytest.mark.parametrize("axis", [1, "columns"])
def test_dataframe_column_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper=lambda name: 2 * name, axis=axis)
got = gdf.rename(mapper=lambda name: 2 * name, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(columns=lambda name: 2 * name)
got = gdf.rename(columns=lambda name: 2 * name)
assert_eq(expect, got)
rename_mapper = {"a": "z", "b": "y", "c": "x"}
expect = pdf.rename(columns=rename_mapper)
got = gdf.rename(columns=rename_mapper)
assert_eq(expect, got)
def test_dataframe_pop():
pdf = pd.DataFrame(
{"a": [1, 2, 3], "b": ["x", "y", "z"], "c": [7.0, 8.0, 9.0]}
)
gdf = cudf.DataFrame.from_pandas(pdf)
# Test non-existing column error
with pytest.raises(KeyError) as raises:
gdf.pop("fake_colname")
raises.match("fake_colname")
# check pop numeric column
pdf_pop = pdf.pop("a")
gdf_pop = gdf.pop("a")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check string column
pdf_pop = pdf.pop("b")
gdf_pop = gdf.pop("b")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check float column and empty dataframe
pdf_pop = pdf.pop("c")
gdf_pop = gdf.pop("c")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check empty dataframe edge case
empty_pdf = pd.DataFrame(columns=["a", "b"])
empty_gdf = cudf.DataFrame(columns=["a", "b"])
pb = empty_pdf.pop("b")
gb = empty_gdf.pop("b")
assert len(pb) == len(gb)
assert empty_pdf.empty and empty_gdf.empty
@pytest.mark.parametrize("nelem", [0, 3, 100, 1000])
def test_dataframe_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df["a"].dtype is np.dtype(np.int32)
df["b"] = df["a"].astype(np.float32)
assert df["b"].dtype is np.dtype(np.float32)
np.testing.assert_equal(df["a"].to_array(), df["b"].to_array())
@pytest.mark.parametrize("nelem", [0, 100])
def test_index_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df.index.dtype is np.dtype(np.int64)
df.index = df.index.astype(np.float32)
assert df.index.dtype is np.dtype(np.float32)
df["a"] = df["a"].astype(np.float32)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
df["b"] = df["a"]
df = df.set_index("b")
df["a"] = df["a"].astype(np.int16)
df.index = df.index.astype(np.int16)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
def test_dataframe_to_string():
pd.options.display.max_rows = 5
pd.options.display.max_columns = 8
# Test basic
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
string = str(df)
assert string.splitlines()[-1] == "[6 rows x 2 columns]"
# Test skipped columns
df = cudf.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6],
"b": [11, 12, 13, 14, 15, 16],
"c": [11, 12, 13, 14, 15, 16],
"d": [11, 12, 13, 14, 15, 16],
}
)
string = df.to_string()
assert string.splitlines()[-1] == "[6 rows x 4 columns]"
# Test masked
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
data = np.arange(6)
mask = np.zeros(1, dtype=cudf.utils.utils.mask_dtype)
mask[0] = 0b00101101
masked = cudf.Series.from_masked_array(data, mask)
assert masked.null_count == 2
df["c"] = masked
# check data
values = masked.copy()
validids = [0, 2, 3, 5]
densearray = masked.to_array()
np.testing.assert_equal(data[validids], densearray)
    # valid position is correct
for i in validids:
assert data[i] == values[i]
# null position is correct
for i in range(len(values)):
if i not in validids:
assert values[i] is cudf.NA
pd.options.display.max_rows = 10
got = df.to_string()
expect = """
a b c
0 1 11 0
1 2 12 <NA>
2 3 13 2
3 4 14 3
4 5 15 <NA>
5 6 16 5
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_to_string_wide(monkeypatch):
monkeypatch.setenv("COLUMNS", "79")
# Test basic
df = cudf.DataFrame()
for i in range(100):
df["a{}".format(i)] = list(range(3))
pd.options.display.max_columns = 0
got = df.to_string()
expect = """
a0 a1 a2 a3 a4 a5 a6 a7 ... a92 a93 a94 a95 a96 a97 a98 a99
0 0 0 0 0 0 0 0 0 ... 0 0 0 0 0 0 0 0
1 1 1 1 1 1 1 1 1 ... 1 1 1 1 1 1 1 1
2 2 2 2 2 2 2 2 2 ... 2 2 2 2 2 2 2 2
[3 rows x 100 columns]
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_empty_to_string():
# Test for printing empty dataframe
df = cudf.DataFrame()
got = df.to_string()
expect = "Empty DataFrame\nColumns: []\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_emptycolumns_to_string():
# Test for printing dataframe having empty columns
df = cudf.DataFrame()
df["a"] = []
df["b"] = []
got = df.to_string()
expect = "Empty DataFrame\nColumns: [a, b]\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy():
# Test for copying the dataframe using python copy pkg
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = copy(df)
df2["b"] = [4, 5, 6]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy_shallow():
    # Test for copying the dataframe using the class method
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = df.copy()
df2["b"] = [4, 2, 3]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_dtypes():
dtypes = pd.Series(
[np.int32, np.float32, np.float64], index=["c", "a", "b"]
)
df = cudf.DataFrame(
{k: np.ones(10, dtype=v) for k, v in dtypes.iteritems()}
)
assert df.dtypes.equals(dtypes)
def test_dataframe_add_col_to_object_dataframe():
# Test for adding column to an empty object dataframe
cols = ["a", "b", "c"]
df = pd.DataFrame(columns=cols, dtype="str")
data = {k: v for (k, v) in zip(cols, [["a"] for _ in cols])}
gdf = cudf.DataFrame(data)
gdf = gdf[:0]
assert gdf.dtypes.equals(df.dtypes)
gdf["a"] = [1]
df["a"] = [10]
assert gdf.dtypes.equals(df.dtypes)
gdf["b"] = [1.0]
df["b"] = [10.0]
assert gdf.dtypes.equals(df.dtypes)
def test_dataframe_dir_and_getattr():
df = cudf.DataFrame(
{
"a": np.ones(10),
"b": np.ones(10),
"not an id": np.ones(10),
"oop$": np.ones(10),
}
)
o = dir(df)
assert {"a", "b"}.issubset(o)
assert "not an id" not in o
assert "oop$" not in o
# Getattr works
assert df.a.equals(df["a"])
assert df.b.equals(df["b"])
with pytest.raises(AttributeError):
df.not_a_column
@pytest.mark.parametrize("order", ["C", "F"])
def test_empty_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
# Check fully empty dataframe.
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 0)
df = cudf.DataFrame()
nelem = 123
for k in "abc":
df[k] = np.random.random(nelem)
# Check all columns in empty dataframe.
mat = df.head(0).as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 3)
@pytest.mark.parametrize("order", ["C", "F"])
def test_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
nelem = 123
for k in "abcd":
df[k] = np.random.random(nelem)
# Check all columns
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (nelem, 4)
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
# Check column subset
mat = df.as_gpu_matrix(order=order, columns=["a", "c"]).copy_to_host()
assert mat.shape == (nelem, 2)
for i, k in enumerate("ac"):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
def test_dataframe_as_gpu_matrix_null_values():
df = cudf.DataFrame()
nelem = 123
na = -10000
refvalues = {}
for k in "abcd":
df[k] = data = np.random.random(nelem)
bitmask = utils.random_bitmask(nelem)
df[k] = df[k].set_mask(bitmask)
boolmask = np.asarray(
utils.expand_bits_to_bytes(bitmask)[:nelem], dtype=np.bool_
)
data[~boolmask] = na
refvalues[k] = data
# Check null value causes error
with pytest.raises(ValueError) as raises:
df.as_gpu_matrix()
raises.match("column 'a' has null values")
for k in df.columns:
df[k] = df[k].fillna(na)
mat = df.as_gpu_matrix().copy_to_host()
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(refvalues[k], mat[:, i])
def test_dataframe_append_empty():
pdf = pd.DataFrame(
{
"key": [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4],
"value": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
gdf["newcol"] = 100
pdf["newcol"] = 100
assert len(gdf["newcol"]) == len(pdf)
assert len(pdf["newcol"]) == len(pdf)
assert_eq(gdf, pdf)
def test_dataframe_setitem_from_masked_object():
ary = np.random.randn(100)
mask = np.zeros(100, dtype=bool)
mask[:20] = True
np.random.shuffle(mask)
ary[mask] = np.nan
test1_null = cudf.Series(ary, nan_as_null=True)
assert test1_null.nullable
assert test1_null.null_count == 20
test1_nan = cudf.Series(ary, nan_as_null=False)
assert test1_nan.null_count == 0
test2_null = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=True
)
assert test2_null["a"].nullable
assert test2_null["a"].null_count == 20
test2_nan = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=False
)
assert test2_nan["a"].null_count == 0
gpu_ary = cupy.asarray(ary)
test3_null = cudf.Series(gpu_ary, nan_as_null=True)
assert test3_null.nullable
assert test3_null.null_count == 20
test3_nan = cudf.Series(gpu_ary, nan_as_null=False)
assert test3_nan.null_count == 0
test4 = cudf.DataFrame()
lst = [1, 2, None, 4, 5, 6, None, 8, 9]
test4["lst"] = lst
assert test4["lst"].nullable
assert test4["lst"].null_count == 2
def test_dataframe_append_to_empty():
pdf = pd.DataFrame()
pdf["a"] = []
pdf["b"] = [1, 2, 3]
gdf = cudf.DataFrame()
gdf["a"] = []
gdf["b"] = [1, 2, 3]
assert_eq(gdf, pdf)
def test_dataframe_setitem_index_len1():
gdf = cudf.DataFrame()
gdf["a"] = [1]
gdf["b"] = gdf.index._values
np.testing.assert_equal(gdf.b.to_array(), [0])
def test_empty_dataframe_setitem_df():
gdf1 = cudf.DataFrame()
gdf2 = cudf.DataFrame({"a": [1, 2, 3, 4, 5]})
gdf1["a"] = gdf2["a"]
assert_eq(gdf1, gdf2)
def test_assign():
gdf = cudf.DataFrame({"x": [1, 2, 3]})
gdf2 = gdf.assign(y=gdf.x + 1)
assert list(gdf.columns) == ["x"]
assert list(gdf2.columns) == ["x", "y"]
np.testing.assert_equal(gdf2.y.to_array(), [2, 3, 4])
@pytest.mark.parametrize("nrows", [1, 8, 100, 1000])
def test_dataframe_hash_columns(nrows):
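    # Hash pairs of columns and check the output type, length, and dtype, that
    # the default (all columns) matches an explicit column list, and that a
    # single-column hash agrees with Series.hash_values().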
gdf = cudf.DataFrame()
data = np.asarray(range(nrows))
data[0] = data[-1] # make first and last the same
gdf["a"] = data
gdf["b"] = gdf.a + 100
out = gdf.hash_columns(["a", "b"])
assert isinstance(out, cupy.ndarray)
assert len(out) == nrows
assert out.dtype == np.int32
# Check default
out_all = gdf.hash_columns()
np.testing.assert_array_equal(cupy.asnumpy(out), cupy.asnumpy(out_all))
# Check single column
out_one = cupy.asnumpy(gdf.hash_columns(["a"]))
# First matches last
assert out_one[0] == out_one[-1]
# Equivalent to the cudf.Series.hash_values()
np.testing.assert_array_equal(cupy.asnumpy(gdf.a.hash_values()), out_one)
@pytest.mark.parametrize("nrows", [3, 10, 100, 1000])
@pytest.mark.parametrize("nparts", [1, 2, 8, 13])
@pytest.mark.parametrize("nkeys", [1, 2])
def test_dataframe_hash_partition(nrows, nparts, nkeys):
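    # Partition rows by hashing the key columns and verify the number of
    # partitions, their types, and that no key tuple appears in more than
    # one partition.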
np.random.seed(123)
gdf = cudf.DataFrame()
keycols = []
for i in range(nkeys):
keyname = "key{}".format(i)
gdf[keyname] = np.random.randint(0, 7 - i, nrows)
keycols.append(keyname)
gdf["val1"] = np.random.randint(0, nrows * 2, nrows)
got = gdf.partition_by_hash(keycols, nparts=nparts)
# Must return a list
assert isinstance(got, list)
# Must have correct number of partitions
assert len(got) == nparts
# All partitions must be DataFrame type
assert all(isinstance(p, cudf.DataFrame) for p in got)
# Check that all partitions have unique keys
part_unique_keys = set()
for p in got:
if len(p):
# Take rows of the keycolumns and build a set of the key-values
unique_keys = set(map(tuple, p.as_matrix(columns=keycols)))
# Ensure that none of the key-values have occurred in other groups
assert not (unique_keys & part_unique_keys)
part_unique_keys |= unique_keys
assert len(part_unique_keys)
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_value(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["val"] = gdf["val"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.key])
expected_value = row.key + 100 if valid else np.nan
got_value = row.val
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_keys(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["key"] = gdf["key"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3, keep_index=False)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.val - 100])
# val is key + 100
expected_value = row.val - 100 if valid else np.nan
got_value = row.key
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("keep_index", [True, False])
def test_dataframe_hash_partition_keep_index(keep_index):
gdf = cudf.DataFrame(
{"val": [1, 2, 3, 4], "key": [3, 2, 1, 4]}, index=[4, 3, 2, 1]
)
expected_df1 = cudf.DataFrame(
{"val": [1], "key": [3]}, index=[4] if keep_index else None
)
expected_df2 = cudf.DataFrame(
{"val": [2, 3, 4], "key": [2, 1, 4]},
index=[3, 2, 1] if keep_index else range(1, 4),
)
expected = [expected_df1, expected_df2]
parts = gdf.partition_by_hash(["key"], nparts=2, keep_index=keep_index)
for exp, got in zip(expected, parts):
assert_eq(exp, got)
def test_dataframe_hash_partition_empty():
gdf = cudf.DataFrame({"val": [1, 2], "key": [3, 2]}, index=["a", "b"])
parts = gdf.iloc[:0].partition_by_hash(["key"], nparts=3)
assert len(parts) == 3
for part in parts:
assert_eq(gdf.iloc[:0], part)
@pytest.mark.parametrize("dtype1", utils.supported_numpy_dtypes)
@pytest.mark.parametrize("dtype2", utils.supported_numpy_dtypes)
def test_dataframe_concat_different_numerical_columns(dtype1, dtype2):
df1 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype1)))
df2 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype2)))
    if (dtype1 != dtype2 and "datetime" in dtype1) or "datetime" in dtype2:
with pytest.raises(TypeError):
cudf.concat([df1, df2])
else:
pres = pd.concat([df1, df2])
gres = cudf.concat([cudf.from_pandas(df1), cudf.from_pandas(df2)])
assert_eq(cudf.from_pandas(pres), gres)
def test_dataframe_concat_different_column_types():
df1 = cudf.Series([42], dtype=np.float64)
df2 = cudf.Series(["a"], dtype="category")
with pytest.raises(ValueError):
cudf.concat([df1, df2])
df2 = cudf.Series(["a string"])
with pytest.raises(TypeError):
cudf.concat([df1, df2])
@pytest.mark.parametrize(
"df_1", [cudf.DataFrame({"a": [1, 2], "b": [1, 3]}), cudf.DataFrame({})]
)
@pytest.mark.parametrize(
"df_2", [cudf.DataFrame({"a": [], "b": []}), cudf.DataFrame({})]
)
def test_concat_empty_dataframe(df_1, df_2):
got = cudf.concat([df_1, df_2])
expect = pd.concat([df_1.to_pandas(), df_2.to_pandas()], sort=False)
# ignoring dtypes as pandas upcasts int to float
# on concatenation with empty dataframes
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"df1_d",
[
{"a": [1, 2], "b": [1, 2], "c": ["s1", "s2"], "d": [1.0, 2.0]},
{"b": [1.9, 10.9], "c": ["s1", "s2"]},
{"c": ["s1"], "b": [None], "a": [False]},
],
)
@pytest.mark.parametrize(
"df2_d",
[
{"a": [1, 2, 3]},
{"a": [1, None, 3], "b": [True, True, False], "c": ["s3", None, "s4"]},
{"a": [], "b": []},
{},
],
)
def test_concat_different_column_dataframe(df1_d, df2_d):
got = cudf.concat(
[cudf.DataFrame(df1_d), cudf.DataFrame(df2_d), cudf.DataFrame(df1_d)],
sort=False,
)
expect = pd.concat(
[pd.DataFrame(df1_d), pd.DataFrame(df2_d), pd.DataFrame(df1_d)],
sort=False,
)
    # Numerical columns are upcast to float in cudf.DataFrame.to_pandas(),
    # which casts nan to 0 in non-float numerical columns, so normalize the
    # dtypes here before comparing.
numeric_cols = got.dtypes[got.dtypes != "object"].index
for col in numeric_cols:
got[col] = got[col].astype(np.float64).fillna(np.nan)
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"ser_1", [pd.Series([1, 2, 3]), pd.Series([], dtype="float64")]
)
@pytest.mark.parametrize("ser_2", [pd.Series([], dtype="float64")])
def test_concat_empty_series(ser_1, ser_2):
got = cudf.concat([cudf.Series(ser_1), cudf.Series(ser_2)])
expect = pd.concat([ser_1, ser_2])
assert_eq(got, expect)
def test_concat_with_axis():
df1 = pd.DataFrame(dict(x=np.arange(5), y=np.arange(5)))
df2 = pd.DataFrame(dict(a=np.arange(5), b=np.arange(5)))
concat_df = pd.concat([df1, df2], axis=1)
cdf1 = cudf.from_pandas(df1)
cdf2 = cudf.from_pandas(df2)
# concat only dataframes
concat_cdf = cudf.concat([cdf1, cdf2], axis=1)
assert_eq(concat_cdf, concat_df)
# concat only series
concat_s = pd.concat([df1.x, df1.y], axis=1)
cs1 = cudf.Series.from_pandas(df1.x)
cs2 = cudf.Series.from_pandas(df1.y)
concat_cdf_s = cudf.concat([cs1, cs2], axis=1)
assert_eq(concat_cdf_s, concat_s)
# concat series and dataframes
s3 = pd.Series(np.random.random(5))
cs3 = cudf.Series.from_pandas(s3)
concat_cdf_all = cudf.concat([cdf1, cs3, cdf2], axis=1)
concat_df_all = pd.concat([df1, s3, df2], axis=1)
assert_eq(concat_cdf_all, concat_df_all)
# concat manual multi index
midf1 = cudf.from_pandas(df1)
midf1.index = cudf.MultiIndex(
levels=[[0, 1, 2, 3], [0, 1]], codes=[[0, 1, 2, 3, 2], [0, 1, 0, 1, 0]]
)
midf2 = midf1[2:]
midf2.index = cudf.MultiIndex(
levels=[[3, 4, 5], [2, 0]], codes=[[0, 1, 2], [1, 0, 1]]
)
mipdf1 = midf1.to_pandas()
mipdf2 = midf2.to_pandas()
assert_eq(cudf.concat([midf1, midf2]), pd.concat([mipdf1, mipdf2]))
assert_eq(cudf.concat([midf2, midf1]), pd.concat([mipdf2, mipdf1]))
assert_eq(
cudf.concat([midf1, midf2, midf1]), pd.concat([mipdf1, mipdf2, mipdf1])
)
# concat groupby multi index
gdf1 = cudf.DataFrame(
{
"x": np.random.randint(0, 10, 10),
"y": np.random.randint(0, 10, 10),
"z": np.random.randint(0, 10, 10),
"v": np.random.randint(0, 10, 10),
}
)
gdf2 = gdf1[5:]
gdg1 = gdf1.groupby(["x", "y"]).min()
gdg2 = gdf2.groupby(["x", "y"]).min()
pdg1 = gdg1.to_pandas()
pdg2 = gdg2.to_pandas()
assert_eq(cudf.concat([gdg1, gdg2]), pd.concat([pdg1, pdg2]))
assert_eq(cudf.concat([gdg2, gdg1]), pd.concat([pdg2, pdg1]))
# series multi index concat
gdgz1 = gdg1.z
gdgz2 = gdg2.z
pdgz1 = gdgz1.to_pandas()
pdgz2 = gdgz2.to_pandas()
assert_eq(cudf.concat([gdgz1, gdgz2]), pd.concat([pdgz1, pdgz2]))
assert_eq(cudf.concat([gdgz2, gdgz1]), pd.concat([pdgz2, pdgz1]))
@pytest.mark.parametrize("nrows", [0, 3, 10, 100, 1000])
def test_nonmatching_index_setitem(nrows):
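    # Setting a column from a bare array on a frame with a non-default index
    # should assign by position and adopt the frame's existing index.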
np.random.seed(0)
gdf = cudf.DataFrame()
gdf["a"] = np.random.randint(2147483647, size=nrows)
gdf["b"] = np.random.randint(2147483647, size=nrows)
gdf = gdf.set_index("b")
test_values = np.random.randint(2147483647, size=nrows)
gdf["c"] = test_values
assert len(test_values) == len(gdf["c"])
assert (
gdf["c"]
.to_pandas()
.equals(cudf.Series(test_values).set_index(gdf._index).to_pandas())
)
def test_from_pandas():
df = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
gdf = cudf.DataFrame.from_pandas(df)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
s = df.x
gs = cudf.Series.from_pandas(s)
assert isinstance(gs, cudf.Series)
assert_eq(s, gs)
@pytest.mark.parametrize("dtypes", [int, float])
def test_from_records(dtypes):
h_ary = np.ndarray(shape=(10, 4), dtype=dtypes)
rec_ary = h_ary.view(np.recarray)
gdf = cudf.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
df = pd.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame.from_records(rec_ary)
df = pd.DataFrame.from_records(rec_ary)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
@pytest.mark.parametrize("columns", [None, ["first", "second", "third"]])
@pytest.mark.parametrize(
"index",
[
None,
["first", "second"],
"name",
"age",
"weight",
[10, 11],
["abc", "xyz"],
],
)
def test_from_records_index(columns, index):
rec_ary = np.array(
[("Rex", 9, 81.0), ("Fido", 3, 27.0)],
dtype=[("name", "U10"), ("age", "i4"), ("weight", "f4")],
)
gdf = cudf.DataFrame.from_records(rec_ary, columns=columns, index=index)
df = pd.DataFrame.from_records(rec_ary, columns=columns, index=index)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
def test_dataframe_construction_from_cupy_arrays():
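    # Build DataFrames from a 2D cupy device array (with and without explicit
    # column names or index) and compare against pandas built from the host copy.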
h_ary = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
d_ary = cupy.asarray(h_ary)
gdf = cudf.DataFrame(d_ary, columns=["a", "b", "c"])
df = pd.DataFrame(h_ary, columns=["a", "b", "c"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
df = pd.DataFrame(h_ary)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary, index=["a", "b"])
df = pd.DataFrame(h_ary, index=["a", "b"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
gdf = gdf.set_index(keys=0, drop=False)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=0, drop=False)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
gdf = gdf.set_index(keys=1, drop=False)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=1, drop=False)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
def test_dataframe_cupy_wrong_dimensions():
d_ary = cupy.empty((2, 3, 4), dtype=np.int32)
with pytest.raises(
ValueError, match="records dimension expected 1 or 2 but found: 3"
):
cudf.DataFrame(d_ary)
def test_dataframe_cupy_array_wrong_index():
d_ary = cupy.empty((2, 3), dtype=np.int32)
with pytest.raises(
ValueError,
match="Length mismatch: Expected axis has 2 elements, "
"new values have 1 elements",
):
cudf.DataFrame(d_ary, index=["a"])
with pytest.raises(
ValueError,
match="Length mismatch: Expected axis has 2 elements, "
"new values have 1 elements",
):
cudf.DataFrame(d_ary, index="a")
def test_index_in_dataframe_constructor():
a = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
b = cudf.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
assert_eq(a, b)
assert_eq(a.loc[4:], b.loc[4:])
dtypes = NUMERIC_TYPES + DATETIME_TYPES + ["bool"]
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_from_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
padf = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
gdf = cudf.DataFrame.from_arrow(padf)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
s = pa.Array.from_pandas(df.a)
gs = cudf.Series.from_arrow(s)
assert isinstance(gs, cudf.Series)
# For some reason PyArrow to_pandas() converts to numpy array and has
# better type compatibility
np.testing.assert_array_equal(s.to_pandas(), gs.to_array())
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_to_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
gdf = cudf.DataFrame.from_pandas(df)
pa_df = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)
assert isinstance(pa_gdf, pa.Table)
assert pa.Table.equals(pa_df, pa_gdf)
pa_s = pa.Array.from_pandas(df.a)
pa_gs = gdf["a"].to_arrow()
assert isinstance(pa_gs, pa.Array)
assert pa.Array.equals(pa_s, pa_gs)
pa_i = pa.Array.from_pandas(df.index)
pa_gi = gdf.index.to_arrow()
assert isinstance(pa_gi, pa.Array)
assert pa.Array.equals(pa_i, pa_gi)
@pytest.mark.parametrize("data_type", dtypes)
def test_to_from_arrow_nulls(data_type):
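    # Round-trip arrays containing nulls between Arrow and cudf; cudf pads the
    # null mask, so only the first mask byte is compared directly.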
if data_type == "longlong":
data_type = "int64"
if data_type == "bool":
s1 = pa.array([True, None, False, None, True], type=data_type)
else:
dtype = np.dtype(data_type)
if dtype.type == np.datetime64:
time_unit, _ = np.datetime_data(dtype)
data_type = pa.timestamp(unit=time_unit)
s1 = pa.array([1, None, 3, None, 5], type=data_type)
gs1 = cudf.Series.from_arrow(s1)
assert isinstance(gs1, cudf.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s1.buffers()[0]).view("u1")[0],
gs1._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s1, gs1.to_arrow())
s2 = pa.array([None, None, None, None, None], type=data_type)
gs2 = cudf.Series.from_arrow(s2)
assert isinstance(gs2, cudf.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s2.buffers()[0]).view("u1")[0],
gs2._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s2, gs2.to_arrow())
def test_to_arrow_categorical():
df = pd.DataFrame()
df["a"] = pd.Series(["a", "b", "c"], dtype="category")
gdf = cudf.DataFrame.from_pandas(df)
pa_df = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)
assert isinstance(pa_gdf, pa.Table)
assert pa.Table.equals(pa_df, pa_gdf)
pa_s = pa.Array.from_pandas(df.a)
pa_gs = gdf["a"].to_arrow()
assert isinstance(pa_gs, pa.Array)
assert pa.Array.equals(pa_s, pa_gs)
def test_from_arrow_missing_categorical():
pd_cat = pd.Categorical(["a", "b", "c"], categories=["a", "b"])
pa_cat = pa.array(pd_cat, from_pandas=True)
gd_cat = cudf.Series(pa_cat)
assert isinstance(gd_cat, cudf.Series)
assert_eq(
pd.Series(pa_cat.to_pandas()), # PyArrow returns a pd.Categorical
gd_cat.to_pandas(),
)
def test_to_arrow_missing_categorical():
pd_cat = pd.Categorical(["a", "b", "c"], categories=["a", "b"])
pa_cat = pa.array(pd_cat, from_pandas=True)
gd_cat = cudf.Series(pa_cat)
assert isinstance(gd_cat, cudf.Series)
assert pa.Array.equals(pa_cat, gd_cat.to_arrow())
@pytest.mark.parametrize("data_type", dtypes)
def test_from_scalar_typing(data_type):
if data_type == "datetime64[ms]":
scalar = (
np.dtype("int64")
.type(np.random.randint(0, 5))
.astype("datetime64[ms]")
)
elif data_type.startswith("datetime64"):
scalar = np.datetime64(datetime.date.today()).astype("datetime64[ms]")
data_type = "datetime64[ms]"
else:
scalar = np.dtype(data_type).type(np.random.randint(0, 5))
gdf = cudf.DataFrame()
gdf["a"] = [1, 2, 3, 4, 5]
gdf["b"] = scalar
assert gdf["b"].dtype == np.dtype(data_type)
assert len(gdf["b"]) == len(gdf["a"])
@pytest.mark.parametrize("data_type", NUMERIC_TYPES)
def test_from_python_array(data_type):
np_arr = np.random.randint(0, 100, 10).astype(data_type)
data = memoryview(np_arr)
data = arr.array(data.format, data)
gs = cudf.Series(data)
np.testing.assert_equal(gs.to_array(), np_arr)
def test_series_shape():
ps = pd.Series([1, 2, 3, 4])
cs = cudf.Series([1, 2, 3, 4])
assert ps.shape == cs.shape
def test_series_shape_empty():
ps = pd.Series(dtype="float64")
cs = cudf.Series([])
assert ps.shape == cs.shape
def test_dataframe_shape():
pdf = pd.DataFrame({"a": [0, 1, 2, 3], "b": [0.1, 0.2, None, 0.3]})
gdf = cudf.DataFrame.from_pandas(pdf)
assert pdf.shape == gdf.shape
def test_dataframe_shape_empty():
pdf = pd.DataFrame()
gdf = cudf.DataFrame()
assert pdf.shape == gdf.shape
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 20])
@pytest.mark.parametrize("dtype", dtypes)
@pytest.mark.parametrize("nulls", ["none", "some", "all"])
def test_dataframe_transpose(nulls, num_cols, num_rows, dtype):
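    # Transpose frames with varying shapes, dtypes, and null patterns and
    # compare both DataFrame.transpose() and the .T property against pandas.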
pdf = pd.DataFrame()
null_rep = np.nan if dtype in ["float32", "float64"] else None
for i in range(num_cols):
colname = string.ascii_lowercase[i]
data = pd.Series(np.random.randint(0, 26, num_rows).astype(dtype))
if nulls == "some":
idx = np.random.choice(
num_rows, size=int(num_rows / 2), replace=False
)
data[idx] = null_rep
elif nulls == "all":
data[:] = null_rep
pdf[colname] = data
gdf = cudf.DataFrame.from_pandas(pdf)
got_function = gdf.transpose()
got_property = gdf.T
expect = pdf.transpose()
assert_eq(expect, got_function)
assert_eq(expect, got_property)
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 20])
def test_dataframe_transpose_category(num_cols, num_rows):
pdf = pd.DataFrame()
for i in range(num_cols):
colname = string.ascii_lowercase[i]
data = pd.Series(list(string.ascii_lowercase), dtype="category")
data = data.sample(num_rows, replace=True).reset_index(drop=True)
pdf[colname] = data
gdf = cudf.DataFrame.from_pandas(pdf)
got_function = gdf.transpose()
got_property = gdf.T
expect = pdf.transpose()
assert_eq(expect, got_function.to_pandas())
assert_eq(expect, got_property.to_pandas())
def test_generated_column():
gdf = cudf.DataFrame({"a": (i for i in range(5))})
assert len(gdf) == 5
@pytest.fixture
def pdf():
return pd.DataFrame({"x": range(10), "y": range(10)})
@pytest.fixture
def gdf(pdf):
return cudf.DataFrame.from_pandas(pdf)
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize(
"func",
[
lambda df, **kwargs: df.min(**kwargs),
lambda df, **kwargs: df.max(**kwargs),
lambda df, **kwargs: df.sum(**kwargs),
lambda df, **kwargs: df.product(**kwargs),
lambda df, **kwargs: df.cummin(**kwargs),
lambda df, **kwargs: df.cummax(**kwargs),
lambda df, **kwargs: df.cumsum(**kwargs),
lambda df, **kwargs: df.cumprod(**kwargs),
lambda df, **kwargs: df.mean(**kwargs),
lambda df, **kwargs: df.sum(**kwargs),
lambda df, **kwargs: df.max(**kwargs),
lambda df, **kwargs: df.std(ddof=1, **kwargs),
lambda df, **kwargs: df.var(ddof=1, **kwargs),
lambda df, **kwargs: df.std(ddof=2, **kwargs),
lambda df, **kwargs: df.var(ddof=2, **kwargs),
lambda df, **kwargs: df.kurt(**kwargs),
lambda df, **kwargs: df.skew(**kwargs),
lambda df, **kwargs: df.all(**kwargs),
lambda df, **kwargs: df.any(**kwargs),
],
)
@pytest.mark.parametrize("skipna", [True, False, None])
def test_dataframe_reductions(data, func, skipna):
pdf = pd.DataFrame(data=data)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(func(pdf, skipna=skipna), func(gdf, skipna=skipna))
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize("func", [lambda df: df.count()])
def test_dataframe_count_reduction(data, func):
pdf = pd.DataFrame(data=data)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(func(pdf), func(gdf))
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize("ops", ["sum", "product", "prod"])
@pytest.mark.parametrize("skipna", [True, False, None])
@pytest.mark.parametrize("min_count", [-10, -1, 0, 1, 2, 3, 10])
def test_dataframe_min_count_ops(data, ops, skipna, min_count):
psr = pd.DataFrame(data)
gsr = cudf.DataFrame(data)
if PANDAS_GE_120 and psr.shape[0] * psr.shape[1] < min_count:
pytest.xfail("https://github.com/pandas-dev/pandas/issues/39738")
assert_eq(
getattr(psr, ops)(skipna=skipna, min_count=min_count),
getattr(gsr, ops)(skipna=skipna, min_count=min_count),
check_dtype=False,
)
@pytest.mark.parametrize(
"binop",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_binops_df(pdf, gdf, binop):
pdf = pdf + 1.0
gdf = gdf + 1.0
d = binop(pdf, pdf)
g = binop(gdf, gdf)
assert_eq(d, g)
@pytest.mark.parametrize("binop", [operator.and_, operator.or_, operator.xor])
def test_bitwise_binops_df(pdf, gdf, binop):
d = binop(pdf, pdf + 1)
g = binop(gdf, gdf + 1)
assert_eq(d, g)
@pytest.mark.parametrize(
"binop",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_binops_series(pdf, gdf, binop):
pdf = pdf + 1.0
gdf = gdf + 1.0
d = binop(pdf.x, pdf.y)
g = binop(gdf.x, gdf.y)
assert_eq(d, g)
@pytest.mark.parametrize("binop", [operator.and_, operator.or_, operator.xor])
def test_bitwise_binops_series(pdf, gdf, binop):
d = binop(pdf.x, pdf.y + 1)
g = binop(gdf.x, gdf.y + 1)
assert_eq(d, g)
@pytest.mark.parametrize("unaryop", [operator.neg, operator.inv, operator.abs])
def test_unaryops_df(pdf, gdf, unaryop):
d = unaryop(pdf - 5)
g = unaryop(gdf - 5)
assert_eq(d, g)
@pytest.mark.parametrize(
"func",
[
lambda df: df.empty,
lambda df: df.x.empty,
lambda df: df.x.fillna(123, limit=None, method=None, axis=None),
lambda df: df.drop("x", axis=1, errors="raise"),
],
)
def test_unary_operators(func, pdf, gdf):
p = func(pdf)
g = func(gdf)
assert_eq(p, g)
def test_is_monotonic(gdf):
pdf = pd.DataFrame({"x": [1, 2, 3]}, index=[3, 1, 2])
gdf = cudf.DataFrame.from_pandas(pdf)
assert not gdf.index.is_monotonic
assert not gdf.index.is_monotonic_increasing
assert not gdf.index.is_monotonic_decreasing
def test_iter(pdf, gdf):
assert list(pdf) == list(gdf)
def test_iteritems(gdf):
for k, v in gdf.iteritems():
assert k in gdf.columns
assert isinstance(v, cudf.Series)
assert_eq(v, gdf[k])
@pytest.mark.parametrize("q", [0.5, 1, 0.001, [0.5], [], [0.005, 0.5, 1]])
@pytest.mark.parametrize("numeric_only", [True, False])
def test_quantile(q, numeric_only):
ts = pd.date_range("2018-08-24", periods=5, freq="D")
td = pd.to_timedelta(np.arange(5), unit="h")
pdf = pd.DataFrame(
{"date": ts, "delta": td, "val": np.random.randn(len(ts))}
)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(pdf["date"].quantile(q), gdf["date"].quantile(q))
assert_eq(pdf["delta"].quantile(q), gdf["delta"].quantile(q))
assert_eq(pdf["val"].quantile(q), gdf["val"].quantile(q))
if numeric_only:
assert_eq(pdf.quantile(q), gdf.quantile(q))
else:
        q = q if isinstance(q, list) else [q]
        assert_eq(
            pdf.quantile(q, numeric_only=False),
            gdf.quantile(q, numeric_only=False),
        )
def test_empty_quantile():
pdf = pd.DataFrame({"x": []})
df = cudf.DataFrame({"x": []})
actual = df.quantile()
expected = pdf.quantile()
assert_eq(actual, expected)
def test_from_pandas_function(pdf):
gdf = cudf.from_pandas(pdf)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(pdf, gdf)
gdf = cudf.from_pandas(pdf.x)
assert isinstance(gdf, cudf.Series)
assert_eq(pdf.x, gdf)
with pytest.raises(TypeError):
cudf.from_pandas(123)
@pytest.mark.parametrize("preserve_index", [True, False])
def test_arrow_pandas_compat(pdf, gdf, preserve_index):
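    # A frame with a named index should produce the same Arrow table from
    # cudf and pandas, and convert back equivalently.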
pdf["z"] = range(10)
pdf = pdf.set_index("z")
gdf["z"] = range(10)
gdf = gdf.set_index("z")
pdf_arrow_table = pa.Table.from_pandas(pdf, preserve_index=preserve_index)
gdf_arrow_table = gdf.to_arrow(preserve_index=preserve_index)
assert pa.Table.equals(pdf_arrow_table, gdf_arrow_table)
gdf2 = cudf.DataFrame.from_arrow(pdf_arrow_table)
pdf2 = pdf_arrow_table.to_pandas()
assert_eq(pdf2, gdf2)
@pytest.mark.parametrize("nrows", [1, 8, 100, 1000, 100000])
def test_series_hash_encode(nrows):
data = np.asarray(range(nrows))
    # Python's hash can return a different value for a string name, which
    # sometimes makes enc_with_name_arr and enc_arr come out the same, and
    # there is no better way to force a constant hash value. Use an integer
    # name so the hash is deterministic.
s = cudf.Series(data, name=1)
num_features = 1000
encoded_series = s.hash_encode(num_features)
assert isinstance(encoded_series, cudf.Series)
enc_arr = encoded_series.to_array()
assert np.all(enc_arr >= 0)
assert np.max(enc_arr) < num_features
enc_with_name_arr = s.hash_encode(num_features, use_name=True).to_array()
assert enc_with_name_arr[0] != enc_arr[0]
@pytest.mark.parametrize("dtype", NUMERIC_TYPES + ["bool"])
def test_cuda_array_interface(dtype):
np_data = np.arange(10).astype(dtype)
cupy_data = cupy.array(np_data)
pd_data = pd.Series(np_data)
cudf_data = cudf.Series(cupy_data)
assert_eq(pd_data, cudf_data)
gdf = cudf.DataFrame()
gdf["test"] = cupy_data
pd_data.name = "test"
assert_eq(pd_data, gdf["test"])
@pytest.mark.parametrize("nelem", [0, 2, 3, 100])
@pytest.mark.parametrize("nchunks", [1, 2, 5, 10])
@pytest.mark.parametrize("data_type", dtypes)
def test_from_arrow_chunked_arrays(nelem, nchunks, data_type):
np_list_data = [
np.random.randint(0, 100, nelem).astype(data_type)
for i in range(nchunks)
]
pa_chunk_array = pa.chunked_array(np_list_data)
expect = pd.Series(pa_chunk_array.to_pandas())
got = cudf.Series(pa_chunk_array)
assert_eq(expect, got)
np_list_data2 = [
np.random.randint(0, 100, nelem).astype(data_type)
for i in range(nchunks)
]
pa_chunk_array2 = pa.chunked_array(np_list_data2)
pa_table = pa.Table.from_arrays(
[pa_chunk_array, pa_chunk_array2], names=["a", "b"]
)
expect = pa_table.to_pandas()
got = cudf.DataFrame.from_arrow(pa_table)
assert_eq(expect, got)
@pytest.mark.skip(reason="Test was designed to be run in isolation")
def test_gpu_memory_usage_with_boolmask():
ctx = cuda.current_context()
def query_GPU_memory(note=""):
memInfo = ctx.get_memory_info()
usedMemoryGB = (memInfo.total - memInfo.free) / 1e9
return usedMemoryGB
cuda.current_context().deallocations.clear()
nRows = int(1e8)
nCols = 2
dataNumpy = np.asfortranarray(np.random.rand(nRows, nCols))
colNames = ["col" + str(iCol) for iCol in range(nCols)]
pandasDF = pd.DataFrame(data=dataNumpy, columns=colNames, dtype=np.float32)
cudaDF = cudf.core.DataFrame.from_pandas(pandasDF)
boolmask = cudf.Series(np.random.randint(1, 2, len(cudaDF)).astype("bool"))
memory_used = query_GPU_memory()
cudaDF = cudaDF[boolmask]
assert (
cudaDF.index._values.data_array_view.device_ctypes_pointer
== cudaDF["col0"].index._values.data_array_view.device_ctypes_pointer
)
assert (
cudaDF.index._values.data_array_view.device_ctypes_pointer
== cudaDF["col1"].index._values.data_array_view.device_ctypes_pointer
)
assert memory_used == query_GPU_memory()
def test_boolmask(pdf, gdf):
boolmask = np.random.randint(0, 2, len(pdf)) > 0
gdf = gdf[boolmask]
pdf = pdf[boolmask]
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"mask_shape",
[
(2, "ab"),
(2, "abc"),
(3, "ab"),
(3, "abc"),
(3, "abcd"),
(4, "abc"),
(4, "abcd"),
],
)
def test_dataframe_boolmask(mask_shape):
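    # Masking a DataFrame with a boolean DataFrame of various shapes should
    # match pandas after filling the resulting nulls.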
pdf = pd.DataFrame()
for col in "abc":
pdf[col] = np.random.randint(0, 10, 3)
pdf_mask = pd.DataFrame()
for col in mask_shape[1]:
pdf_mask[col] = np.random.randint(0, 2, mask_shape[0]) > 0
gdf = cudf.DataFrame.from_pandas(pdf)
gdf_mask = cudf.DataFrame.from_pandas(pdf_mask)
gdf = gdf[gdf_mask]
pdf = pdf[pdf_mask]
assert np.array_equal(gdf.columns, pdf.columns)
for col in gdf.columns:
assert np.array_equal(
gdf[col].fillna(-1).to_pandas().values, pdf[col].fillna(-1).values
)
@pytest.mark.parametrize(
"mask",
[
[True, False, True],
pytest.param(
cudf.Series([True, False, True]),
marks=pytest.mark.xfail(
reason="Pandas can't index a multiindex with a Series"
),
),
],
)
def test_dataframe_multiindex_boolmask(mask):
gdf = cudf.DataFrame(
{"w": [3, 2, 1], "x": [1, 2, 3], "y": [0, 1, 0], "z": [1, 1, 1]}
)
gdg = gdf.groupby(["w", "x"]).count()
pdg = gdg.to_pandas()
assert_eq(gdg[mask], pdg[mask])
def test_dataframe_assignment():
pdf = pd.DataFrame()
for col in "abc":
pdf[col] = np.array([0, 1, 1, -2, 10])
gdf = cudf.DataFrame.from_pandas(pdf)
gdf[gdf < 0] = 999
pdf[pdf < 0] = 999
assert_eq(gdf, pdf)
def test_1row_arrow_table():
data = [pa.array([0]), pa.array([1])]
batch = pa.RecordBatch.from_arrays(data, ["f0", "f1"])
table = pa.Table.from_batches([batch])
expect = table.to_pandas()
got = cudf.DataFrame.from_arrow(table)
assert_eq(expect, got)
def test_arrow_handle_no_index_name(pdf, gdf):
gdf_arrow = gdf.to_arrow()
pdf_arrow = pa.Table.from_pandas(pdf)
assert pa.Table.equals(pdf_arrow, gdf_arrow)
got = cudf.DataFrame.from_arrow(gdf_arrow)
expect = pdf_arrow.to_pandas()
assert_eq(expect, got)
@pytest.mark.parametrize("num_rows", [1, 3, 10, 100])
@pytest.mark.parametrize("num_bins", [1, 2, 4, 20])
@pytest.mark.parametrize("right", [True, False])
@pytest.mark.parametrize("dtype", NUMERIC_TYPES + ["bool"])
@pytest.mark.parametrize("series_bins", [True, False])
def test_series_digitize(num_rows, num_bins, right, dtype, series_bins):
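    # digitize should match np.digitize whether the bins are passed as a
    # host array or as a cudf.Series.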
data = np.random.randint(0, 100, num_rows).astype(dtype)
bins = np.unique(np.sort(np.random.randint(2, 95, num_bins).astype(dtype)))
s = cudf.Series(data)
if series_bins:
s_bins = cudf.Series(bins)
indices = s.digitize(s_bins, right)
else:
indices = s.digitize(bins, right)
np.testing.assert_array_equal(
np.digitize(data, bins, right), indices.to_array()
)
def test_series_digitize_invalid_bins():
s = cudf.Series(np.random.randint(0, 30, 80), dtype="int32")
bins = cudf.Series([2, None, None, 50, 90], dtype="int32")
with pytest.raises(
ValueError, match="`bins` cannot contain null entries."
):
_ = s.digitize(bins)
def test_pandas_non_contiguious():
arr1 = np.random.sample([5000, 10])
assert arr1.flags["C_CONTIGUOUS"] is True
df = pd.DataFrame(arr1)
for col in df.columns:
assert df[col].values.flags["C_CONTIGUOUS"] is False
gdf = cudf.DataFrame.from_pandas(df)
assert_eq(gdf.to_pandas(), df)
@pytest.mark.parametrize("num_elements", [0, 2, 10, 100])
@pytest.mark.parametrize("null_type", [np.nan, None, "mixed"])
def test_series_all_null(num_elements, null_type):
if null_type == "mixed":
data = []
data1 = [np.nan] * int(num_elements / 2)
data2 = [None] * int(num_elements / 2)
for idx in range(len(data1)):
data.append(data1[idx])
data.append(data2[idx])
else:
data = [null_type] * num_elements
# Typecast Pandas because None will return `object` dtype
expect = pd.Series(data, dtype="float64")
got = cudf.Series(data)
assert_eq(expect, got)
@pytest.mark.parametrize("num_elements", [0, 2, 10, 100])
def test_series_all_valid_nan(num_elements):
data = [np.nan] * num_elements
sr = cudf.Series(data, nan_as_null=False)
np.testing.assert_equal(sr.null_count, 0)
def test_series_rename():
pds = pd.Series([1, 2, 3], name="asdf")
gds = cudf.Series([1, 2, 3], name="asdf")
expect = pds.rename("new_name")
got = gds.rename("new_name")
assert_eq(expect, got)
pds = pd.Series(expect)
gds = cudf.Series(got)
assert_eq(pds, gds)
pds = pd.Series(expect, name="name name")
gds = cudf.Series(got, name="name name")
assert_eq(pds, gds)
@pytest.mark.parametrize("data_type", dtypes)
@pytest.mark.parametrize("nelem", [0, 100])
def test_head_tail(nelem, data_type):
def check_index_equality(left, right):
assert left.index.equals(right.index)
def check_values_equality(left, right):
if len(left) == 0 and len(right) == 0:
return None
np.testing.assert_array_equal(left.to_pandas(), right.to_pandas())
def check_frame_series_equality(left, right):
check_index_equality(left, right)
check_values_equality(left, right)
gdf = cudf.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
check_frame_series_equality(gdf.head(), gdf[:5])
check_frame_series_equality(gdf.head(3), gdf[:3])
check_frame_series_equality(gdf.head(-2), gdf[:-2])
check_frame_series_equality(gdf.head(0), gdf[0:0])
check_frame_series_equality(gdf["a"].head(), gdf["a"][:5])
check_frame_series_equality(gdf["a"].head(3), gdf["a"][:3])
check_frame_series_equality(gdf["a"].head(-2), gdf["a"][:-2])
check_frame_series_equality(gdf.tail(), gdf[-5:])
check_frame_series_equality(gdf.tail(3), gdf[-3:])
check_frame_series_equality(gdf.tail(-2), gdf[2:])
check_frame_series_equality(gdf.tail(0), gdf[0:0])
check_frame_series_equality(gdf["a"].tail(), gdf["a"][-5:])
check_frame_series_equality(gdf["a"].tail(3), gdf["a"][-3:])
check_frame_series_equality(gdf["a"].tail(-2), gdf["a"][2:])
def test_tail_for_string():
gdf = cudf.DataFrame()
gdf["id"] = cudf.Series(["a", "b"], dtype=np.object_)
gdf["v"] = cudf.Series([1, 2])
assert_eq(gdf.tail(3), gdf.to_pandas().tail(3))
@pytest.mark.parametrize("drop", [True, False])
def test_reset_index(pdf, gdf, drop):
assert_eq(
pdf.reset_index(drop=drop, inplace=False),
gdf.reset_index(drop=drop, inplace=False),
)
assert_eq(
pdf.x.reset_index(drop=drop, inplace=False),
gdf.x.reset_index(drop=drop, inplace=False),
)
@pytest.mark.parametrize("drop", [True, False])
def test_reset_named_index(pdf, gdf, drop):
pdf.index.name = "cudf"
gdf.index.name = "cudf"
assert_eq(
pdf.reset_index(drop=drop, inplace=False),
gdf.reset_index(drop=drop, inplace=False),
)
assert_eq(
pdf.x.reset_index(drop=drop, inplace=False),
gdf.x.reset_index(drop=drop, inplace=False),
)
@pytest.mark.parametrize("drop", [True, False])
def test_reset_index_inplace(pdf, gdf, drop):
pdf.reset_index(drop=drop, inplace=True)
gdf.reset_index(drop=drop, inplace=True)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"data",
[
{
"a": [1, 2, 3, 4, 5],
"b": ["a", "b", "c", "d", "e"],
"c": [1.0, 2.0, 3.0, 4.0, 5.0],
}
],
)
@pytest.mark.parametrize(
"index",
[
"a",
["a", "b"],
pd.CategoricalIndex(["I", "II", "III", "IV", "V"]),
pd.Series(["h", "i", "k", "l", "m"]),
["b", pd.Index(["I", "II", "III", "IV", "V"])],
["c", [11, 12, 13, 14, 15]],
pd.MultiIndex(
levels=[
["I", "II", "III", "IV", "V"],
["one", "two", "three", "four", "five"],
],
codes=[[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]],
names=["col1", "col2"],
),
pd.RangeIndex(0, 5), # corner case
[pd.Series(["h", "i", "k", "l", "m"]), pd.RangeIndex(0, 5)],
[
pd.MultiIndex(
levels=[
["I", "II", "III", "IV", "V"],
["one", "two", "three", "four", "five"],
],
codes=[[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]],
names=["col1", "col2"],
),
pd.RangeIndex(0, 5),
],
],
)
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
def test_set_index(data, index, drop, append, inplace):
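    # set_index should match pandas for column labels, array-likes, and
    # (Multi)Index objects, across all combinations of drop/append/inplace.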
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
expected = pdf.set_index(index, inplace=inplace, drop=drop, append=append)
actual = gdf.set_index(index, inplace=inplace, drop=drop, append=append)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"data",
[
{
"a": [1, 1, 2, 2, 5],
"b": ["a", "b", "c", "d", "e"],
"c": [1.0, 2.0, 3.0, 4.0, 5.0],
}
],
)
@pytest.mark.parametrize("index", ["a", pd.Index([1, 1, 2, 2, 3])])
@pytest.mark.parametrize("verify_integrity", [True])
@pytest.mark.xfail
def test_set_index_verify_integrity(data, index, verify_integrity):
gdf = cudf.DataFrame(data)
gdf.set_index(index, verify_integrity=verify_integrity)
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("nelem", [10, 200, 1333])
def test_set_index_multi(drop, nelem):
np.random.seed(0)
a = np.arange(nelem)
np.random.shuffle(a)
df = pd.DataFrame(
{
"a": a,
"b": np.random.randint(0, 4, size=nelem),
"c": np.random.uniform(low=0, high=4, size=nelem),
"d": np.random.choice(["green", "black", "white"], nelem),
}
)
df["e"] = df["d"].astype("category")
gdf = cudf.DataFrame.from_pandas(df)
assert_eq(gdf.set_index("a", drop=drop), gdf.set_index(["a"], drop=drop))
assert_eq(
df.set_index(["b", "c"], drop=drop),
gdf.set_index(["b", "c"], drop=drop),
)
assert_eq(
df.set_index(["d", "b"], drop=drop),
gdf.set_index(["d", "b"], drop=drop),
)
assert_eq(
df.set_index(["b", "d", "e"], drop=drop),
gdf.set_index(["b", "d", "e"], drop=drop),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_0(copy):
# TODO (ptaylor): pandas changes `int` dtype to `float64`
# when reindexing and filling new label indices with NaN
gdf = cudf.datasets.randomdata(
nrows=6,
dtypes={
"a": "category",
# 'b': int,
"c": float,
"d": str,
},
)
pdf = gdf.to_pandas()
# Validate reindex returns a copy unmodified
assert_eq(pdf.reindex(copy=True), gdf.reindex(copy=copy))
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_1(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis defaults to 0
assert_eq(pdf.reindex(index, copy=True), gdf.reindex(index, copy=copy))
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_2(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis=0
assert_eq(
pdf.reindex(index, axis=0, copy=True),
gdf.reindex(index, axis=0, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_3(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
    # Validate labels are used as columns when axis=1
assert_eq(
pdf.reindex(columns, axis=1, copy=True),
gdf.reindex(columns, axis=1, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_4(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis=0
assert_eq(
pdf.reindex(labels=index, axis=0, copy=True),
gdf.reindex(labels=index, axis=0, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_5(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as columns when axis=1
assert_eq(
pdf.reindex(labels=columns, axis=1, copy=True),
gdf.reindex(labels=columns, axis=1, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_6(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis='index'
assert_eq(
pdf.reindex(labels=index, axis="index", copy=True),
gdf.reindex(labels=index, axis="index", copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_7(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as columns when axis='columns'
assert_eq(
pdf.reindex(labels=columns, axis="columns", copy=True),
gdf.reindex(labels=columns, axis="columns", copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_8(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes labels when index=labels
assert_eq(
pdf.reindex(index=index, copy=True),
gdf.reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_9(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes column names when columns=labels
assert_eq(
pdf.reindex(columns=columns, copy=True),
gdf.reindex(columns=columns, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_10(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes both labels and column names when
# index=index_labels and columns=column_labels
assert_eq(
pdf.reindex(index=index, columns=columns, copy=True),
gdf.reindex(index=index, columns=columns, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_change_dtype(copy):
if PANDAS_GE_110:
kwargs = {"check_freq": False}
else:
kwargs = {}
index = pd.date_range("12/29/2009", periods=10, freq="D")
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes both labels and column names when
# index=index_labels and columns=column_labels
assert_eq(
pdf.reindex(index=index, columns=columns, copy=True),
gdf.reindex(index=index, columns=columns, copy=copy),
**kwargs,
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_categorical_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"a": "category"})
pdf = gdf.to_pandas()
assert_eq(pdf["a"].reindex(copy=True), gdf["a"].reindex(copy=copy))
assert_eq(
pdf["a"].reindex(index, copy=True), gdf["a"].reindex(index, copy=copy)
)
assert_eq(
pdf["a"].reindex(index=index, copy=True),
gdf["a"].reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_float_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"c": float})
pdf = gdf.to_pandas()
assert_eq(pdf["c"].reindex(copy=True), gdf["c"].reindex(copy=copy))
assert_eq(
pdf["c"].reindex(index, copy=True), gdf["c"].reindex(index, copy=copy)
)
assert_eq(
pdf["c"].reindex(index=index, copy=True),
gdf["c"].reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_string_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"d": str})
pdf = gdf.to_pandas()
assert_eq(pdf["d"].reindex(copy=True), gdf["d"].reindex(copy=copy))
assert_eq(
pdf["d"].reindex(index, copy=True), gdf["d"].reindex(index, copy=copy)
)
assert_eq(
pdf["d"].reindex(index=index, copy=True),
gdf["d"].reindex(index=index, copy=copy),
)
def test_to_frame(pdf, gdf):
assert_eq(pdf.x.to_frame(), gdf.x.to_frame())
name = "foo"
gdf_new_name = gdf.x.to_frame(name=name)
pdf_new_name = pdf.x.to_frame(name=name)
    assert_eq(gdf_new_name, pdf_new_name)
name = False
gdf_new_name = gdf.x.to_frame(name=name)
pdf_new_name = pdf.x.to_frame(name=name)
assert_eq(gdf_new_name, pdf_new_name)
assert gdf_new_name.columns[0] is name
def test_dataframe_empty_sort_index():
pdf = pd.DataFrame({"x": []})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.sort_index()
got = gdf.sort_index()
assert_eq(expect, got)
@pytest.mark.parametrize("axis", [0, 1, "index", "columns"])
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("ignore_index", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_dataframe_sort_index(
axis, ascending, inplace, ignore_index, na_position
):
pdf = pd.DataFrame(
{"b": [1, 3, 2], "a": [1, 4, 3], "c": [4, 1, 5]},
index=[3.0, 1.0, np.nan],
)
gdf = cudf.DataFrame.from_pandas(pdf)
expected = pdf.sort_index(
axis=axis,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
got = gdf.sort_index(
axis=axis,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
if inplace is True:
assert_eq(pdf, gdf)
else:
assert_eq(expected, got)
@pytest.mark.parametrize("axis", [0, 1, "index", "columns"])
@pytest.mark.parametrize(
"level",
[
0,
"b",
1,
["b"],
"a",
["a", "b"],
["b", "a"],
[0, 1],
[1, 0],
[0, 2],
None,
],
)
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("ignore_index", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_dataframe_multiindex_sort_index(
axis, level, ascending, inplace, ignore_index, na_position
):
pdf = pd.DataFrame(
{
"b": [1.0, 3.0, np.nan],
"a": [1, 4, 3],
1: ["a", "b", "c"],
"e": [3, 1, 4],
"d": [1, 2, 8],
}
).set_index(["b", "a", 1])
gdf = cudf.DataFrame.from_pandas(pdf)
    # ignore_index was only added to pandas sort_index in v1.0, so it is not
    # passed to pandas here and is emulated below via reset_index(drop=True)
expected = pdf.sort_index(
axis=axis,
level=level,
ascending=ascending,
inplace=inplace,
na_position=na_position,
)
got = gdf.sort_index(
axis=axis,
level=level,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
if inplace is True:
if ignore_index is True:
pdf = pdf.reset_index(drop=True)
assert_eq(pdf, gdf)
else:
if ignore_index is True:
expected = expected.reset_index(drop=True)
assert_eq(expected, got)
@pytest.mark.parametrize("dtype", dtypes + ["category"])
def test_dataframe_0_row_dtype(dtype):
if dtype == "category":
data = pd.Series(["a", "b", "c", "d", "e"], dtype="category")
else:
data = np.array([1, 2, 3, 4, 5], dtype=dtype)
expect = cudf.DataFrame()
expect["x"] = data
expect["y"] = data
got = expect.head(0)
for col_name in got.columns:
assert expect[col_name].dtype == got[col_name].dtype
expect = cudf.Series(data)
got = expect.head(0)
assert expect.dtype == got.dtype
@pytest.mark.parametrize("nan_as_null", [True, False])
def test_series_list_nanasnull(nan_as_null):
data = [1.0, 2.0, 3.0, np.nan, None]
expect = pa.array(data, from_pandas=nan_as_null)
got = cudf.Series(data, nan_as_null=nan_as_null).to_arrow()
# Bug in Arrow 0.14.1 where NaNs aren't handled
expect = expect.cast("int64", safe=False)
got = got.cast("int64", safe=False)
assert pa.Array.equals(expect, got)
def test_column_assignment():
gdf = cudf.datasets.randomdata(
nrows=20, dtypes={"a": "category", "b": int, "c": float}
)
new_cols = ["q", "r", "s"]
gdf.columns = new_cols
assert list(gdf.columns) == new_cols
def test_select_dtype():
gdf = cudf.datasets.randomdata(
nrows=20, dtypes={"a": "category", "b": int, "c": float, "d": str}
)
pdf = gdf.to_pandas()
assert_eq(pdf.select_dtypes("float64"), gdf.select_dtypes("float64"))
assert_eq(pdf.select_dtypes(np.float64), gdf.select_dtypes(np.float64))
assert_eq(
pdf.select_dtypes(include=["float64"]),
gdf.select_dtypes(include=["float64"]),
)
assert_eq(
pdf.select_dtypes(include=["object", "int", "category"]),
gdf.select_dtypes(include=["object", "int", "category"]),
)
assert_eq(
pdf.select_dtypes(include=["int64", "float64"]),
gdf.select_dtypes(include=["int64", "float64"]),
)
assert_eq(
pdf.select_dtypes(include=np.number),
gdf.select_dtypes(include=np.number),
)
assert_eq(
pdf.select_dtypes(include=[np.int64, np.float64]),
gdf.select_dtypes(include=[np.int64, np.float64]),
)
assert_eq(
pdf.select_dtypes(include=["category"]),
gdf.select_dtypes(include=["category"]),
)
assert_eq(
pdf.select_dtypes(exclude=np.number),
gdf.select_dtypes(exclude=np.number),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes,
rfunc=gdf.select_dtypes,
lfunc_args_and_kwargs=([], {"includes": ["Foo"]}),
rfunc_args_and_kwargs=([], {"includes": ["Foo"]}),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes,
rfunc=gdf.select_dtypes,
lfunc_args_and_kwargs=(
[],
{"exclude": np.number, "include": np.number},
),
rfunc_args_and_kwargs=(
[],
{"exclude": np.number, "include": np.number},
),
)
gdf = cudf.DataFrame(
{"A": [3, 4, 5], "C": [1, 2, 3], "D": ["a", "b", "c"]}
)
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(include=["object", "int", "category"]),
gdf.select_dtypes(include=["object", "int", "category"]),
)
assert_eq(
pdf.select_dtypes(include=["object"], exclude=["category"]),
gdf.select_dtypes(include=["object"], exclude=["category"]),
)
gdf = cudf.DataFrame({"a": range(10), "b": range(10, 20)})
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(include=["category"]),
gdf.select_dtypes(include=["category"]),
)
assert_eq(
pdf.select_dtypes(include=["float"]),
gdf.select_dtypes(include=["float"]),
)
assert_eq(
pdf.select_dtypes(include=["object"]),
gdf.select_dtypes(include=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"]), gdf.select_dtypes(include=["int"])
)
assert_eq(
pdf.select_dtypes(exclude=["float"]),
gdf.select_dtypes(exclude=["float"]),
)
assert_eq(
pdf.select_dtypes(exclude=["object"]),
gdf.select_dtypes(exclude=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"], exclude=["object"]),
gdf.select_dtypes(include=["int"], exclude=["object"]),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes, rfunc=gdf.select_dtypes,
)
gdf = cudf.DataFrame(
{"a": cudf.Series([], dtype="int"), "b": cudf.Series([], dtype="str")}
)
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(exclude=["object"]),
gdf.select_dtypes(exclude=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"], exclude=["object"]),
gdf.select_dtypes(include=["int"], exclude=["object"]),
)
def test_select_dtype_datetime():
gdf = cudf.datasets.timeseries(
start="2000-01-01", end="2000-01-02", freq="3600s", dtypes={"x": int}
)
gdf = gdf.reset_index()
pdf = gdf.to_pandas()
assert_eq(pdf.select_dtypes("datetime64"), gdf.select_dtypes("datetime64"))
assert_eq(
pdf.select_dtypes(np.dtype("datetime64")),
gdf.select_dtypes(np.dtype("datetime64")),
)
assert_eq(
pdf.select_dtypes(include="datetime64"),
gdf.select_dtypes(include="datetime64"),
)
def test_select_dtype_datetime_with_frequency():
gdf = cudf.datasets.timeseries(
start="2000-01-01", end="2000-01-02", freq="3600s", dtypes={"x": int}
)
gdf = gdf.reset_index()
pdf = gdf.to_pandas()
assert_exceptions_equal(
pdf.select_dtypes,
gdf.select_dtypes,
(["datetime64[ms]"],),
(["datetime64[ms]"],),
)
def test_array_ufunc():
gdf = cudf.DataFrame({"x": [2, 3, 4.0], "y": [9.0, 2.5, 1.1]})
pdf = gdf.to_pandas()
assert_eq(np.sqrt(gdf), np.sqrt(pdf))
assert_eq(np.sqrt(gdf.x), np.sqrt(pdf.x))
@pytest.mark.parametrize("nan_value", [-5, -5.0, 0, 5, 5.0, None, "pandas"])
def test_series_to_gpu_array(nan_value):
s = cudf.Series([0, 1, None, 3])
np.testing.assert_array_equal(
s.to_array(nan_value), s.to_gpu_array(nan_value).copy_to_host()
)
def test_dataframe_describe_exclude():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(exclude=["float"])
pdf_results = pdf.describe(exclude=["float"])
assert_eq(gdf_results, pdf_results)
def test_dataframe_describe_include():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(include=["int"])
pdf_results = pdf.describe(include=["int"])
assert_eq(gdf_results, pdf_results)
def test_dataframe_describe_default():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe()
pdf_results = pdf.describe()
assert_eq(pdf_results, gdf_results)
def test_series_describe_include_all():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
df["animal"] = np.random.choice(["dog", "cat", "bird"], data_length)
pdf = df.to_pandas()
gdf_results = df.describe(include="all")
pdf_results = pdf.describe(include="all")
assert_eq(gdf_results[["x", "y"]], pdf_results[["x", "y"]])
assert_eq(gdf_results.index, pdf_results.index)
assert_eq(gdf_results.columns, pdf_results.columns)
assert_eq(
gdf_results[["animal"]].fillna(-1).astype("str"),
pdf_results[["animal"]].fillna(-1).astype("str"),
)
def test_dataframe_describe_percentiles():
np.random.seed(12)
data_length = 10000
sample_percentiles = [0.0, 0.1, 0.33, 0.84, 0.4, 0.99]
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(percentiles=sample_percentiles)
pdf_results = pdf.describe(percentiles=sample_percentiles)
assert_eq(pdf_results, gdf_results)
def test_get_numeric_data():
pdf = pd.DataFrame(
{"x": [1, 2, 3], "y": [1.0, 2.0, 3.0], "z": ["a", "b", "c"]}
)
gdf = cudf.from_pandas(pdf)
assert_eq(pdf._get_numeric_data(), gdf._get_numeric_data())
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("period", [-1, -5, -10, -20, 0, 1, 5, 10, 20])
@pytest.mark.parametrize("data_empty", [False, True])
def test_shift(dtype, period, data_empty):
if data_empty:
data = None
else:
if dtype == np.int8:
# to keep data in range
data = gen_rand(dtype, 100000, low=-2, high=2)
else:
data = gen_rand(dtype, 100000)
gdf = cudf.DataFrame({"a": cudf.Series(data, dtype=dtype)})
pdf = pd.DataFrame({"a": pd.Series(data, dtype=dtype)})
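    # fillna(0) (plus astype back to `dtype` on the pandas side) sidesteps the
    # upcast to float64 that pandas performs when shifting introduces NaNs.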
shifted_outcome = gdf.a.shift(period).fillna(0)
expected_outcome = pdf.a.shift(period).fillna(0).astype(dtype)
if data_empty:
assert_eq(shifted_outcome, expected_outcome, check_index_type=False)
else:
assert_eq(shifted_outcome, expected_outcome)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("period", [-1, -5, -10, -20, 0, 1, 5, 10, 20])
@pytest.mark.parametrize("data_empty", [False, True])
def test_diff(dtype, period, data_empty):
if data_empty:
data = None
else:
if dtype == np.int8:
# to keep data in range
data = gen_rand(dtype, 100000, low=-2, high=2)
else:
data = gen_rand(dtype, 100000)
gdf = cudf.DataFrame({"a": cudf.Series(data, dtype=dtype)})
pdf = pd.DataFrame({"a": pd.Series(data, dtype=dtype)})
expected_outcome = pdf.a.diff(period)
diffed_outcome = gdf.a.diff(period).astype(expected_outcome.dtype)
if data_empty:
assert_eq(diffed_outcome, expected_outcome, check_index_type=False)
else:
assert_eq(diffed_outcome, expected_outcome)
@pytest.mark.parametrize("df", _dataframe_na_data())
@pytest.mark.parametrize("nan_as_null", [True, False, None])
def test_dataframe_isnull_isna(df, nan_as_null):
gdf = cudf.DataFrame.from_pandas(df, nan_as_null=nan_as_null)
assert_eq(df.isnull(), gdf.isnull())
assert_eq(df.isna(), gdf.isna())
# Test individual columns
for col in df:
assert_eq(df[col].isnull(), gdf[col].isnull())
assert_eq(df[col].isna(), gdf[col].isna())
@pytest.mark.parametrize("df", _dataframe_na_data())
@pytest.mark.parametrize("nan_as_null", [True, False, None])
def test_dataframe_notna_notnull(df, nan_as_null):
gdf = cudf.DataFrame.from_pandas(df, nan_as_null=nan_as_null)
assert_eq(df.notnull(), gdf.notnull())
assert_eq(df.notna(), gdf.notna())
# Test individual columns
for col in df:
assert_eq(df[col].notnull(), gdf[col].notnull())
assert_eq(df[col].notna(), gdf[col].notna())
def test_ndim():
pdf = pd.DataFrame({"x": range(5), "y": range(5, 10)})
gdf = cudf.DataFrame.from_pandas(pdf)
assert pdf.ndim == gdf.ndim
assert pdf.x.ndim == gdf.x.ndim
s = pd.Series(dtype="float64")
gs = cudf.Series()
assert s.ndim == gs.ndim
@pytest.mark.parametrize(
"decimals",
[
-3,
0,
5,
pd.Series([1, 4, 3, -6], index=["w", "x", "y", "z"]),
cudf.Series([-4, -2, 12], index=["x", "y", "z"]),
{"w": -1, "x": 15, "y": 2},
],
)
def test_dataframe_round(decimals):
pdf = pd.DataFrame(
{
"w": np.arange(0.5, 10.5, 1),
"x": np.random.normal(-100, 100, 10),
"y": np.array(
[
14.123,
2.343,
np.nan,
0.0,
-8.302,
np.nan,
94.313,
-112.236,
-8.029,
np.nan,
]
),
"z": np.repeat([-0.6459412758761901], 10),
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
if isinstance(decimals, cudf.Series):
pdecimals = decimals.to_pandas()
else:
pdecimals = decimals
result = gdf.round(decimals)
expected = pdf.round(pdecimals)
assert_eq(result, expected)
# with nulls, maintaining existing null mask
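    # (the same NaN-bearing numpy array is assigned to both frames so that
    # they share one null pattern)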
for c in pdf.columns:
arr = pdf[c].to_numpy().astype("float64") # for pandas nulls
arr.ravel()[np.random.choice(10, 5, replace=False)] = np.nan
pdf[c] = gdf[c] = arr
result = gdf.round(decimals)
expected = pdf.round(pdecimals)
assert_eq(result, expected)
    for c in gdf.columns:
        assert np.array_equal(
            gdf[c].nullmask.to_array(), result[c].nullmask.to_array()
        )
@pytest.mark.parametrize(
"data",
[
[0, 1, 2, 3],
[-2, -1, 2, 3, 5],
[-2, -1, 0, 3, 5],
[True, False, False],
[True],
[False],
[],
[True, None, False],
[True, True, None],
[None, None],
[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]],
[[1, True], [2, False], [3, False]],
pytest.param(
[["a", True], ["b", False], ["c", False]],
marks=[
pytest.mark.xfail(
reason="NotImplementedError: all does not "
"support columns of object dtype."
)
],
),
],
)
def test_all(data):
# Pandas treats `None` in object type columns as True for some reason, so
# replacing with `False`
if np.array(data).ndim <= 1:
pdata = cudf.utils.utils._create_pandas_series(data=data).replace(
[None], False
)
gdata = cudf.Series.from_pandas(pdata)
else:
pdata = pd.DataFrame(data, columns=["a", "b"]).replace([None], False)
gdata = cudf.DataFrame.from_pandas(pdata)
# test bool_only
if pdata["b"].dtype == "bool":
got = gdata.all(bool_only=True)
expected = pdata.all(bool_only=True)
assert_eq(got, expected)
else:
with pytest.raises(NotImplementedError):
gdata.all(bool_only=False)
with pytest.raises(NotImplementedError):
gdata.all(level="a")
got = gdata.all()
expected = pdata.all()
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[0, 1, 2, 3],
[-2, -1, 2, 3, 5],
[-2, -1, 0, 3, 5],
[0, 0, 0, 0, 0],
[0, 0, None, 0],
[True, False, False],
[True],
[False],
[],
[True, None, False],
[True, True, None],
[None, None],
[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]],
[[1, True], [2, False], [3, False]],
pytest.param(
[["a", True], ["b", False], ["c", False]],
marks=[
pytest.mark.xfail(
reason="NotImplementedError: any does not "
"support columns of object dtype."
)
],
),
],
)
@pytest.mark.parametrize("axis", [0, 1])
def test_any(data, axis):
if np.array(data).ndim <= 1:
pdata = cudf.utils.utils._create_pandas_series(data=data)
gdata = cudf.Series.from_pandas(pdata)
if axis == 1:
with pytest.raises(NotImplementedError):
gdata.any(axis=axis)
else:
got = gdata.any(axis=axis)
expected = pdata.any(axis=axis)
assert_eq(got, expected)
else:
pdata = pd.DataFrame(data, columns=["a", "b"])
gdata = cudf.DataFrame.from_pandas(pdata)
# test bool_only
if pdata["b"].dtype == "bool":
got = gdata.any(bool_only=True)
expected = pdata.any(bool_only=True)
assert_eq(got, expected)
else:
with pytest.raises(NotImplementedError):
gdata.any(bool_only=False)
with pytest.raises(NotImplementedError):
gdata.any(level="a")
got = gdata.any(axis=axis)
expected = pdata.any(axis=axis)
assert_eq(got, expected)
@pytest.mark.parametrize("axis", [0, 1])
def test_empty_dataframe_any(axis):
pdf = pd.DataFrame({}, columns=["a", "b"])
gdf = cudf.DataFrame.from_pandas(pdf)
got = gdf.any(axis=axis)
expected = pdf.any(axis=axis)
assert_eq(got, expected, check_index_type=False)
@pytest.mark.parametrize("indexed", [False, True])
def test_dataframe_sizeof(indexed):
rows = int(1e6)
    index = list(range(rows)) if indexed else None
gdf = cudf.DataFrame({"A": [8] * rows, "B": [32] * rows}, index=index)
for c in gdf._data.columns:
assert gdf._index.__sizeof__() == gdf._index.__sizeof__()
cols_sizeof = sum(c.__sizeof__() for c in gdf._data.columns)
assert gdf.__sizeof__() == (gdf._index.__sizeof__() + cols_sizeof)
@pytest.mark.parametrize("a", [[], ["123"]])
@pytest.mark.parametrize("b", ["123", ["123"]])
@pytest.mark.parametrize(
"misc_data",
["123", ["123"] * 20, 123, [1, 2, 0.8, 0.9] * 50, 0.9, 0.00001],
)
@pytest.mark.parametrize("non_list_data", [123, "abc", "zyx", "rapids", 0.8])
def test_create_dataframe_cols_empty_data(a, b, misc_data, non_list_data):
expected = pd.DataFrame({"a": a})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = b
actual["b"] = b
assert_eq(actual, expected)
expected = pd.DataFrame({"a": []})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = misc_data
actual["b"] = misc_data
assert_eq(actual, expected)
expected = pd.DataFrame({"a": a})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = non_list_data
actual["b"] = non_list_data
assert_eq(actual, expected)
def test_empty_dataframe_describe():
pdf = pd.DataFrame({"a": [], "b": []})
gdf = cudf.from_pandas(pdf)
expected = pdf.describe()
actual = gdf.describe()
assert_eq(expected, actual)
def test_as_column_types():
col = column.as_column(cudf.Series([]))
assert_eq(col.dtype, np.dtype("float64"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="float64"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="float32")
assert_eq(col.dtype, np.dtype("float32"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="float32"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="str")
assert_eq(col.dtype, np.dtype("object"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="str"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="object")
assert_eq(col.dtype, np.dtype("object"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="object"))
assert_eq(pds, gds)
pds = pd.Series(np.array([1, 2, 3]), dtype="float32")
gds = cudf.Series(column.as_column(np.array([1, 2, 3]), dtype="float32"))
assert_eq(pds, gds)
pds = pd.Series([1, 2, 3], dtype="float32")
gds = cudf.Series([1, 2, 3], dtype="float32")
assert_eq(pds, gds)
pds = pd.Series([], dtype="float64")
gds = cudf.Series(column.as_column(pds))
assert_eq(pds, gds)
pds = pd.Series([1, 2, 4], dtype="int64")
gds = cudf.Series(column.as_column(cudf.Series([1, 2, 4]), dtype="int64"))
assert_eq(pds, gds)
pds = pd.Series([1.2, 18.0, 9.0], dtype="float32")
gds = cudf.Series(
column.as_column(cudf.Series([1.2, 18.0, 9.0]), dtype="float32")
)
assert_eq(pds, gds)
pds = pd.Series([1.2, 18.0, 9.0], dtype="str")
gds = cudf.Series(
column.as_column(cudf.Series([1.2, 18.0, 9.0]), dtype="str")
)
assert_eq(pds, gds)
pds = pd.Series(pd.Index(["1", "18", "9"]), dtype="int")
gds = cudf.Series(
cudf.core.index.StringIndex(["1", "18", "9"]), dtype="int"
)
assert_eq(pds, gds)
def test_one_row_head():
gdf = cudf.DataFrame({"name": ["carl"], "score": [100]}, index=[123])
pdf = gdf.to_pandas()
head_gdf = gdf.head()
head_pdf = pdf.head()
assert_eq(head_pdf, head_gdf)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", NUMERIC_TYPES)
def test_series_astype_numeric_to_numeric(dtype, as_dtype):
psr = pd.Series([1, 2, 4, 3], dtype=dtype)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", NUMERIC_TYPES)
def test_series_astype_numeric_to_numeric_nulls(dtype, as_dtype):
data = [1, 2, None, 3]
sr = cudf.Series(data, dtype=dtype)
got = sr.astype(as_dtype)
expect = cudf.Series([1, 2, None, 3], dtype=as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize(
"as_dtype",
[
"str",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_numeric_to_other(dtype, as_dtype):
psr = pd.Series([1, 2, 3], dtype=dtype)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"as_dtype",
[
"str",
"int32",
"uint32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_string_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05"]
else:
data = ["1", "2", "3"]
psr = pd.Series(data)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"as_dtype",
[
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_datetime_to_other(as_dtype):
data = ["2001-01-01", "2002-02-02", "2001-01-05"]
psr = pd.Series(data)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"inp",
[
("datetime64[ns]", "2011-01-01 00:00:00.000000000"),
("datetime64[us]", "2011-01-01 00:00:00.000000"),
("datetime64[ms]", "2011-01-01 00:00:00.000"),
("datetime64[s]", "2011-01-01 00:00:00"),
],
)
def test_series_astype_datetime_to_string(inp):
dtype, expect = inp
base_date = "2011-01-01"
sr = cudf.Series([base_date], dtype=dtype)
got = sr.astype(str)[0]
assert expect == got
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"uint32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
"str",
],
)
def test_series_astype_categorical_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05", "2001-01-01"]
else:
data = [1, 2, 3, 1]
psr = pd.Series(data, dtype="category")
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize("ordered", [True, False])
def test_series_astype_to_categorical_ordered(ordered):
psr = pd.Series([1, 2, 3, 1], dtype="category")
gsr = cudf.from_pandas(psr)
ordered_dtype_pd = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=ordered
)
ordered_dtype_gd = cudf.CategoricalDtype.from_pandas(ordered_dtype_pd)
assert_eq(
psr.astype("int32").astype(ordered_dtype_pd).astype("int32"),
gsr.astype("int32").astype(ordered_dtype_gd).astype("int32"),
)
@pytest.mark.parametrize("ordered", [True, False])
def test_series_astype_cat_ordered_to_unordered(ordered):
pd_dtype = pd.CategoricalDtype(categories=[1, 2, 3], ordered=ordered)
pd_to_dtype = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=not ordered
)
gd_dtype = cudf.CategoricalDtype.from_pandas(pd_dtype)
gd_to_dtype = cudf.CategoricalDtype.from_pandas(pd_to_dtype)
psr = pd.Series([1, 2, 3], dtype=pd_dtype)
gsr = cudf.Series([1, 2, 3], dtype=gd_dtype)
expect = psr.astype(pd_to_dtype)
got = gsr.astype(gd_to_dtype)
assert_eq(expect, got)
def test_series_astype_null_cases():
data = [1, 2, None, 3]
# numerical to other
assert_eq(cudf.Series(data, dtype="str"), cudf.Series(data).astype("str"))
assert_eq(
cudf.Series(data, dtype="category"),
cudf.Series(data).astype("category"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="int32").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="uint32").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="datetime64[ms]"),
cudf.Series(data).astype("datetime64[ms]"),
)
# categorical to other
assert_eq(
cudf.Series(data, dtype="str"),
cudf.Series(data, dtype="category").astype("str"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="category").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="datetime64[ms]"),
cudf.Series(data, dtype="category").astype("datetime64[ms]"),
)
# string to other
assert_eq(
cudf.Series([1, 2, None, 3], dtype="int32"),
cudf.Series(["1", "2", None, "3"]).astype("int32"),
)
assert_eq(
cudf.Series(
["2001-01-01", "2001-02-01", None, "2001-03-01"],
dtype="datetime64[ms]",
),
cudf.Series(["2001-01-01", "2001-02-01", None, "2001-03-01"]).astype(
"datetime64[ms]"
),
)
assert_eq(
cudf.Series(["a", "b", "c", None], dtype="category").to_pandas(),
cudf.Series(["a", "b", "c", None]).astype("category").to_pandas(),
)
# datetime to other
data = [
"2001-01-01 00:00:00.000000",
"2001-02-01 00:00:00.000000",
None,
"2001-03-01 00:00:00.000000",
]
assert_eq(
cudf.Series(data),
cudf.Series(data, dtype="datetime64[us]").astype("str"),
)
assert_eq(
pd.Series(data, dtype="datetime64[ns]").astype("category"),
cudf.from_pandas(pd.Series(data, dtype="datetime64[ns]")).astype(
"category"
),
)
def test_series_astype_null_categorical():
sr = cudf.Series([None, None, None], dtype="category")
expect = cudf.Series([None, None, None], dtype="int32")
got = sr.astype("int32")
assert_eq(expect, got)
@pytest.mark.parametrize(
"data",
[
(
pd.Series([3, 3.0]),
pd.Series([2.3, 3.9]),
pd.Series([1.5, 3.9]),
pd.Series([1.0, 2]),
),
[
pd.Series([3, 3.0]),
pd.Series([2.3, 3.9]),
pd.Series([1.5, 3.9]),
pd.Series([1.0, 2]),
],
],
)
def test_create_dataframe_from_list_like(data):
pdf = pd.DataFrame(data, index=["count", "mean", "std", "min"])
gdf = cudf.DataFrame(data, index=["count", "mean", "std", "min"])
assert_eq(pdf, gdf)
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(pdf, gdf)
def test_create_dataframe_column():
pdf = pd.DataFrame(columns=["a", "b", "c"], index=["A", "Z", "X"])
gdf = cudf.DataFrame(columns=["a", "b", "c"], index=["A", "Z", "X"])
assert_eq(pdf, gdf)
pdf = pd.DataFrame(
{"a": [1, 2, 3], "b": [2, 3, 5]},
columns=["a", "b", "c"],
index=["A", "Z", "X"],
)
gdf = cudf.DataFrame(
{"a": [1, 2, 3], "b": [2, 3, 5]},
columns=["a", "b", "c"],
index=["A", "Z", "X"],
)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"data",
[
[1, 2, 4],
[],
[5.0, 7.0, 8.0],
pd.Categorical(["a", "b", "c"]),
["m", "a", "d", "v"],
],
)
def test_series_values_host_property(data):
pds = cudf.utils.utils._create_pandas_series(data=data)
gds = cudf.Series(data)
np.testing.assert_array_equal(pds.values, gds.values_host)
@pytest.mark.parametrize(
"data",
[
[1, 2, 4],
[],
[5.0, 7.0, 8.0],
pytest.param(
pd.Categorical(["a", "b", "c"]),
marks=pytest.mark.xfail(raises=NotImplementedError),
),
pytest.param(
["m", "a", "d", "v"],
marks=pytest.mark.xfail(raises=NotImplementedError),
),
],
)
def test_series_values_property(data):
pds = cudf.utils.utils._create_pandas_series(data=data)
gds = cudf.Series(data)
gds_vals = gds.values
assert isinstance(gds_vals, cupy.ndarray)
np.testing.assert_array_equal(gds_vals.get(), pds.values)
@pytest.mark.parametrize(
"data",
[
{"A": [1, 2, 3], "B": [4, 5, 6]},
{"A": [1.0, 2.0, 3.0], "B": [4.0, 5.0, 6.0]},
{"A": [1, 2, 3], "B": [1.0, 2.0, 3.0]},
{"A": np.float32(np.arange(3)), "B": np.float64(np.arange(3))},
pytest.param(
{"A": [1, None, 3], "B": [1, 2, None]},
marks=pytest.mark.xfail(
reason="Nulls not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": [None, None, None], "B": [None, None, None]},
marks=pytest.mark.xfail(
reason="Nulls not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": [], "B": []},
marks=pytest.mark.xfail(reason="Requires at least 1 row"),
),
pytest.param(
{"A": [1, 2, 3], "B": ["a", "b", "c"]},
marks=pytest.mark.xfail(
reason="str or categorical not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": pd.Categorical(["a", "b", "c"]), "B": ["d", "e", "f"]},
marks=pytest.mark.xfail(
reason="str or categorical not supported by as_gpu_matrix"
),
),
],
)
def test_df_values_property(data):
pdf = pd.DataFrame.from_dict(data)
gdf = cudf.DataFrame.from_pandas(pdf)
pmtr = pdf.values
gmtr = gdf.values.get()
np.testing.assert_array_equal(pmtr, gmtr)
def test_value_counts():
pdf = pd.DataFrame(
{
"numeric": [1, 2, 3, 4, 5, 6, 1, 2, 4] * 10,
"alpha": ["u", "h", "d", "a", "m", "u", "h", "d", "a"] * 10,
}
)
gdf = cudf.DataFrame(
{
"numeric": [1, 2, 3, 4, 5, 6, 1, 2, 4] * 10,
"alpha": ["u", "h", "d", "a", "m", "u", "h", "d", "a"] * 10,
}
)
assert_eq(
pdf.numeric.value_counts().sort_index(),
gdf.numeric.value_counts().sort_index(),
check_dtype=False,
)
assert_eq(
pdf.alpha.value_counts().sort_index(),
gdf.alpha.value_counts().sort_index(),
check_dtype=False,
)
@pytest.mark.parametrize(
"data",
[
[],
[0, 12, 14],
[0, 14, 12, 12, 3, 10, 12, 14],
np.random.randint(-100, 100, 200),
pd.Series([0.0, 1.0, None, 10.0]),
[None, None, None, None],
[np.nan, None, -1, 2, 3],
],
)
@pytest.mark.parametrize(
"values",
[
np.random.randint(-100, 100, 10),
[],
[np.nan, None, -1, 2, 3],
[1.0, 12.0, None, None, 120],
[0, 14, 12, 12, 3, 10, 12, 14, None],
[None, None, None],
["0", "12", "14"],
["0", "12", "14", "a"],
],
)
def test_isin_numeric(data, values):
index = np.random.randint(0, 100, len(data))
psr = cudf.utils.utils._create_pandas_series(data=data, index=index)
gsr = cudf.Series.from_pandas(psr, nan_as_null=False)
expected = psr.isin(values)
got = gsr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(
["2018-01-01", "2019-04-03", None, "2019-12-30"],
dtype="datetime64[ns]",
),
pd.Series(
[
"2018-01-01",
"2019-04-03",
None,
"2019-12-30",
"2018-01-01",
"2018-01-01",
],
dtype="datetime64[ns]",
),
],
)
@pytest.mark.parametrize(
"values",
[
[],
[1514764800000000000, 1577664000000000000],
[
1514764800000000000,
1577664000000000000,
1577664000000000000,
1577664000000000000,
1514764800000000000,
],
["2019-04-03", "2019-12-30", "2012-01-01"],
[
"2012-01-01",
"2012-01-01",
"2012-01-01",
"2019-04-03",
"2019-12-30",
"2012-01-01",
],
],
)
def test_isin_datetime(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(["this", "is", None, "a", "test"]),
pd.Series(["test", "this", "test", "is", None, "test", "a", "test"]),
pd.Series(["0", "12", "14"]),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["this", "is"],
[None, None, None],
["12", "14", "19"],
pytest.param(
[12, 14, 19],
marks=pytest.mark.xfail(
not PANDAS_GE_120,
reason="pandas's failure here seems like a bug(in < 1.2) "
"given the reverse succeeds",
),
),
["is", "this", "is", "this", "is"],
],
)
def test_isin_string(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(["a", "b", "c", "c", "c", "d", "e"], dtype="category"),
pd.Series(["a", "b", None, "c", "d", "e"], dtype="category"),
pd.Series([0, 3, 10, 12], dtype="category"),
pd.Series([0, 3, 10, 12, 0, 10, 3, 0, 0, 3, 3], dtype="category"),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["a", "b", None, "f", "words"],
["0", "12", None, "14"],
[0, 10, 12, None, 39, 40, 1000],
[0, 0, 0, 0, 3, 3, 3, None, 1, 2, 3],
],
)
def test_isin_categorical(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(
["this", "is", None, "a", "test"], index=["a", "b", "c", "d", "e"]
),
pd.Series([0, 15, 10], index=[0, None, 9]),
pd.Series(
range(25),
index=pd.date_range(
start="2019-01-01", end="2019-01-02", freq="H"
),
),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["this", "is"],
[0, 19, 13],
["2019-01-01 04:00:00", "2019-01-01 06:00:00", "2018-03-02"],
],
)
def test_isin_index(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.index.isin(values)
expected = psr.index.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
pd.MultiIndex.from_arrays(
[[1, 2, 3], ["red", "blue", "green"]], names=("number", "color")
),
pd.MultiIndex.from_arrays([[], []], names=("number", "color")),
pd.MultiIndex.from_arrays(
[[1, 2, 3, 10, 100], ["red", "blue", "green", "pink", "white"]],
names=("number", "color"),
),
],
)
@pytest.mark.parametrize(
"values,level,err",
[
(["red", "orange", "yellow"], "color", None),
(["red", "white", "yellow"], "color", None),
([0, 1, 2, 10, 11, 15], "number", None),
([0, 1, 2, 10, 11, 15], None, TypeError),
(pd.Series([0, 1, 2, 10, 11, 15]), None, TypeError),
(pd.Index([0, 1, 2, 10, 11, 15]), None, TypeError),
(pd.Index([0, 1, 2, 8, 11, 15]), "number", None),
(pd.Index(["red", "white", "yellow"]), "color", None),
([(1, "red"), (3, "red")], None, None),
(((1, "red"), (3, "red")), None, None),
(
pd.MultiIndex.from_arrays(
[[1, 2, 3], ["red", "blue", "green"]],
names=("number", "color"),
),
None,
None,
),
(
pd.MultiIndex.from_arrays([[], []], names=("number", "color")),
None,
None,
),
(
pd.MultiIndex.from_arrays(
[
[1, 2, 3, 10, 100],
["red", "blue", "green", "pink", "white"],
],
names=("number", "color"),
),
None,
None,
),
],
)
def test_isin_multiindex(data, values, level, err):
pmdx = data
gmdx = cudf.from_pandas(data)
if err is None:
expected = pmdx.isin(values, level=level)
if isinstance(values, pd.MultiIndex):
values = cudf.from_pandas(values)
got = gmdx.isin(values, level=level)
assert_eq(got, expected)
else:
assert_exceptions_equal(
lfunc=pmdx.isin,
rfunc=gmdx.isin,
lfunc_args_and_kwargs=([values], {"level": level}),
rfunc_args_and_kwargs=([values], {"level": level}),
check_exception_type=False,
expected_error_message=re.escape(
"values need to be a Multi-Index or set/list-like tuple "
"squences when `level=None`."
),
)
@pytest.mark.parametrize(
"data",
[
pd.DataFrame(
{
"num_legs": [2, 4],
"num_wings": [2, 0],
"bird_cats": pd.Series(
["sparrow", "pigeon"],
dtype="category",
index=["falcon", "dog"],
),
},
index=["falcon", "dog"],
),
pd.DataFrame(
{"num_legs": [8, 2], "num_wings": [0, 2]},
index=["spider", "falcon"],
),
pd.DataFrame(
{
"num_legs": [8, 2, 1, 0, 2, 4, 5],
"num_wings": [2, 0, 2, 1, 2, 4, -1],
}
),
],
)
@pytest.mark.parametrize(
"values",
[
[0, 2],
{"num_wings": [0, 3]},
pd.DataFrame(
{"num_legs": [8, 2], "num_wings": [0, 2]},
index=["spider", "falcon"],
),
pd.DataFrame(
{
"num_legs": [2, 4],
"num_wings": [2, 0],
"bird_cats": pd.Series(
["sparrow", "pigeon"],
dtype="category",
index=["falcon", "dog"],
),
},
index=["falcon", "dog"],
),
["sparrow", "pigeon"],
pd.Series(["sparrow", "pigeon"], dtype="category"),
pd.Series([1, 2, 3, 4, 5]),
"abc",
123,
],
)
def test_isin_dataframe(data, values):
pdf = data
gdf = cudf.from_pandas(pdf)
if cudf.utils.dtypes.is_scalar(values):
assert_exceptions_equal(
lfunc=pdf.isin,
rfunc=gdf.isin,
lfunc_args_and_kwargs=([values],),
rfunc_args_and_kwargs=([values],),
)
else:
try:
expected = pdf.isin(values)
except ValueError as e:
            if str(e) == "Lengths must match." and not PANDAS_GE_110:
                pytest.xfail(
                    "https://github.com/pandas-dev/pandas/issues/34256"
                )
if isinstance(values, (pd.DataFrame, pd.Series)):
values = cudf.from_pandas(values)
got = gdf.isin(values)
assert_eq(got, expected)
def test_constructor_properties():
df = cudf.DataFrame()
key1 = "a"
key2 = "b"
val1 = np.array([123], dtype=np.float64)
val2 = np.array([321], dtype=np.float64)
df[key1] = val1
df[key2] = val2
# Correct use of _constructor (for DataFrame)
assert_eq(df, df._constructor({key1: val1, key2: val2}))
# Correct use of _constructor (for cudf.Series)
assert_eq(df[key1], df[key2]._constructor(val1, name=key1))
# Correct use of _constructor_sliced (for DataFrame)
assert_eq(df[key1], df._constructor_sliced(val1, name=key1))
# Correct use of _constructor_expanddim (for cudf.Series)
assert_eq(df, df[key2]._constructor_expanddim({key1: val1, key2: val2}))
# Incorrect use of _constructor_sliced (Raises for cudf.Series)
with pytest.raises(NotImplementedError):
df[key1]._constructor_sliced
# Incorrect use of _constructor_expanddim (Raises for DataFrame)
with pytest.raises(NotImplementedError):
df._constructor_expanddim
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", ALL_TYPES)
def test_df_astype_numeric_to_all(dtype, as_dtype):
if "uint" in dtype:
data = [1, 2, None, 4, 7]
elif "int" in dtype or "longlong" in dtype:
data = [1, 2, None, 4, -7]
elif "float" in dtype:
data = [1.0, 2.0, None, 4.0, np.nan, -7.0]
gdf = cudf.DataFrame()
gdf["foo"] = cudf.Series(data, dtype=dtype)
gdf["bar"] = cudf.Series(data, dtype=dtype)
insert_data = cudf.Series(data, dtype=dtype)
expect = cudf.DataFrame()
expect["foo"] = insert_data.astype(as_dtype)
expect["bar"] = insert_data.astype(as_dtype)
got = gdf.astype(as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_df_astype_string_to_other(as_dtype):
if "datetime64" in as_dtype:
# change None to "NaT" after this issue is fixed:
# https://github.com/rapidsai/cudf/issues/5117
data = ["2001-01-01", "2002-02-02", "2000-01-05", None]
elif as_dtype == "int32":
data = [1, 2, 3]
elif as_dtype == "category":
data = ["1", "2", "3", None]
elif "float" in as_dtype:
data = [1.0, 2.0, 3.0, np.nan]
insert_data = cudf.Series.from_pandas(pd.Series(data, dtype="str"))
expect_data = cudf.Series(data, dtype=as_dtype)
gdf = cudf.DataFrame()
expect = cudf.DataFrame()
gdf["foo"] = insert_data
gdf["bar"] = insert_data
expect["foo"] = expect_data
expect["bar"] = expect_data
got = gdf.astype(as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"as_dtype",
[
"int64",
"datetime64[s]",
"datetime64[us]",
"datetime64[ns]",
"str",
"category",
],
)
def test_df_astype_datetime_to_other(as_dtype):
data = [
"1991-11-20 00:00:00.000",
"2004-12-04 00:00:00.000",
"2016-09-13 00:00:00.000",
None,
]
gdf = cudf.DataFrame()
expect = cudf.DataFrame()
gdf["foo"] = cudf.Series(data, dtype="datetime64[ms]")
gdf["bar"] = cudf.Series(data, dtype="datetime64[ms]")
if as_dtype == "int64":
expect["foo"] = cudf.Series(
[690595200000, 1102118400000, 1473724800000, None], dtype="int64"
)
expect["bar"] = cudf.Series(
[690595200000, 1102118400000, 1473724800000, None], dtype="int64"
)
elif as_dtype == "str":
expect["foo"] = cudf.Series(data, dtype="str")
expect["bar"] = cudf.Series(data, dtype="str")
elif as_dtype == "category":
expect["foo"] = cudf.Series(gdf["foo"], dtype="category")
expect["bar"] = cudf.Series(gdf["bar"], dtype="category")
else:
expect["foo"] = cudf.Series(data, dtype=as_dtype)
expect["bar"] = cudf.Series(data, dtype=as_dtype)
got = gdf.astype(as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
"str",
],
)
def test_df_astype_categorical_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05", "2001-01-01"]
else:
data = [1, 2, 3, 1]
psr = pd.Series(data, dtype="category")
pdf = pd.DataFrame()
pdf["foo"] = psr
pdf["bar"] = psr
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(pdf.astype(as_dtype), gdf.astype(as_dtype))
@pytest.mark.parametrize("ordered", [True, False])
def test_df_astype_to_categorical_ordered(ordered):
psr = pd.Series([1, 2, 3, 1], dtype="category")
pdf = pd.DataFrame()
pdf["foo"] = psr
pdf["bar"] = psr
gdf = cudf.DataFrame.from_pandas(pdf)
ordered_dtype_pd = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=ordered
)
ordered_dtype_gd = cudf.CategoricalDtype.from_pandas(ordered_dtype_pd)
assert_eq(
pdf.astype(ordered_dtype_pd).astype("int32"),
gdf.astype(ordered_dtype_gd).astype("int32"),
)
@pytest.mark.parametrize(
"dtype,args",
[(dtype, {}) for dtype in ALL_TYPES]
+ [("category", {"ordered": True}), ("category", {"ordered": False})],
)
def test_empty_df_astype(dtype, args):
df = cudf.DataFrame()
kwargs = {}
kwargs.update(args)
assert_eq(df, df.astype(dtype=dtype, **kwargs))
@pytest.mark.parametrize(
"errors",
[
pytest.param(
"raise", marks=pytest.mark.xfail(reason="should raise error here")
),
pytest.param("other", marks=pytest.mark.xfail(raises=ValueError)),
"ignore",
pytest.param(
"warn", marks=pytest.mark.filterwarnings("ignore:Traceback")
),
],
)
def test_series_astype_error_handling(errors):
sr = cudf.Series(["random", "words"])
got = sr.astype("datetime64", errors=errors)
assert_eq(sr, got)
@pytest.mark.parametrize("dtype", ALL_TYPES)
def test_df_constructor_dtype(dtype):
if "datetime" in dtype:
data = ["1991-11-20", "2004-12-04", "2016-09-13", None]
elif dtype == "str":
data = ["a", "b", "c", None]
elif "float" in dtype:
data = [1.0, 0.5, -1.1, np.nan, None]
elif "bool" in dtype:
data = [True, False, None]
else:
data = [1, 2, 3, None]
sr = cudf.Series(data, dtype=dtype)
expect = cudf.DataFrame()
expect["foo"] = sr
expect["bar"] = sr
got = cudf.DataFrame({"foo": data, "bar": data}, dtype=dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"data",
[
cudf.datasets.randomdata(
nrows=10, dtypes={"a": "category", "b": int, "c": float, "d": int}
),
cudf.datasets.randomdata(
nrows=10, dtypes={"a": "category", "b": int, "c": float, "d": str}
),
cudf.datasets.randomdata(
nrows=10, dtypes={"a": bool, "b": int, "c": float, "d": str}
),
cudf.DataFrame(),
cudf.DataFrame({"a": [0, 1, 2], "b": [1, None, 3]}),
cudf.DataFrame(
{
"a": [1, 2, 3, 4],
"b": [7, np.NaN, 9, 10],
"c": [np.NaN, np.NaN, np.NaN, np.NaN],
"d": cudf.Series([None, None, None, None], dtype="int64"),
"e": [100, None, 200, None],
"f": cudf.Series([10, None, np.NaN, 11], nan_as_null=False),
}
),
cudf.DataFrame(
{
"a": [10, 11, 12, 13, 14, 15],
"b": cudf.Series(
[10, None, np.NaN, 2234, None, np.NaN], nan_as_null=False
),
}
),
],
)
@pytest.mark.parametrize(
"op", ["max", "min", "sum", "product", "mean", "var", "std"]
)
@pytest.mark.parametrize("skipna", [True, False])
def test_rowwise_ops(data, op, skipna):
gdf = data
pdf = gdf.to_pandas()
if op in ("var", "std"):
expected = getattr(pdf, op)(axis=1, ddof=0, skipna=skipna)
got = getattr(gdf, op)(axis=1, ddof=0, skipna=skipna)
else:
expected = getattr(pdf, op)(axis=1, skipna=skipna)
got = getattr(gdf, op)(axis=1, skipna=skipna)
assert_eq(expected, got, check_exact=False)
@pytest.mark.parametrize(
"op", ["max", "min", "sum", "product", "mean", "var", "std"]
)
def test_rowwise_ops_nullable_dtypes_all_null(op):
gdf = cudf.DataFrame(
{
"a": [1, 2, 3, 4],
"b": [7, np.NaN, 9, 10],
"c": [np.NaN, np.NaN, np.NaN, np.NaN],
"d": cudf.Series([None, None, None, None], dtype="int64"),
"e": [100, None, 200, None],
"f": cudf.Series([10, None, np.NaN, 11], nan_as_null=False),
}
)
expected = cudf.Series([None, None, None, None], dtype="float64")
if op in ("var", "std"):
got = getattr(gdf, op)(axis=1, ddof=0, skipna=False)
else:
got = getattr(gdf, op)(axis=1, skipna=False)
assert_eq(got.null_count, expected.null_count)
assert_eq(got, expected)
@pytest.mark.parametrize(
"op,expected",
[
(
"max",
cudf.Series(
[10.0, None, np.NaN, 2234.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"min",
cudf.Series(
[10.0, None, np.NaN, 13.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"sum",
cudf.Series(
[20.0, None, np.NaN, 2247.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"product",
cudf.Series(
[100.0, None, np.NaN, 29042.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"mean",
cudf.Series(
[10.0, None, np.NaN, 1123.5, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"var",
cudf.Series(
[0.0, None, np.NaN, 1233210.25, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"std",
cudf.Series(
[0.0, None, np.NaN, 1110.5, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
],
)
def test_rowwise_ops_nullable_dtypes_partial_null(op, expected):
gdf = cudf.DataFrame(
{
"a": [10, 11, 12, 13, 14, 15],
"b": cudf.Series(
[10, None, np.NaN, 2234, None, np.NaN], nan_as_null=False,
),
}
)
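    # The parametrized `expected` values follow from the rowwise (a, b) pairs
    # above with ddof=0, e.g. for the row (13, 2234):
    # mean = (13 + 2234) / 2 = 1123.5 and var = 1110.5 ** 2 = 1233210.25.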
if op in ("var", "std"):
got = getattr(gdf, op)(axis=1, ddof=0, skipna=False)
else:
got = getattr(gdf, op)(axis=1, skipna=False)
assert_eq(got.null_count, expected.null_count)
assert_eq(got, expected)
@pytest.mark.parametrize(
"op,expected",
[
(
"max",
cudf.Series([10, None, None, 2234, None, 453], dtype="int64",),
),
("min", cudf.Series([10, None, None, 13, None, 15], dtype="int64",),),
(
"sum",
cudf.Series([20, None, None, 2247, None, 468], dtype="int64",),
),
(
"product",
cudf.Series([100, None, None, 29042, None, 6795], dtype="int64",),
),
(
"mean",
cudf.Series(
[10.0, None, None, 1123.5, None, 234.0], dtype="float32",
),
),
(
"var",
cudf.Series(
[0.0, None, None, 1233210.25, None, 47961.0], dtype="float32",
),
),
(
"std",
cudf.Series(
[0.0, None, None, 1110.5, None, 219.0], dtype="float32",
),
),
],
)
def test_rowwise_ops_nullable_int_dtypes(op, expected):
gdf = cudf.DataFrame(
{
"a": [10, 11, None, 13, None, 15],
"b": cudf.Series(
[10, None, 323, 2234, None, 453], nan_as_null=False,
),
}
)
if op in ("var", "std"):
got = getattr(gdf, op)(axis=1, ddof=0, skipna=False)
else:
got = getattr(gdf, op)(axis=1, skipna=False)
assert_eq(got.null_count, expected.null_count)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[ms]"
),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[ns]"
),
"t3": cudf.Series(
["1960-08-31 06:00:00", "2030-08-02 10:00:00"], dtype="<M8[s]"
),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[us]"
),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[ms]"
),
"i1": cudf.Series([1001, 2002], dtype="int64"),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(["1940-08-31 06:00:00", None], dtype="<M8[ms]"),
"i1": cudf.Series([1001, 2002], dtype="int64"),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"i1": cudf.Series([1001, 2002], dtype="int64"),
"f1": cudf.Series([-100.001, 123.456], dtype="float64"),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"i1": cudf.Series([1001, 2002], dtype="int64"),
"f1": cudf.Series([-100.001, 123.456], dtype="float64"),
"b1": cudf.Series([True, False], dtype="bool"),
},
],
)
@pytest.mark.parametrize("op", ["max", "min"])
@pytest.mark.parametrize("skipna", [True, False])
def test_rowwise_ops_datetime_dtypes(data, op, skipna):
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
got = getattr(gdf, op)(axis=1, skipna=skipna)
expected = getattr(pdf, op)(axis=1, skipna=skipna)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data,op,skipna",
[
(
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ms]",
),
"t2": cudf.Series(
["1940-08-31 06:00:00", None], dtype="<M8[ms]"
),
},
"max",
True,
),
(
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ms]",
),
"t2": cudf.Series(
["1940-08-31 06:00:00", None], dtype="<M8[ms]"
),
},
"min",
False,
),
(
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ms]",
),
"t2": cudf.Series(
["1940-08-31 06:00:00", None], dtype="<M8[ms]"
),
},
"min",
True,
),
],
)
def test_rowwise_ops_datetime_dtypes_2(data, op, skipna):
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
got = getattr(gdf, op)(axis=1, skipna=skipna)
expected = getattr(pdf, op)(axis=1, skipna=skipna)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
(
{
"t1": pd.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ns]",
),
"t2": pd.Series(
["1940-08-31 06:00:00", pd.NaT], dtype="<M8[ns]"
),
}
)
],
)
def test_rowwise_ops_datetime_dtypes_pdbug(data):
pdf = pd.DataFrame(data)
gdf = cudf.from_pandas(pdf)
expected = pdf.max(axis=1, skipna=False)
got = gdf.max(axis=1, skipna=False)
if PANDAS_GE_120:
assert_eq(got, expected)
else:
# PANDAS BUG: https://github.com/pandas-dev/pandas/issues/36907
with pytest.raises(AssertionError, match="numpy array are different"):
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[5.0, 6.0, 7.0],
"single value",
np.array(1, dtype="int64"),
np.array(0.6273643, dtype="float64"),
],
)
def test_insert(data):
pdf = pd.DataFrame.from_dict({"A": [1, 2, 3], "B": ["a", "b", "c"]})
gdf = cudf.DataFrame.from_pandas(pdf)
# insertion by index
pdf.insert(0, "foo", data)
gdf.insert(0, "foo", data)
assert_eq(pdf, gdf)
pdf.insert(3, "bar", data)
gdf.insert(3, "bar", data)
assert_eq(pdf, gdf)
pdf.insert(1, "baz", data)
gdf.insert(1, "baz", data)
assert_eq(pdf, gdf)
# pandas insert doesn't support negative indexing
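    # so insert at len(pdf.columns) there; gdf.insert(-1, ...) is expected to
    # resolve to the same (last) position.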
pdf.insert(len(pdf.columns), "qux", data)
gdf.insert(-1, "qux", data)
assert_eq(pdf, gdf)
def test_cov():
gdf = cudf.datasets.randomdata(10)
pdf = gdf.to_pandas()
assert_eq(pdf.cov(), gdf.cov())
@pytest.mark.xfail(reason="cupy-based cov does not support nulls")
def test_cov_nans():
pdf = pd.DataFrame()
pdf["a"] = [None, None, None, 2.00758632, None]
pdf["b"] = [0.36403686, None, None, None, None]
pdf["c"] = [None, None, None, 0.64882227, None]
pdf["d"] = [None, -1.46863125, None, 1.22477948, -0.06031689]
gdf = cudf.from_pandas(pdf)
assert_eq(pdf.cov(), gdf.cov())
@pytest.mark.parametrize(
"gsr",
[
cudf.Series([4, 2, 3]),
cudf.Series([4, 2, 3], index=["a", "b", "c"]),
cudf.Series([4, 2, 3], index=["a", "b", "d"]),
cudf.Series([4, 2], index=["a", "b"]),
cudf.Series([4, 2, 3], index=cudf.core.index.RangeIndex(0, 3)),
pytest.param(
cudf.Series([4, 2, 3, 4, 5], index=["a", "b", "d", "0", "12"]),
marks=pytest.mark.xfail,
),
],
)
@pytest.mark.parametrize("colnames", [["a", "b", "c"], [0, 1, 2]])
@pytest.mark.parametrize(
"op",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_df_sr_binop(gsr, colnames, op):
data = [[3.0, 2.0, 5.0], [3.0, None, 5.0], [6.0, 7.0, np.nan]]
data = dict(zip(colnames, data))
gsr = gsr.astype("float64")
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas(nullable=True)
psr = gsr.to_pandas(nullable=True)
expect = op(pdf, psr)
got = op(gdf, gsr).to_pandas(nullable=True)
assert_eq(expect, got, check_dtype=False)
expect = op(psr, pdf)
got = op(gsr, gdf).to_pandas(nullable=True)
assert_eq(expect, got, check_dtype=False)
@pytest.mark.parametrize(
"op",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
# comparison ops will temporarily XFAIL
# see PR https://github.com/rapidsai/cudf/pull/7491
pytest.param(operator.eq, marks=pytest.mark.xfail()),
pytest.param(operator.lt, marks=pytest.mark.xfail()),
pytest.param(operator.le, marks=pytest.mark.xfail()),
pytest.param(operator.gt, marks=pytest.mark.xfail()),
pytest.param(operator.ge, marks=pytest.mark.xfail()),
pytest.param(operator.ne, marks=pytest.mark.xfail()),
],
)
@pytest.mark.parametrize(
"gsr", [cudf.Series([1, 2, 3, 4, 5], index=["a", "b", "d", "0", "12"])]
)
def test_df_sr_binop_col_order(gsr, op):
colnames = [0, 1, 2]
data = [[0, 2, 5], [3, None, 5], [6, 7, np.nan]]
data = dict(zip(colnames, data))
gdf = cudf.DataFrame(data)
pdf = pd.DataFrame.from_dict(data)
psr = gsr.to_pandas()
expect = op(pdf, psr).astype("float")
out = op(gdf, gsr).astype("float")
got = out[expect.columns]
assert_eq(expect, got)
@pytest.mark.parametrize("set_index", [None, "A", "C", "D"])
@pytest.mark.parametrize("index", [True, False])
@pytest.mark.parametrize("deep", [True, False])
def test_memory_usage(deep, index, set_index):
# Testing numerical/datetime by comparing with pandas
# (string and categorical columns will be different)
rows = int(100)
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int64"),
"B": np.arange(rows, dtype="int32"),
"C": np.arange(rows, dtype="float64"),
}
)
df["D"] = pd.to_datetime(df.A)
if set_index:
df = df.set_index(set_index)
gdf = cudf.from_pandas(df)
if index and set_index is None:
        # Special case: a default RangeIndex reports a memory usage of 0
assert gdf.index.memory_usage(deep=deep) == 0
else:
# Check for Series only
assert df["B"].memory_usage(index=index, deep=deep) == gdf[
"B"
].memory_usage(index=index, deep=deep)
# Check for entire DataFrame
assert_eq(
df.memory_usage(index=index, deep=deep).sort_index(),
gdf.memory_usage(index=index, deep=deep).sort_index(),
)
@pytest.mark.xfail
def test_memory_usage_string():
rows = int(100)
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int32"),
"B": np.random.choice(["apple", "banana", "orange"], rows),
}
)
gdf = cudf.from_pandas(df)
# Check deep=False (should match pandas)
assert gdf.B.memory_usage(deep=False, index=False) == df.B.memory_usage(
deep=False, index=False
)
# Check string column
assert gdf.B.memory_usage(deep=True, index=False) == df.B.memory_usage(
deep=True, index=False
)
# Check string index
assert gdf.set_index("B").index.memory_usage(
deep=True
) == df.B.memory_usage(deep=True, index=False)
def test_memory_usage_cat():
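    # Deep memory usage of a categorical column (and index) should equal the
    # combined size of its underlying categories and codes columns.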
rows = int(100)
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int32"),
"B": np.random.choice(["apple", "banana", "orange"], rows),
}
)
df["B"] = df.B.astype("category")
gdf = cudf.from_pandas(df)
expected = (
gdf.B._column.cat().categories.__sizeof__()
+ gdf.B._column.cat().codes.__sizeof__()
)
# Check cat column
assert gdf.B.memory_usage(deep=True, index=False) == expected
# Check cat index
assert gdf.set_index("B").index.memory_usage(deep=True) == expected
def test_memory_usage_list():
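    # Memory usage of a list column should equal the sum of its offsets and
    # elements child columns.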
df = cudf.DataFrame({"A": [[0, 1, 2, 3], [4, 5, 6], [7, 8], [9]]})
expected = (
df.A._column.offsets._memory_usage()
+ df.A._column.elements._memory_usage()
)
assert expected == df.A.memory_usage()
@pytest.mark.xfail
def test_memory_usage_multi():
rows = int(100)
deep = True
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int32"),
"B": np.random.choice(np.arange(3, dtype="int64"), rows),
"C": np.random.choice(np.arange(3, dtype="float64"), rows),
}
).set_index(["B", "C"])
gdf = cudf.from_pandas(df)
# Assume MultiIndex memory footprint is just that
# of the underlying columns, levels, and codes
expect = rows * 16 # Source Columns
expect += rows * 16 # Codes
expect += 3 * 8 # Level 0
expect += 3 * 8 # Level 1
assert expect == gdf.index.memory_usage(deep=deep)
@pytest.mark.parametrize(
"list_input",
[
pytest.param([1, 2, 3, 4], id="smaller"),
pytest.param([1, 2, 3, 4, 5, 6], id="larger"),
],
)
@pytest.mark.parametrize(
"key",
[
pytest.param("list_test", id="new_column"),
pytest.param("id", id="existing_column"),
],
)
def test_setitem_diff_size_list(list_input, key):
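    # Assigning a plain list whose length differs from the number of rows
    # should raise, since lists are not aligned by index.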
gdf = cudf.datasets.randomdata(5)
with pytest.raises(
ValueError, match=("All columns must be of equal length")
):
gdf[key] = list_input
@pytest.mark.parametrize(
"series_input",
[
pytest.param(cudf.Series([1, 2, 3, 4]), id="smaller_cudf"),
pytest.param(cudf.Series([1, 2, 3, 4, 5, 6]), id="larger_cudf"),
pytest.param(cudf.Series([1, 2, 3], index=[4, 5, 6]), id="index_cudf"),
pytest.param(pd.Series([1, 2, 3, 4]), id="smaller_pandas"),
pytest.param(pd.Series([1, 2, 3, 4, 5, 6]), id="larger_pandas"),
pytest.param(pd.Series([1, 2, 3], index=[4, 5, 6]), id="index_pandas"),
],
)
@pytest.mark.parametrize(
"key",
[
pytest.param("list_test", id="new_column"),
pytest.param("id", id="existing_column"),
],
)
def test_setitem_diff_size_series(series_input, key):
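    # Series assignment aligns on the index, so length mismatches are allowed
    # and unmatched positions become missing values.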
gdf = cudf.datasets.randomdata(5)
pdf = gdf.to_pandas()
pandas_input = series_input
if isinstance(pandas_input, cudf.Series):
pandas_input = pandas_input.to_pandas()
expect = pdf
expect[key] = pandas_input
got = gdf
got[key] = series_input
    # Pandas uses NaN and typecasts to float64 when there are missing values
    # after alignment, so typecast both sides to float64 for the comparison
expect = expect.astype("float64")
got = got.astype("float64")
assert_eq(expect, got)
def test_tupleize_cols_False_set():
pdf = pd.DataFrame()
gdf = cudf.DataFrame()
pdf[("a", "b")] = [1]
gdf[("a", "b")] = [1]
assert_eq(pdf, gdf)
assert_eq(pdf.columns, gdf.columns)
def test_init_multiindex_from_dict():
pdf = pd.DataFrame({("a", "b"): [1]})
gdf = cudf.DataFrame({("a", "b"): [1]})
assert_eq(pdf, gdf)
assert_eq(pdf.columns, gdf.columns)
def test_change_column_dtype_in_empty():
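    # Casting a column of an empty DataFrame should still change its dtype.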
pdf = pd.DataFrame({"a": [], "b": []})
gdf = cudf.from_pandas(pdf)
assert_eq(pdf, gdf)
pdf["b"] = pdf["b"].astype("int64")
gdf["b"] = gdf["b"].astype("int64")
assert_eq(pdf, gdf)
def test_dataframe_from_table_empty_index():
df = cudf.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
odict = df._data
tbl = cudf._lib.table.Table(odict)
result = cudf.DataFrame._from_table(tbl) # noqa: F841
@pytest.mark.parametrize("dtype", ["int64", "str"])
def test_dataframe_from_dictionary_series_same_name_index(dtype):
pd_idx1 = pd.Index([1, 2, 0], name="test_index").astype(dtype)
pd_idx2 = pd.Index([2, 0, 1], name="test_index").astype(dtype)
pd_series1 = pd.Series([1, 2, 3], index=pd_idx1)
pd_series2 = pd.Series([1, 2, 3], index=pd_idx2)
gd_idx1 = cudf.from_pandas(pd_idx1)
gd_idx2 = cudf.from_pandas(pd_idx2)
gd_series1 = cudf.Series([1, 2, 3], index=gd_idx1)
gd_series2 = cudf.Series([1, 2, 3], index=gd_idx2)
expect = pd.DataFrame({"a": pd_series1, "b": pd_series2})
got = cudf.DataFrame({"a": gd_series1, "b": gd_series2})
if dtype == "str":
# Pandas actually loses its index name erroneously here...
expect.index.name = "test_index"
assert_eq(expect, got)
assert expect.index.names == got.index.names
@pytest.mark.parametrize(
"arg", [slice(2, 8, 3), slice(1, 20, 4), slice(-2, -6, -2)]
)
def test_dataframe_strided_slice(arg):
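    # Strided slicing of a MultiIndex-ed DataFrame should match pandas.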
mul = pd.DataFrame(
{
"Index": [1, 2, 3, 4, 5, 6, 7, 8, 9],
"AlphaIndex": ["a", "b", "c", "d", "e", "f", "g", "h", "i"],
}
)
pdf = pd.DataFrame(
{"Val": [10, 9, 8, 7, 6, 5, 4, 3, 2]},
index=pd.MultiIndex.from_frame(mul),
)
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf[arg]
got = gdf[arg]
assert_eq(expect, got)
@pytest.mark.parametrize(
"data,condition,other,error",
[
(pd.Series(range(5)), pd.Series(range(5)) > 0, None, None),
(pd.Series(range(5)), pd.Series(range(5)) > 1, None, None),
(pd.Series(range(5)), pd.Series(range(5)) > 1, 10, None),
(
pd.Series(range(5)),
pd.Series(range(5)) > 1,
pd.Series(range(5, 10)),
None,
),
(
pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"]),
(
pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"])
% 3
)
== 0,
-pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"]),
None,
),
(
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}),
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}) == 4,
None,
None,
),
(
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}),
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}) != 4,
None,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[True, True, True],
None,
ValueError,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[True, True, True, False],
None,
ValueError,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[[True, True, True, False], [True, True, True, False]],
None,
ValueError,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[[True, True], [False, True], [True, False], [False, True]],
None,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
cuda.to_device(
np.array(
[[True, True], [False, True], [True, False], [False, True]]
)
),
None,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
cupy.array(
[[True, True], [False, True], [True, False], [False, True]]
),
17,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[[True, True], [False, True], [True, False], [False, True]],
17,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[
[True, True, False, True],
[True, True, False, True],
[True, True, False, True],
[True, True, False, True],
],
None,
ValueError,
),
(
pd.Series([1, 2, np.nan]),
pd.Series([1, 2, np.nan]) == 4,
None,
None,
),
(
pd.Series([1, 2, np.nan]),
pd.Series([1, 2, np.nan]) != 4,
None,
None,
),
(
pd.Series([4, np.nan, 6]),
pd.Series([4, np.nan, 6]) == 4,
None,
None,
),
(
pd.Series([4, np.nan, 6]),
pd.Series([4, np.nan, 6]) != 4,
None,
None,
),
(
pd.Series([4, np.nan, 6], dtype="category"),
pd.Series([4, np.nan, 6], dtype="category") != 4,
None,
None,
),
(
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category"),
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category") == "b",
None,
None,
),
(
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category"),
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category") == "b",
"s",
None,
),
(
pd.Series([1, 2, 3, 2, 5]),
pd.Series([1, 2, 3, 2, 5]) == 2,
pd.DataFrame(
{
"a": pd.Series([1, 2, 3, 2, 5]),
"b": pd.Series([1, 2, 3, 2, 5]),
}
),
NotImplementedError,
),
],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_df_sr_mask_where(data, condition, other, error, inplace):
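    # Exercise DataFrame/Series.where and .mask against pandas for boolean
    # conditions given as pandas objects, lists, cupy and numba device arrays,
    # comparing both the results and any raised exceptions.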
ps_where = data
gs_where = cudf.from_pandas(data)
ps_mask = ps_where.copy(deep=True)
gs_mask = gs_where.copy(deep=True)
if hasattr(condition, "__cuda_array_interface__"):
if type(condition).__module__.split(".")[0] == "cupy":
ps_condition = cupy.asnumpy(condition)
else:
ps_condition = np.array(condition).astype("bool")
else:
ps_condition = condition
if type(condition).__module__.split(".")[0] == "pandas":
gs_condition = cudf.from_pandas(condition)
else:
gs_condition = condition
ps_other = other
if type(other).__module__.split(".")[0] == "pandas":
gs_other = cudf.from_pandas(other)
else:
gs_other = other
if error is None:
expect_where = ps_where.where(
ps_condition, other=ps_other, inplace=inplace
)
got_where = gs_where.where(
gs_condition, other=gs_other, inplace=inplace
)
expect_mask = ps_mask.mask(
ps_condition, other=ps_other, inplace=inplace
)
got_mask = gs_mask.mask(gs_condition, other=gs_other, inplace=inplace)
if inplace:
expect_where = ps_where
got_where = gs_where
expect_mask = ps_mask
got_mask = gs_mask
if pd.api.types.is_categorical_dtype(expect_where):
np.testing.assert_array_equal(
expect_where.cat.codes,
got_where.cat.codes.astype(expect_where.cat.codes.dtype)
.fillna(-1)
.to_array(),
)
assert_eq(expect_where.cat.categories, got_where.cat.categories)
np.testing.assert_array_equal(
expect_mask.cat.codes,
got_mask.cat.codes.astype(expect_mask.cat.codes.dtype)
.fillna(-1)
.to_array(),
)
assert_eq(expect_mask.cat.categories, got_mask.cat.categories)
else:
assert_eq(
expect_where.fillna(-1),
got_where.fillna(-1),
check_dtype=False,
)
assert_eq(
expect_mask.fillna(-1), got_mask.fillna(-1), check_dtype=False
)
else:
assert_exceptions_equal(
lfunc=ps_where.where,
rfunc=gs_where.where,
lfunc_args_and_kwargs=(
[ps_condition],
{"other": ps_other, "inplace": inplace},
),
rfunc_args_and_kwargs=(
[gs_condition],
{"other": gs_other, "inplace": inplace},
),
compare_error_message=False
if error is NotImplementedError
else True,
)
assert_exceptions_equal(
lfunc=ps_mask.mask,
rfunc=gs_mask.mask,
lfunc_args_and_kwargs=(
[ps_condition],
{"other": ps_other, "inplace": inplace},
),
rfunc_args_and_kwargs=(
[gs_condition],
{"other": gs_other, "inplace": inplace},
),
compare_error_message=False,
)
@pytest.mark.parametrize(
"data,condition,other,has_cat",
[
(
pd.DataFrame(
{
"a": pd.Series(["a", "a", "b", "c", "a", "d", "d", "a"]),
"b": pd.Series(["o", "p", "q", "e", "p", "p", "a", "a"]),
}
),
pd.DataFrame(
{
"a": pd.Series(["a", "a", "b", "c", "a", "d", "d", "a"]),
"b": pd.Series(["o", "p", "q", "e", "p", "p", "a", "a"]),
}
)
!= "a",
None,
None,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
!= "a",
None,
True,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
== "a",
None,
True,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
!= "a",
"a",
True,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
== "a",
"a",
True,
),
],
)
def test_df_string_cat_types_mask_where(data, condition, other, has_cat):
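    # where/mask on string and categorical columns should match pandas; the
    # plain-string case is compared after filling nulls and casting to str.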
ps = data
gs = cudf.from_pandas(data)
ps_condition = condition
if type(condition).__module__.split(".")[0] == "pandas":
gs_condition = cudf.from_pandas(condition)
else:
gs_condition = condition
ps_other = other
if type(other).__module__.split(".")[0] == "pandas":
gs_other = cudf.from_pandas(other)
else:
gs_other = other
expect_where = ps.where(ps_condition, other=ps_other)
got_where = gs.where(gs_condition, other=gs_other)
expect_mask = ps.mask(ps_condition, other=ps_other)
got_mask = gs.mask(gs_condition, other=gs_other)
if has_cat is None:
assert_eq(
expect_where.fillna(-1).astype("str"),
got_where.fillna(-1),
check_dtype=False,
)
assert_eq(
expect_mask.fillna(-1).astype("str"),
got_mask.fillna(-1),
check_dtype=False,
)
else:
assert_eq(expect_where, got_where, check_dtype=False)
assert_eq(expect_mask, got_mask, check_dtype=False)
@pytest.mark.parametrize(
"data,expected_upcast_type,error",
[
(
pd.Series([random.random() for _ in range(10)], dtype="float32"),
np.dtype("float32"),
None,
),
(
pd.Series([random.random() for _ in range(10)], dtype="float16"),
np.dtype("float32"),
None,
),
(
pd.Series([random.random() for _ in range(10)], dtype="float64"),
np.dtype("float64"),
None,
),
(
pd.Series([random.random() for _ in range(10)], dtype="float128"),
None,
NotImplementedError,
),
],
)
def test_from_pandas_unsupported_types(data, expected_upcast_type, error):
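    # float16 input should be upcast to float32, float32/float64 preserved,
    # and float128 should raise NotImplementedError.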
pdf = pd.DataFrame({"one_col": data})
    if error is NotImplementedError:
with pytest.raises(error):
cudf.from_pandas(data)
with pytest.raises(error):
cudf.Series(data)
with pytest.raises(error):
cudf.from_pandas(pdf)
with pytest.raises(error):
cudf.DataFrame(pdf)
else:
df = cudf.from_pandas(data)
assert_eq(data, df, check_dtype=False)
assert df.dtype == expected_upcast_type
df = cudf.Series(data)
assert_eq(data, df, check_dtype=False)
assert df.dtype == expected_upcast_type
df = cudf.from_pandas(pdf)
assert_eq(pdf, df, check_dtype=False)
assert df["one_col"].dtype == expected_upcast_type
df = cudf.DataFrame(pdf)
assert_eq(pdf, df, check_dtype=False)
assert df["one_col"].dtype == expected_upcast_type
@pytest.mark.parametrize("nan_as_null", [True, False])
@pytest.mark.parametrize("index", [None, "a", ["a", "b"]])
def test_from_pandas_nan_as_null(nan_as_null, index):
data = [np.nan, 2.0, 3.0]
if index is None:
pdf = pd.DataFrame({"a": data, "b": data})
expected = cudf.DataFrame(
{
"a": column.as_column(data, nan_as_null=nan_as_null),
"b": column.as_column(data, nan_as_null=nan_as_null),
}
)
else:
pdf = pd.DataFrame({"a": data, "b": data}).set_index(index)
expected = cudf.DataFrame(
{
"a": column.as_column(data, nan_as_null=nan_as_null),
"b": column.as_column(data, nan_as_null=nan_as_null),
}
)
expected = expected.set_index(index)
got = cudf.from_pandas(pdf, nan_as_null=nan_as_null)
assert_eq(expected, got)
@pytest.mark.parametrize("nan_as_null", [True, False])
def test_from_pandas_for_series_nan_as_null(nan_as_null):
data = [np.nan, 2.0, 3.0]
psr = pd.Series(data)
expected = cudf.Series(column.as_column(data, nan_as_null=nan_as_null))
got = cudf.from_pandas(psr, nan_as_null=nan_as_null)
assert_eq(expected, got)
@pytest.mark.parametrize("copy", [True, False])
def test_df_series_dataframe_astype_copy(copy):
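    # astype with the copy flag should mirror pandas copy semantics; writing
    # to the cast result must affect the originals the same way in both
    # libraries.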
gdf = cudf.DataFrame({"col1": [1, 2], "col2": [3, 4]})
pdf = gdf.to_pandas()
assert_eq(
gdf.astype(dtype="float", copy=copy),
pdf.astype(dtype="float", copy=copy),
)
assert_eq(gdf, pdf)
gsr = cudf.Series([1, 2])
psr = gsr.to_pandas()
assert_eq(
gsr.astype(dtype="float", copy=copy),
psr.astype(dtype="float", copy=copy),
)
assert_eq(gsr, psr)
gsr = cudf.Series([1, 2])
psr = gsr.to_pandas()
actual = gsr.astype(dtype="int64", copy=copy)
expected = psr.astype(dtype="int64", copy=copy)
assert_eq(expected, actual)
assert_eq(gsr, psr)
actual[0] = 3
expected[0] = 3
assert_eq(gsr, psr)
@pytest.mark.parametrize("copy", [True, False])
def test_df_series_dataframe_astype_dtype_dict(copy):
gdf = cudf.DataFrame({"col1": [1, 2], "col2": [3, 4]})
pdf = gdf.to_pandas()
assert_eq(
gdf.astype(dtype={"col1": "float"}, copy=copy),
pdf.astype(dtype={"col1": "float"}, copy=copy),
)
assert_eq(gdf, pdf)
gsr = cudf.Series([1, 2])
psr = gsr.to_pandas()
assert_eq(
gsr.astype(dtype={None: "float"}, copy=copy),
psr.astype(dtype={None: "float"}, copy=copy),
)
assert_eq(gsr, psr)
assert_exceptions_equal(
lfunc=psr.astype,
rfunc=gsr.astype,
lfunc_args_and_kwargs=([], {"dtype": {"a": "float"}, "copy": copy}),
rfunc_args_and_kwargs=([], {"dtype": {"a": "float"}, "copy": copy}),
)
gsr = cudf.Series([1, 2])
psr = gsr.to_pandas()
actual = gsr.astype({None: "int64"}, copy=copy)
expected = psr.astype({None: "int64"}, copy=copy)
assert_eq(expected, actual)
assert_eq(gsr, psr)
actual[0] = 3
expected[0] = 3
assert_eq(gsr, psr)
@pytest.mark.parametrize(
"data,columns",
[
([1, 2, 3, 100, 112, 35464], ["a"]),
(range(100), None),
([], None),
((-10, 21, 32, 32, 1, 2, 3), ["p"]),
((), None),
([[1, 2, 3], [1, 2, 3]], ["col1", "col2", "col3"]),
([range(100), range(100)], ["range" + str(i) for i in range(100)]),
(((1, 2, 3), (1, 2, 3)), ["tuple0", "tuple1", "tuple2"]),
([[1, 2, 3]], ["list col1", "list col2", "list col3"]),
([range(100)], ["range" + str(i) for i in range(100)]),
(((1, 2, 3),), ["k1", "k2", "k3"]),
],
)
def test_dataframe_init_1d_list(data, columns):
expect = pd.DataFrame(data, columns=columns)
actual = cudf.DataFrame(data, columns=columns)
assert_eq(
expect, actual, check_index_type=False if len(data) == 0 else True
)
expect = pd.DataFrame(data, columns=None)
actual = cudf.DataFrame(data, columns=None)
assert_eq(
expect, actual, check_index_type=False if len(data) == 0 else True
)
@pytest.mark.parametrize(
"data,cols,index",
[
(
np.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "b"],
["a", "b", "c", "d"],
),
(
np.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "b"],
[0, 20, 30, 10],
),
(
np.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "b"],
[0, 1, 2, 3],
),
(np.array([11, 123, -2342, 232]), ["a"], [1, 2, 11, 12]),
(np.array([11, 123, -2342, 232]), ["a"], ["khsdjk", "a", "z", "kk"]),
(
cupy.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "z"],
["a", "z", "a", "z"],
),
(cupy.array([11, 123, -2342, 232]), ["z"], [0, 1, 1, 0]),
(cupy.array([11, 123, -2342, 232]), ["z"], [1, 2, 3, 4]),
(cupy.array([11, 123, -2342, 232]), ["z"], ["a", "z", "d", "e"]),
(np.random.randn(2, 4), ["a", "b", "c", "d"], ["a", "b"]),
(np.random.randn(2, 4), ["a", "b", "c", "d"], [1, 0]),
(cupy.random.randn(2, 4), ["a", "b", "c", "d"], ["a", "b"]),
(cupy.random.randn(2, 4), ["a", "b", "c", "d"], [1, 0]),
],
)
def test_dataframe_init_from_arrays_cols(data, cols, index):
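    # Build DataFrames from numpy/cupy/numba arrays with explicit columns and
    # index and compare against pandas (device arrays are copied to host for
    # the pandas reference).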
gd_data = data
if isinstance(data, cupy.core.ndarray):
# pandas can't handle cupy arrays in general
pd_data = data.get()
# additional test for building DataFrame with gpu array whose
# cuda array interface has no `descr` attribute
numba_data = cuda.as_cuda_array(data)
else:
pd_data = data
numba_data = None
# verify with columns & index
pdf = pd.DataFrame(pd_data, columns=cols, index=index)
gdf = cudf.DataFrame(gd_data, columns=cols, index=index)
assert_eq(pdf, gdf, check_dtype=False)
# verify with columns
pdf = pd.DataFrame(pd_data, columns=cols)
gdf = cudf.DataFrame(gd_data, columns=cols)
assert_eq(pdf, gdf, check_dtype=False)
pdf = pd.DataFrame(pd_data)
gdf = cudf.DataFrame(gd_data)
assert_eq(pdf, gdf, check_dtype=False)
if numba_data is not None:
gdf = cudf.DataFrame(numba_data)
assert_eq(pdf, gdf, check_dtype=False)
@pytest.mark.parametrize(
"col_data",
[
range(5),
["a", "b", "x", "y", "z"],
[1.0, 0.213, 0.34332],
["a"],
[1],
[0.2323],
[],
],
)
@pytest.mark.parametrize(
"assign_val",
[
1,
2,
np.array(2),
cupy.array(2),
0.32324,
np.array(0.34248),
cupy.array(0.34248),
"abc",
np.array("abc", dtype="object"),
np.array("abc", dtype="str"),
np.array("abc"),
None,
],
)
def test_dataframe_assign_scalar(col_data, assign_val):
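    # Assigning a scalar (including 0-d numpy/cupy arrays, strings and None)
    # to a new column should broadcast over the existing rows like pandas.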
pdf = pd.DataFrame({"a": col_data})
gdf = cudf.DataFrame({"a": col_data})
pdf["b"] = (
cupy.asnumpy(assign_val)
if isinstance(assign_val, cupy.ndarray)
else assign_val
)
gdf["b"] = assign_val
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"col_data",
[
1,
2,
np.array(2),
cupy.array(2),
0.32324,
np.array(0.34248),
cupy.array(0.34248),
"abc",
np.array("abc", dtype="object"),
np.array("abc", dtype="str"),
np.array("abc"),
None,
],
)
@pytest.mark.parametrize(
"assign_val",
[
1,
2,
np.array(2),
cupy.array(2),
0.32324,
np.array(0.34248),
cupy.array(0.34248),
"abc",
np.array("abc", dtype="object"),
np.array("abc", dtype="str"),
np.array("abc"),
None,
],
)
def test_dataframe_assign_scalar_with_scalar_cols(col_data, assign_val):
pdf = pd.DataFrame(
{
"a": cupy.asnumpy(col_data)
if isinstance(col_data, cupy.ndarray)
else col_data
},
index=["dummy_mandatory_index"],
)
gdf = cudf.DataFrame({"a": col_data}, index=["dummy_mandatory_index"])
pdf["b"] = (
cupy.asnumpy(assign_val)
if isinstance(assign_val, cupy.ndarray)
else assign_val
)
gdf["b"] = assign_val
assert_eq(pdf, gdf)
def test_dataframe_info_basic():
buffer = io.StringIO()
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
StringIndex: 10 entries, a to 1111
Data columns (total 10 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 0 10 non-null float64
1 1 10 non-null float64
2 2 10 non-null float64
3 3 10 non-null float64
4 4 10 non-null float64
5 5 10 non-null float64
6 6 10 non-null float64
7 7 10 non-null float64
8 8 10 non-null float64
9 9 10 non-null float64
dtypes: float64(10)
memory usage: 859.0+ bytes
"""
)
df = pd.DataFrame(
np.random.randn(10, 10),
index=["a", "2", "3", "4", "5", "6", "7", "8", "100", "1111"],
)
cudf.from_pandas(df).info(buf=buffer, verbose=True)
s = buffer.getvalue()
assert str_cmp == s
def test_dataframe_info_verbose_mem_usage():
buffer = io.StringIO()
df = pd.DataFrame({"a": [1, 2, 3], "b": ["safdas", "assa", "asdasd"]})
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 3 entries, 0 to 2
Data columns (total 2 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 a 3 non-null int64
1 b 3 non-null object
dtypes: int64(1), object(1)
memory usage: 56.0+ bytes
"""
)
cudf.from_pandas(df).info(buf=buffer, verbose=True)
s = buffer.getvalue()
assert str_cmp == s
buffer.truncate(0)
buffer.seek(0)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 3 entries, 0 to 2
Columns: 2 entries, a to b
dtypes: int64(1), object(1)
memory usage: 56.0+ bytes
"""
)
cudf.from_pandas(df).info(buf=buffer, verbose=False)
s = buffer.getvalue()
assert str_cmp == s
buffer.truncate(0)
buffer.seek(0)
df = pd.DataFrame(
{"a": [1, 2, 3], "b": ["safdas", "assa", "asdasd"]},
index=["sdfdsf", "sdfsdfds", "dsfdf"],
)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
StringIndex: 3 entries, sdfdsf to dsfdf
Data columns (total 2 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 a 3 non-null int64
1 b 3 non-null object
dtypes: int64(1), object(1)
memory usage: 91.0 bytes
"""
)
cudf.from_pandas(df).info(buf=buffer, verbose=True, memory_usage="deep")
s = buffer.getvalue()
assert str_cmp == s
buffer.truncate(0)
buffer.seek(0)
int_values = [1, 2, 3, 4, 5]
text_values = ["alpha", "beta", "gamma", "delta", "epsilon"]
float_values = [0.0, 0.25, 0.5, 0.75, 1.0]
df = cudf.DataFrame(
{
"int_col": int_values,
"text_col": text_values,
"float_col": float_values,
}
)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Data columns (total 3 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 int_col 5 non-null int64
1 text_col 5 non-null object
2 float_col 5 non-null float64
dtypes: float64(1), int64(1), object(1)
memory usage: 130.0 bytes
"""
)
df.info(buf=buffer, verbose=True, memory_usage="deep")
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
def test_dataframe_info_null_counts():
int_values = [1, 2, 3, 4, 5]
text_values = ["alpha", "beta", "gamma", "delta", "epsilon"]
float_values = [0.0, 0.25, 0.5, 0.75, 1.0]
df = cudf.DataFrame(
{
"int_col": int_values,
"text_col": text_values,
"float_col": float_values,
}
)
buffer = io.StringIO()
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Data columns (total 3 columns):
# Column Dtype
--- ------ -----
0 int_col int64
1 text_col object
2 float_col float64
dtypes: float64(1), int64(1), object(1)
memory usage: 130.0+ bytes
"""
)
df.info(buf=buffer, verbose=True, null_counts=False)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
df.info(buf=buffer, verbose=True, max_cols=0)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
df = cudf.DataFrame()
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 0 entries
Empty DataFrame"""
)
df.info(buf=buffer, verbose=True)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
df = cudf.DataFrame(
{
"a": [1, 2, 3, None, 10, 11, 12, None],
"b": ["a", "b", "c", "sd", "sdf", "sd", None, None],
}
)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 8 entries, 0 to 7
Data columns (total 2 columns):
# Column Dtype
--- ------ -----
0 a int64
1 b object
dtypes: int64(1), object(1)
memory usage: 238.0+ bytes
"""
)
pd.options.display.max_info_rows = 2
df.info(buf=buffer, max_cols=2, null_counts=None)
pd.reset_option("display.max_info_rows")
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 8 entries, 0 to 7
Data columns (total 2 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 a 6 non-null int64
1 b 6 non-null object
dtypes: int64(1), object(1)
memory usage: 238.0+ bytes
"""
)
df.info(buf=buffer, max_cols=2, null_counts=None)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
df.info(buf=buffer, null_counts=True)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
@pytest.mark.parametrize(
"data1",
[
[1, 2, 3, 4, 5, 6, 7],
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0],
[
1.9876543,
2.9876654,
3.9876543,
4.1234587,
5.23,
6.88918237,
7.00001,
],
[
-1.9876543,
-2.9876654,
-3.9876543,
-4.1234587,
-5.23,
-6.88918237,
-7.00001,
],
[
1.987654321,
2.987654321,
3.987654321,
0.1221,
2.1221,
0.112121,
-21.1212,
],
[
-1.987654321,
-2.987654321,
-3.987654321,
-0.1221,
-2.1221,
-0.112121,
21.1212,
],
],
)
@pytest.mark.parametrize(
"data2",
[
[1, 2, 3, 4, 5, 6, 7],
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0],
[
1.9876543,
2.9876654,
3.9876543,
4.1234587,
5.23,
6.88918237,
7.00001,
],
[
-1.9876543,
-2.9876654,
-3.9876543,
-4.1234587,
-5.23,
-6.88918237,
-7.00001,
],
[
1.987654321,
2.987654321,
3.987654321,
0.1221,
2.1221,
0.112121,
-21.1212,
],
[
-1.987654321,
-2.987654321,
-3.987654321,
-0.1221,
-2.1221,
-0.112121,
21.1212,
],
],
)
@pytest.mark.parametrize("rtol", [0, 0.01, 1e-05, 1e-08, 5e-1, 50.12])
@pytest.mark.parametrize("atol", [0, 0.01, 1e-05, 1e-08, 50.12])
def test_cudf_isclose(data1, data2, rtol, atol):
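    # cudf.isclose should accept cudf/pandas Series as well as host and
    # device arrays, and agree with cupy.isclose for the given tolerances.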
array1 = cupy.array(data1)
array2 = cupy.array(data2)
expected = cudf.Series(cupy.isclose(array1, array2, rtol=rtol, atol=atol))
actual = cudf.isclose(
cudf.Series(data1), cudf.Series(data2), rtol=rtol, atol=atol
)
assert_eq(expected, actual)
actual = cudf.isclose(data1, data2, rtol=rtol, atol=atol)
assert_eq(expected, actual)
actual = cudf.isclose(
cupy.array(data1), cupy.array(data2), rtol=rtol, atol=atol
)
assert_eq(expected, actual)
actual = cudf.isclose(
np.array(data1), np.array(data2), rtol=rtol, atol=atol
)
assert_eq(expected, actual)
actual = cudf.isclose(
pd.Series(data1), pd.Series(data2), rtol=rtol, atol=atol
)
assert_eq(expected, actual)
@pytest.mark.parametrize(
"data1",
[
[
-1.9876543,
-2.9876654,
np.nan,
-4.1234587,
-5.23,
-6.88918237,
-7.00001,
],
[
1.987654321,
2.987654321,
3.987654321,
0.1221,
2.1221,
np.nan,
-21.1212,
],
],
)
@pytest.mark.parametrize(
"data2",
[
[
-1.9876543,
-2.9876654,
-3.9876543,
-4.1234587,
-5.23,
-6.88918237,
-7.00001,
],
[
1.987654321,
2.987654321,
3.987654321,
0.1221,
2.1221,
0.112121,
-21.1212,
],
[
-1.987654321,
-2.987654321,
-3.987654321,
np.nan,
np.nan,
np.nan,
21.1212,
],
],
)
@pytest.mark.parametrize("equal_nan", [True, False])
def test_cudf_isclose_nulls(data1, data2, equal_nan):
array1 = cupy.array(data1)
array2 = cupy.array(data2)
expected = cudf.Series(cupy.isclose(array1, array2, equal_nan=equal_nan))
actual = cudf.isclose(
cudf.Series(data1), cudf.Series(data2), equal_nan=equal_nan
)
assert_eq(expected, actual, check_dtype=False)
actual = cudf.isclose(data1, data2, equal_nan=equal_nan)
assert_eq(expected, actual, check_dtype=False)
def test_cudf_isclose_different_index():
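    # isclose aligns its operands on the index; labels present in only one
    # operand compare as not close.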
s1 = cudf.Series(
[-1.9876543, -2.9876654, -3.9876543, -4.1234587, -5.23, -7.00001],
index=[0, 1, 2, 3, 4, 5],
)
s2 = cudf.Series(
[-1.9876543, -2.9876654, -7.00001, -4.1234587, -5.23, -3.9876543],
index=[0, 1, 5, 3, 4, 2],
)
expected = cudf.Series([True] * 6, index=s1.index)
assert_eq(expected, cudf.isclose(s1, s2))
s1 = cudf.Series(
[-1.9876543, -2.9876654, -3.9876543, -4.1234587, -5.23, -7.00001],
index=[0, 1, 2, 3, 4, 5],
)
s2 = cudf.Series(
[-1.9876543, -2.9876654, -7.00001, -4.1234587, -5.23, -3.9876543],
index=[0, 1, 5, 10, 4, 2],
)
expected = cudf.Series(
[True, True, True, False, True, True], index=s1.index
)
assert_eq(expected, cudf.isclose(s1, s2))
s1 = cudf.Series(
[-1.9876543, -2.9876654, -3.9876543, -4.1234587, -5.23, -7.00001],
index=[100, 1, 2, 3, 4, 5],
)
s2 = cudf.Series(
[-1.9876543, -2.9876654, -7.00001, -4.1234587, -5.23, -3.9876543],
index=[0, 1, 100, 10, 4, 2],
)
expected = cudf.Series(
[False, True, True, False, True, False], index=s1.index
)
assert_eq(expected, cudf.isclose(s1, s2))
def test_dataframe_to_dict_error():
df = cudf.DataFrame({"a": [1, 2, 3], "b": [9, 5, 3]})
with pytest.raises(
TypeError,
match=re.escape(
r"cuDF does not support conversion to host memory "
r"via `to_dict()` method. Consider using "
r"`.to_pandas().to_dict()` to construct a Python dictionary."
),
):
df.to_dict()
with pytest.raises(
TypeError,
match=re.escape(
r"cuDF does not support conversion to host memory "
r"via `to_dict()` method. Consider using "
r"`.to_pandas().to_dict()` to construct a Python dictionary."
),
):
df["a"].to_dict()
@pytest.mark.parametrize(
"df",
[
pd.DataFrame({"a": [1, 2, 3, 4, 5, 10, 11, 12, 33, 55, 19]}),
pd.DataFrame(
{
"one": [1, 2, 3, 4, 5, 10],
"two": ["abc", "def", "ghi", "xyz", "pqr", "abc"],
}
),
pd.DataFrame(
{
"one": [1, 2, 3, 4, 5, 10],
"two": ["abc", "def", "ghi", "xyz", "pqr", "abc"],
},
index=[10, 20, 30, 40, 50, 60],
),
pd.DataFrame(
{
"one": [1, 2, 3, 4, 5, 10],
"two": ["abc", "def", "ghi", "xyz", "pqr", "abc"],
},
index=["a", "b", "c", "d", "e", "f"],
),
pd.DataFrame(index=["a", "b", "c", "d", "e", "f"]),
pd.DataFrame(columns=["a", "b", "c", "d", "e", "f"]),
pd.DataFrame(index=[10, 11, 12]),
pd.DataFrame(columns=[10, 11, 12]),
pd.DataFrame(),
pd.DataFrame({"one": [], "two": []}),
pd.DataFrame({2: [], 1: []}),
pd.DataFrame(
{
0: [1, 2, 3, 4, 5, 10],
1: ["abc", "def", "ghi", "xyz", "pqr", "abc"],
100: ["a", "b", "b", "x", "z", "a"],
},
index=[10, 20, 30, 40, 50, 60],
),
],
)
def test_dataframe_keys(df):
gdf = cudf.from_pandas(df)
assert_eq(df.keys(), gdf.keys())
@pytest.mark.parametrize(
"ps",
[
pd.Series([1, 2, 3, 4, 5, 10, 11, 12, 33, 55, 19]),
pd.Series(["abc", "def", "ghi", "xyz", "pqr", "abc"]),
pd.Series(
[1, 2, 3, 4, 5, 10],
index=["abc", "def", "ghi", "xyz", "pqr", "abc"],
),
pd.Series(
["abc", "def", "ghi", "xyz", "pqr", "abc"],
index=[1, 2, 3, 4, 5, 10],
),
pd.Series(index=["a", "b", "c", "d", "e", "f"], dtype="float64"),
pd.Series(index=[10, 11, 12], dtype="float64"),
pd.Series(dtype="float64"),
pd.Series([], dtype="float64"),
],
)
def test_series_keys(ps):
gds = cudf.from_pandas(ps)
if len(ps) == 0 and not isinstance(ps.index, pd.RangeIndex):
assert_eq(ps.keys().astype("float64"), gds.keys())
else:
assert_eq(ps.keys(), gds.keys())
@pytest.mark.parametrize(
"df",
[
pd.DataFrame(),
pd.DataFrame(index=[10, 20, 30]),
pd.DataFrame({"first_col": [], "second_col": [], "third_col": []}),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB")),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[10, 20]),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[7, 8]),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[7, 20, 11, 9],
),
pd.DataFrame({"l": [10]}),
pd.DataFrame({"l": [10]}, index=[100]),
pd.DataFrame({"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]}),
pd.DataFrame(
{"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]},
index=[100, 200, 300, 400, 500, 0],
),
],
)
@pytest.mark.parametrize(
"other",
[
pd.DataFrame([[5, 6], [7, 8]], columns=list("AB")),
pd.DataFrame([[5, 6], [7, 8]], columns=list("BD")),
pd.DataFrame([[5, 6], [7, 8]], columns=list("DE")),
pd.DataFrame(),
pd.DataFrame(
{"c": [10, 11, 22, 33, 44, 100]}, index=[7, 8, 9, 10, 11, 20]
),
pd.DataFrame({"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]}),
pd.DataFrame({"l": [10]}),
pd.DataFrame({"l": [10]}, index=[200]),
pd.DataFrame([]),
pd.DataFrame({"first_col": [], "second_col": [], "third_col": []}),
pd.DataFrame([], index=[100]),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[0, 100, 200, 300],
),
],
)
@pytest.mark.parametrize("sort", [False, True])
@pytest.mark.parametrize("ignore_index", [True, False])
def test_dataframe_append_dataframe(df, other, sort, ignore_index):
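    # DataFrame.append with another DataFrame should match pandas; when the
    # shape changes, nulls are filled before comparing to sidestep dtype
    # differences.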
pdf = df
other_pd = other
gdf = cudf.from_pandas(df)
other_gd = cudf.from_pandas(other)
expected = pdf.append(other_pd, sort=sort, ignore_index=ignore_index)
actual = gdf.append(other_gd, sort=sort, ignore_index=ignore_index)
if expected.shape != df.shape:
assert_eq(expected.fillna(-1), actual.fillna(-1), check_dtype=False)
else:
assert_eq(
expected, actual, check_index_type=False if gdf.empty else True
)
@pytest.mark.parametrize(
"df",
[
pd.DataFrame(),
pd.DataFrame(index=[10, 20, 30]),
pd.DataFrame({12: [], 22: []}),
pd.DataFrame([[1, 2], [3, 4]], columns=[10, 20]),
pd.DataFrame([[1, 2], [3, 4]], columns=[0, 1], index=[10, 20]),
pd.DataFrame([[1, 2], [3, 4]], columns=[1, 0], index=[7, 8]),
pd.DataFrame(
{
23: [315.3324, 3243.32432, 3232.332, -100.32],
33: [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
0: [315.3324, 3243.32432, 3232.332, -100.32],
1: [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[7, 20, 11, 9],
),
],
)
@pytest.mark.parametrize(
"other",
[
pd.Series([10, 11, 23, 234, 13]),
pytest.param(
pd.Series([10, 11, 23, 234, 13], index=[11, 12, 13, 44, 33]),
marks=pytest.mark.xfail(
reason="pandas bug: "
"https://github.com/pandas-dev/pandas/issues/35092"
),
),
{1: 1},
{0: 10, 1: 100, 2: 102},
],
)
@pytest.mark.parametrize("sort", [False, True])
def test_dataframe_append_series_dict(df, other, sort):
pdf = df
other_pd = other
gdf = cudf.from_pandas(df)
if isinstance(other, pd.Series):
other_gd = cudf.from_pandas(other)
else:
other_gd = other
expected = pdf.append(other_pd, ignore_index=True, sort=sort)
actual = gdf.append(other_gd, ignore_index=True, sort=sort)
if expected.shape != df.shape:
assert_eq(expected.fillna(-1), actual.fillna(-1), check_dtype=False)
else:
assert_eq(
expected, actual, check_index_type=False if gdf.empty else True
)
def test_dataframe_append_series_mixed_index():
df = cudf.DataFrame({"first": [], "d": []})
sr = cudf.Series([1, 2, 3, 4])
with pytest.raises(
TypeError,
match=re.escape(
"cudf does not support mixed types, please type-cast "
"the column index of dataframe and index of series "
"to same dtypes."
),
):
df.append(sr, ignore_index=True)
@pytest.mark.parametrize(
"df",
[
pd.DataFrame(),
pd.DataFrame(index=[10, 20, 30]),
pd.DataFrame({"first_col": [], "second_col": [], "third_col": []}),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB")),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[10, 20]),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[7, 8]),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[7, 20, 11, 9],
),
pd.DataFrame({"l": [10]}),
pd.DataFrame({"l": [10]}, index=[100]),
pd.DataFrame({"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]}),
pd.DataFrame(
{"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]},
index=[100, 200, 300, 400, 500, 0],
),
],
)
@pytest.mark.parametrize(
"other",
[
[pd.DataFrame([[5, 6], [7, 8]], columns=list("AB"))],
[
pd.DataFrame([[5, 6], [7, 8]], columns=list("AB")),
pd.DataFrame([[5, 6], [7, 8]], columns=list("BD")),
pd.DataFrame([[5, 6], [7, 8]], columns=list("DE")),
],
[pd.DataFrame(), pd.DataFrame(), pd.DataFrame(), pd.DataFrame()],
[
pd.DataFrame(
{"c": [10, 11, 22, 33, 44, 100]}, index=[7, 8, 9, 10, 11, 20]
),
pd.DataFrame(),
pd.DataFrame(),
pd.DataFrame([[5, 6], [7, 8]], columns=list("AB")),
],
[
pd.DataFrame({"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]}),
pd.DataFrame({"l": [10]}),
pd.DataFrame({"l": [10]}, index=[200]),
],
[pd.DataFrame([]), pd.DataFrame([], index=[100])],
[
pd.DataFrame([]),
pd.DataFrame([], index=[100]),
pd.DataFrame({"first_col": [], "second_col": [], "third_col": []}),
],
[
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[0, 100, 200, 300],
),
],
[
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[0, 100, 200, 300],
),
],
[
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[0, 100, 200, 300],
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[0, 100, 200, 300],
),
],
[
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[0, 100, 200, 300],
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[0, 100, 200, 300],
),
pd.DataFrame({"first_col": [], "second_col": [], "third_col": []}),
],
],
)
@pytest.mark.parametrize("sort", [False, True])
@pytest.mark.parametrize("ignore_index", [True, False])
def test_dataframe_append_dataframe_lists(df, other, sort, ignore_index):
pdf = df
other_pd = other
gdf = cudf.from_pandas(df)
other_gd = [
cudf.from_pandas(o) if isinstance(o, pd.DataFrame) else o
for o in other
]
expected = pdf.append(other_pd, sort=sort, ignore_index=ignore_index)
actual = gdf.append(other_gd, sort=sort, ignore_index=ignore_index)
if expected.shape != df.shape:
assert_eq(expected.fillna(-1), actual.fillna(-1), check_dtype=False)
else:
assert_eq(
expected, actual, check_index_type=False if gdf.empty else True
)
@pytest.mark.parametrize(
"df",
[
pd.DataFrame(),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB")),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[10, 20]),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[7, 8]),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[7, 20, 11, 9],
),
pd.DataFrame({"l": [10]}),
pd.DataFrame({"l": [10]}, index=[100]),
pd.DataFrame({"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]}),
pd.DataFrame(
{"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]},
index=[100, 200, 300, 400, 500, 0],
),
pd.DataFrame({"first_col": [], "second_col": [], "third_col": []}),
],
)
@pytest.mark.parametrize(
"other",
[
[[1, 2], [10, 100]],
[[1, 2, 10, 100, 0.1, 0.2, 0.0021]],
[[]],
[[], [], [], []],
[[0.23, 0.00023, -10.00, 100, 200, 1000232, 1232.32323]],
],
)
@pytest.mark.parametrize("sort", [False, True])
@pytest.mark.parametrize("ignore_index", [True, False])
def test_dataframe_append_lists(df, other, sort, ignore_index):
pdf = df
other_pd = other
gdf = cudf.from_pandas(df)
other_gd = [
cudf.from_pandas(o) if isinstance(o, pd.DataFrame) else o
for o in other
]
expected = pdf.append(other_pd, sort=sort, ignore_index=ignore_index)
actual = gdf.append(other_gd, sort=sort, ignore_index=ignore_index)
if expected.shape != df.shape:
assert_eq(expected.fillna(-1), actual.fillna(-1), check_dtype=False)
else:
assert_eq(
expected, actual, check_index_type=False if gdf.empty else True
)
def test_dataframe_append_error():
df = cudf.DataFrame({"a": [1, 2, 3]})
ps = cudf.Series([1, 2, 3])
with pytest.raises(
TypeError,
match="Can only append a Series if ignore_index=True "
"or if the Series has a name",
):
df.append(ps)
def test_cudf_arrow_array_error():
df = cudf.DataFrame({"a": [1, 2, 3]})
with pytest.raises(
TypeError,
match="Implicit conversion to a host PyArrow Table via __arrow_array__"
" is not allowed, To explicitly construct a PyArrow Table, consider "
"using .to_arrow()",
):
df.__arrow_array__()
sr = cudf.Series([1, 2, 3])
with pytest.raises(
TypeError,
match="Implicit conversion to a host PyArrow Array via __arrow_array__"
" is not allowed, To explicitly construct a PyArrow Array, consider "
"using .to_arrow()",
):
sr.__arrow_array__()
sr = cudf.Series(["a", "b", "c"])
with pytest.raises(
TypeError,
match="Implicit conversion to a host PyArrow Array via __arrow_array__"
" is not allowed, To explicitly construct a PyArrow Array, consider "
"using .to_arrow()",
):
sr.__arrow_array__()
@pytest.mark.parametrize("n", [0, 2, 5, 10, None])
@pytest.mark.parametrize("frac", [0.1, 0.5, 1, 2, None])
@pytest.mark.parametrize("replace", [True, False])
@pytest.mark.parametrize("axis", [0, 1])
def test_dataframe_sample_basic(n, frac, replace, axis):
    # Sampling columns (axis=1) with replacement is skipped because cudf
    # does not currently support duplicate column names
if axis == 1 and replace:
return
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5],
"float": [0.05, 0.2, 0.3, 0.2, 0.25],
"int": [1, 3, 5, 4, 2],
},
index=[1, 2, 3, 4, 5],
)
df = cudf.DataFrame.from_pandas(pdf)
random_state = 0
try:
pout = pdf.sample(
n=n,
frac=frac,
replace=replace,
random_state=random_state,
axis=axis,
)
except BaseException:
assert_exceptions_equal(
lfunc=pdf.sample,
rfunc=df.sample,
lfunc_args_and_kwargs=(
[],
{
"n": n,
"frac": frac,
"replace": replace,
"random_state": random_state,
"axis": axis,
},
),
rfunc_args_and_kwargs=(
[],
{
"n": n,
"frac": frac,
"replace": replace,
"random_state": random_state,
"axis": axis,
},
),
)
else:
gout = df.sample(
n=n,
frac=frac,
replace=replace,
random_state=random_state,
axis=axis,
)
assert pout.shape == gout.shape
@pytest.mark.parametrize("replace", [True, False])
@pytest.mark.parametrize("random_state", [1, np.random.mtrand.RandomState(10)])
def test_dataframe_reproducibility(replace, random_state):
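    # Sampling twice with the same random_state must return identical rows.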
df = cudf.DataFrame({"a": cupy.arange(0, 1024)})
expected = df.sample(1024, replace=replace, random_state=random_state)
out = df.sample(1024, replace=replace, random_state=random_state)
assert_eq(expected, out)
@pytest.mark.parametrize("n", [0, 2, 5, 10, None])
@pytest.mark.parametrize("frac", [0.1, 0.5, 1, 2, None])
@pytest.mark.parametrize("replace", [True, False])
def test_series_sample_basic(n, frac, replace):
psr = pd.Series([1, 2, 3, 4, 5])
sr = cudf.Series.from_pandas(psr)
random_state = 0
try:
pout = psr.sample(
n=n, frac=frac, replace=replace, random_state=random_state
)
except BaseException:
assert_exceptions_equal(
lfunc=psr.sample,
rfunc=sr.sample,
lfunc_args_and_kwargs=(
[],
{
"n": n,
"frac": frac,
"replace": replace,
"random_state": random_state,
},
),
rfunc_args_and_kwargs=(
[],
{
"n": n,
"frac": frac,
"replace": replace,
"random_state": random_state,
},
),
)
else:
gout = sr.sample(
n=n, frac=frac, replace=replace, random_state=random_state
)
assert pout.shape == gout.shape
@pytest.mark.parametrize(
"df",
[
pd.DataFrame(),
pd.DataFrame(index=[100, 10, 1, 0]),
pd.DataFrame(columns=["a", "b", "c", "d"]),
pd.DataFrame(columns=["a", "b", "c", "d"], index=[100]),
pd.DataFrame(
columns=["a", "b", "c", "d"], index=[100, 10000, 2131, 133]
),
pd.DataFrame({"a": [1, 2, 3], "b": ["abc", "xyz", "klm"]}),
],
)
def test_dataframe_empty(df):
pdf = df
gdf = cudf.from_pandas(pdf)
assert_eq(pdf.empty, gdf.empty)
@pytest.mark.parametrize(
"df",
[
pd.DataFrame(),
pd.DataFrame(index=[100, 10, 1, 0]),
pd.DataFrame(columns=["a", "b", "c", "d"]),
pd.DataFrame(columns=["a", "b", "c", "d"], index=[100]),
pd.DataFrame(
columns=["a", "b", "c", "d"], index=[100, 10000, 2131, 133]
),
pd.DataFrame({"a": [1, 2, 3], "b": ["abc", "xyz", "klm"]}),
],
)
def test_dataframe_size(df):
pdf = df
gdf = cudf.from_pandas(pdf)
assert_eq(pdf.size, gdf.size)
@pytest.mark.parametrize(
"ps",
[
pd.Series(dtype="float64"),
pd.Series(index=[100, 10, 1, 0], dtype="float64"),
pd.Series([], dtype="float64"),
pd.Series(["a", "b", "c", "d"]),
pd.Series(["a", "b", "c", "d"], index=[0, 1, 10, 11]),
],
)
def test_series_empty(ps):
gs = cudf.from_pandas(ps)
assert_eq(ps.empty, gs.empty)
@pytest.mark.parametrize(
"data",
[
[],
[1],
{"a": [10, 11, 12]},
{
"a": [10, 11, 12],
"another column name": [12, 22, 34],
"xyz": [0, 10, 11],
},
],
)
@pytest.mark.parametrize("columns", [["a"], ["another column name"], None])
def test_dataframe_init_with_columns(data, columns):
pdf = pd.DataFrame(data, columns=columns)
gdf = cudf.DataFrame(data, columns=columns)
assert_eq(
pdf,
gdf,
check_index_type=False if len(pdf.index) == 0 else True,
check_dtype=False if pdf.empty and len(pdf.columns) else True,
)
@pytest.mark.parametrize(
"data, ignore_dtype",
[
([pd.Series([1, 2, 3])], False),
([pd.Series(index=[1, 2, 3], dtype="float64")], False),
([pd.Series(name="empty series name", dtype="float64")], False),
(
[pd.Series([1]), pd.Series([], dtype="float64"), pd.Series([3])],
False,
),
(
[
pd.Series([1, 0.324234, 32424.323, -1233, 34242]),
pd.Series([], dtype="float64"),
pd.Series([3], name="series that is named"),
],
False,
),
([pd.Series([1, 2, 3], name="hi")] * 10, False),
([pd.Series([1, 2, 3], name=None, index=[10, 11, 12])] * 10, False),
(
[
pd.Series([1, 2, 3], name=None, index=[10, 11, 12]),
pd.Series([1, 2, 30], name=None, index=[13, 144, 15]),
],
True,
),
(
[
pd.Series([1, 0.324234, 32424.323, -1233, 34242]),
pd.Series([], dtype="float64"),
pd.Series(index=[10, 11, 12], dtype="float64"),
],
False,
),
(
[
pd.Series([1, 0.324234, 32424.323, -1233, 34242]),
pd.Series([], name="abc", dtype="float64"),
pd.Series(index=[10, 11, 12], dtype="float64"),
],
False,
),
(
[
pd.Series([1, 0.324234, 32424.323, -1233, 34242]),
pd.Series([1, -100, 200, -399, 400], name="abc"),
pd.Series([111, 222, 333], index=[10, 11, 12]),
],
False,
),
],
)
@pytest.mark.parametrize(
"columns", [None, ["0"], [0], ["abc"], [144, 13], [2, 1, 0]]
)
def test_dataframe_init_from_series_list(data, ignore_dtype, columns):
gd_data = [cudf.from_pandas(obj) for obj in data]
expected = pd.DataFrame(data, columns=columns)
actual = cudf.DataFrame(gd_data, columns=columns)
if ignore_dtype:
assert_eq(expected.fillna(-1), actual.fillna(-1), check_dtype=False)
else:
assert_eq(expected, actual)
@pytest.mark.parametrize(
"data, ignore_dtype, index",
[
([pd.Series([1, 2, 3])], False, ["a", "b", "c"]),
([pd.Series(index=[1, 2, 3], dtype="float64")], False, ["a", "b"]),
(
[pd.Series(name="empty series name", dtype="float64")],
False,
["index1"],
),
(
[pd.Series([1]), pd.Series([], dtype="float64"), pd.Series([3])],
False,
["0", "2", "1"],
),
(
[
pd.Series([1, 0.324234, 32424.323, -1233, 34242]),
pd.Series([], dtype="float64"),
pd.Series([3], name="series that is named"),
],
False,
["_", "+", "*"],
),
([pd.Series([1, 2, 3], name="hi")] * 10, False, ["mean"] * 10),
(
[pd.Series([1, 2, 3], name=None, index=[10, 11, 12])] * 10,
False,
["abc"] * 10,
),
(
[
pd.Series([1, 2, 3], name=None, index=[10, 11, 12]),
pd.Series([1, 2, 30], name=None, index=[13, 144, 15]),
],
True,
["set_index_a", "set_index_b"],
),
(
[
pd.Series([1, 0.324234, 32424.323, -1233, 34242]),
pd.Series([], dtype="float64"),
pd.Series(index=[10, 11, 12], dtype="float64"),
],
False,
["a", "b", "c"],
),
(
[
pd.Series([1, 0.324234, 32424.323, -1233, 34242]),
pd.Series([], name="abc", dtype="float64"),
pd.Series(index=[10, 11, 12], dtype="float64"),
],
False,
["a", "v", "z"],
),
(
[
pd.Series([1, 0.324234, 32424.323, -1233, 34242]),
pd.Series([1, -100, 200, -399, 400], name="abc"),
pd.Series([111, 222, 333], index=[10, 11, 12]),
],
False,
["a", "v", "z"],
),
],
)
@pytest.mark.parametrize(
"columns", [None, ["0"], [0], ["abc"], [144, 13], [2, 1, 0]]
)
def test_dataframe_init_from_series_list_with_index(
data, ignore_dtype, index, columns
):
gd_data = [cudf.from_pandas(obj) for obj in data]
expected = pd.DataFrame(data, columns=columns, index=index)
actual = cudf.DataFrame(gd_data, columns=columns, index=index)
if ignore_dtype:
assert_eq(expected.fillna(-1), actual.fillna(-1), check_dtype=False)
else:
assert_eq(expected, actual)
@pytest.mark.parametrize(
"data, index",
[
([pd.Series([1, 2]), pd.Series([1, 2])], ["a", "b", "c"]),
(
[
pd.Series([1, 0.324234, 32424.323, -1233, 34242]),
pd.Series([], dtype="float64"),
pd.Series([3], name="series that is named"),
],
["_", "+"],
),
([pd.Series([1, 2, 3], name="hi")] * 10, ["mean"] * 9),
],
)
def test_dataframe_init_from_series_list_with_index_error(data, index):
gd_data = [cudf.from_pandas(obj) for obj in data]
assert_exceptions_equal(
pd.DataFrame,
cudf.DataFrame,
([data], {"index": index}),
([gd_data], {"index": index}),
)
@pytest.mark.parametrize(
"data",
[
[pd.Series([1, 2, 3], index=["a", "a", "a"])],
[pd.Series([1, 2, 3], index=["a", "a", "a"])] * 4,
[
pd.Series([1, 2, 3], index=["a", "b", "a"]),
pd.Series([1, 2, 3], index=["b", "b", "a"]),
],
[
pd.Series([1, 2, 3], index=["a", "b", "z"]),
pd.Series([1, 2, 3], index=["u", "b", "a"]),
pd.Series([1, 2, 3], index=["u", "b", "u"]),
],
],
)
def test_dataframe_init_from_series_list_duplicate_index_error(data):
gd_data = [cudf.from_pandas(obj) for obj in data]
assert_exceptions_equal(
lfunc=pd.DataFrame,
rfunc=cudf.DataFrame,
lfunc_args_and_kwargs=([], {"data": data}),
rfunc_args_and_kwargs=([], {"data": gd_data}),
check_exception_type=False,
)
def test_dataframe_iterrows_itertuples():
df = cudf.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
with pytest.raises(
TypeError,
match=re.escape(
"cuDF does not support iteration of DataFrame "
"via itertuples. Consider using "
"`.to_pandas().itertuples()` "
"if you wish to iterate over namedtuples."
),
):
df.itertuples()
with pytest.raises(
TypeError,
match=re.escape(
"cuDF does not support iteration of DataFrame "
"via iterrows. Consider using "
"`.to_pandas().iterrows()` "
"if you wish to iterate over each row."
),
):
df.iterrows()
@pytest.mark.parametrize(
"df",
[
cudf.DataFrame(
{
"a": [1, 2, 3],
"b": [10, 22, 33],
"c": [0.3234, 0.23432, 0.0],
"d": ["hello", "world", "hello"],
}
),
cudf.DataFrame(
{
"a": [1, 2, 3],
"b": ["hello", "world", "hello"],
"c": [0.3234, 0.23432, 0.0],
}
),
pytest.param(
cudf.DataFrame(
{
"int_data": [1, 2, 3],
"str_data": ["hello", "world", "hello"],
"float_data": [0.3234, 0.23432, 0.0],
"timedelta_data": cudf.Series(
[1, 2, 1], dtype="timedelta64[ns]"
),
"datetime_data": cudf.Series(
[1, 2, 1], dtype="datetime64[ns]"
),
}
),
marks=pytest.mark.xfail(
reason="https://github.com/rapidsai/cudf/issues/6219"
),
),
pytest.param(
cudf.DataFrame(
{
"int_data": [1, 2, 3],
"str_data": ["hello", "world", "hello"],
"float_data": [0.3234, 0.23432, 0.0],
"timedelta_data": cudf.Series(
[1, 2, 1], dtype="timedelta64[ns]"
),
"datetime_data": cudf.Series(
[1, 2, 1], dtype="datetime64[ns]"
),
"category_data": cudf.Series(
["a", "a", "b"], dtype="category"
),
}
),
marks=pytest.mark.xfail(
reason="https://github.com/rapidsai/cudf/issues/6219"
),
),
],
)
@pytest.mark.parametrize(
"include",
[None, "all", ["object"], ["int"], ["object", "int", "category"]],
)
def test_describe_misc_include(df, include):
pdf = df.to_pandas()
expected = pdf.describe(include=include, datetime_is_numeric=True)
actual = df.describe(include=include, datetime_is_numeric=True)
for col in expected.columns:
if expected[col].dtype == np.dtype("object"):
expected[col] = expected[col].fillna(-1).astype("str")
actual[col] = actual[col].fillna(-1).astype("str")
assert_eq(expected, actual)
@pytest.mark.parametrize(
"df",
[
cudf.DataFrame(
{
"a": [1, 2, 3],
"b": [10, 22, 33],
"c": [0.3234, 0.23432, 0.0],
"d": ["hello", "world", "hello"],
}
),
cudf.DataFrame(
{
"a": [1, 2, 3],
"b": ["hello", "world", "hello"],
"c": [0.3234, 0.23432, 0.0],
}
),
pytest.param(
cudf.DataFrame(
{
"int_data": [1, 2, 3],
"str_data": ["hello", "world", "hello"],
"float_data": [0.3234, 0.23432, 0.0],
"timedelta_data": cudf.Series(
[1, 2, 1], dtype="timedelta64[ns]"
),
"datetime_data": cudf.Series(
[1, 2, 1], dtype="datetime64[ns]"
),
}
),
marks=pytest.mark.xfail(
reason="https://github.com/rapidsai/cudf/issues/6219"
),
),
pytest.param(
cudf.DataFrame(
{
"int_data": [1, 2, 3],
"str_data": ["hello", "world", "hello"],
"float_data": [0.3234, 0.23432, 0.0],
"timedelta_data": cudf.Series(
[1, 2, 1], dtype="timedelta64[ns]"
),
"datetime_data": cudf.Series(
[1, 2, 1], dtype="datetime64[ns]"
),
"category_data": cudf.Series(
["a", "a", "b"], dtype="category"
),
}
),
marks=pytest.mark.xfail(
reason="https://github.com/rapidsai/cudf/issues/6219"
),
),
],
)
@pytest.mark.parametrize(
"exclude", [None, ["object"], ["int"], ["object", "int", "category"]]
)
def test_describe_misc_exclude(df, exclude):
pdf = df.to_pandas()
expected = pdf.describe(exclude=exclude, datetime_is_numeric=True)
actual = df.describe(exclude=exclude, datetime_is_numeric=True)
for col in expected.columns:
if expected[col].dtype == np.dtype("object"):
expected[col] = expected[col].fillna(-1).astype("str")
actual[col] = actual[col].fillna(-1).astype("str")
assert_eq(expected, actual)
@pytest.mark.parametrize(
"df",
[
cudf.DataFrame({"a": [1, 2, 3]}),
cudf.DataFrame(
{"a": [1, 2, 3], "b": ["a", "z", "c"]}, index=["a", "z", "x"]
),
cudf.DataFrame(
{
"a": [1, 2, 3, None, 2, 1, None],
"b": ["a", "z", "c", "a", "v", "z", "z"],
}
),
cudf.DataFrame({"a": [], "b": []}),
cudf.DataFrame({"a": [None, None], "b": [None, None]}),
cudf.DataFrame(
{
"a": ["hello", "world", "rapids", "ai", "nvidia"],
"b": cudf.Series([1, 21, 21, 11, 11], dtype="timedelta64[s]"),
}
),
cudf.DataFrame(
{
"a": ["hello", None, "world", "rapids", None, "ai", "nvidia"],
"b": cudf.Series(
[1, 21, None, 11, None, 11, None], dtype="datetime64[s]"
),
}
),
],
)
@pytest.mark.parametrize("numeric_only", [True, False])
@pytest.mark.parametrize("dropna", [True, False])
def test_dataframe_mode(df, numeric_only, dropna):
pdf = df.to_pandas()
expected = pdf.mode(numeric_only=numeric_only, dropna=dropna)
actual = df.mode(numeric_only=numeric_only, dropna=dropna)
assert_eq(expected, actual, check_dtype=False)
@pytest.mark.parametrize("lhs, rhs", [("a", "a"), ("a", "b"), (1, 1.0)])
def test_equals_names(lhs, rhs):
lhs = cudf.DataFrame({lhs: [1, 2]})
rhs = cudf.DataFrame({rhs: [1, 2]})
got = lhs.equals(rhs)
expect = lhs.to_pandas().equals(rhs.to_pandas())
assert_eq(expect, got)
def test_equals_dtypes():
lhs = cudf.DataFrame({"a": [1, 2.0]})
rhs = cudf.DataFrame({"a": [1, 2]})
got = lhs.equals(rhs)
expect = lhs.to_pandas().equals(rhs.to_pandas())
assert_eq(expect, got)
@pytest.mark.parametrize(
"df1",
[
pd.DataFrame({"a": [10, 11, 12]}, index=["a", "b", "z"]),
pd.DataFrame({"z": ["a"]}),
],
)
@pytest.mark.parametrize(
"df2",
[
pd.DataFrame(),
pd.DataFrame({"a": ["a", "a", "c", "z", "A"], "z": [1, 2, 3, 4, 5]}),
],
)
@pytest.mark.parametrize(
"op",
[
operator.eq,
operator.ne,
operator.lt,
operator.gt,
operator.le,
operator.ge,
],
)
def test_dataframe_error_equality(df1, df2, op):
gdf1 = cudf.from_pandas(df1)
gdf2 = cudf.from_pandas(df2)
assert_exceptions_equal(op, op, ([df1, df2],), ([gdf1, gdf2],))
@pytest.mark.parametrize(
"df,expected_pdf",
[
(
cudf.DataFrame(
{
"a": cudf.Series([1, 2, None, 3], dtype="uint8"),
"b": cudf.Series([23, None, None, 32], dtype="uint16"),
}
),
pd.DataFrame(
{
"a": pd.Series([1, 2, None, 3], dtype=pd.UInt8Dtype()),
"b": pd.Series(
[23, None, None, 32], dtype=pd.UInt16Dtype()
),
}
),
),
(
cudf.DataFrame(
{
"a": cudf.Series([None, 123, None, 1], dtype="uint32"),
"b": cudf.Series(
[234, 2323, 23432, None, None, 224], dtype="uint64"
),
}
),
pd.DataFrame(
{
"a": pd.Series(
[None, 123, None, 1], dtype=pd.UInt32Dtype()
),
"b": pd.Series(
[234, 2323, 23432, None, None, 224],
dtype=pd.UInt64Dtype(),
),
}
),
),
(
cudf.DataFrame(
{
"a": cudf.Series(
[-10, 1, None, -1, None, 3], dtype="int8"
),
"b": cudf.Series(
[111, None, 222, None, 13], dtype="int16"
),
}
),
pd.DataFrame(
{
"a": pd.Series(
[-10, 1, None, -1, None, 3], dtype=pd.Int8Dtype()
),
"b": pd.Series(
[111, None, 222, None, 13], dtype=pd.Int16Dtype()
),
}
),
),
(
cudf.DataFrame(
{
"a": cudf.Series(
[11, None, 22, 33, None, 2, None, 3], dtype="int32"
),
"b": cudf.Series(
[32431, None, None, 32322, 0, 10, -32324, None],
dtype="int64",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
[11, None, 22, 33, None, 2, None, 3],
dtype=pd.Int32Dtype(),
),
"b": pd.Series(
[32431, None, None, 32322, 0, 10, -32324, None],
dtype=pd.Int64Dtype(),
),
}
),
),
(
cudf.DataFrame(
{
"a": cudf.Series(
[True, None, False, None, False, True, True, False],
dtype="bool_",
),
"b": cudf.Series(
[
"abc",
"a",
None,
"hello world",
"foo buzz",
"",
None,
"rapids ai",
],
dtype="object",
),
"c": cudf.Series(
[0.1, None, 0.2, None, 3, 4, 1000, None],
dtype="float64",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
[True, None, False, None, False, True, True, False],
dtype=pd.BooleanDtype(),
),
"b": pd.Series(
[
"abc",
"a",
None,
"hello world",
"foo buzz",
"",
None,
"rapids ai",
],
dtype=pd.StringDtype(),
),
"c": pd.Series(
[0.1, None, 0.2, None, 3, 4, 1000, None],
dtype=pd.Float64Dtype(),
),
}
),
),
],
)
def test_dataframe_to_pandas_nullable_dtypes(df, expected_pdf):
actual_pdf = df.to_pandas(nullable=True)
assert_eq(actual_pdf, expected_pdf)
@pytest.mark.parametrize(
"data",
[
[{"a": 1, "b": 2, "c": 3}, {"a": 4, "b": 5, "c": 6}],
[{"a": 1, "b": 2, "c": None}, {"a": None, "b": 5, "c": 6}],
[{"a": 1, "b": 2}, {"a": 1, "b": 5, "c": 6}],
[{"a": 1, "b": 2}, {"b": 5, "c": 6}],
[{}, {"a": 1, "b": 5, "c": 6}],
[{"a": 1, "b": 2, "c": 3}, {"a": 4.5, "b": 5.5, "c": 6.5}],
],
)
def test_dataframe_init_from_list_of_dicts(data):
expect = pd.DataFrame(data)
got = cudf.DataFrame(data)
assert_eq(expect, got)
def test_dataframe_pipe():
pdf = pd.DataFrame()
gdf = cudf.DataFrame()
def add_int_col(df, column):
df[column] = df._constructor_sliced([10, 20, 30, 40])
return df
def add_str_col(df, column):
df[column] = df._constructor_sliced(["a", "b", "xyz", "ai"])
return df
expected = (
pdf.pipe(add_int_col, "one")
.pipe(add_int_col, column="two")
.pipe(add_str_col, "three")
)
actual = (
gdf.pipe(add_int_col, "one")
.pipe(add_int_col, column="two")
.pipe(add_str_col, "three")
)
assert_eq(expected, actual)
expected = (
pdf.pipe((add_str_col, "df"), column="one")
.pipe(add_str_col, column="two")
.pipe(add_int_col, "three")
)
actual = (
gdf.pipe((add_str_col, "df"), column="one")
.pipe(add_str_col, column="two")
.pipe(add_int_col, "three")
)
assert_eq(expected, actual)
def test_dataframe_pipe_error():
pdf = pd.DataFrame()
gdf = cudf.DataFrame()
def custom_func(df, column):
df[column] = df._constructor_sliced([10, 20, 30, 40])
return df
assert_exceptions_equal(
lfunc=pdf.pipe,
rfunc=gdf.pipe,
lfunc_args_and_kwargs=([(custom_func, "columns")], {"columns": "d"}),
rfunc_args_and_kwargs=([(custom_func, "columns")], {"columns": "d"}),
)
@pytest.mark.parametrize(
"op",
[
"count",
"cummin",
"cummax",
"cummax",
"cumprod",
"kurt",
"kurtosis",
"skew",
],
)
def test_dataframe_axis1_unsupported_ops(op):
df = cudf.DataFrame({"a": [1, 2, 3], "b": [8, 9, 10]})
with pytest.raises(
NotImplementedError, match="Only axis=0 is currently supported."
):
getattr(df, op)(axis=1)
def test_dataframe_from_pandas_duplicate_columns():
pdf = pd.DataFrame(columns=["a", "b", "c", "a"])
pdf["a"] = [1, 2, 3]
with pytest.raises(
ValueError, match="Duplicate column names are not allowed"
):
cudf.from_pandas(pdf)
@pytest.mark.parametrize(
"df",
[
pd.DataFrame(
{"a": [1, 2, 3], "b": [10, 11, 20], "c": ["a", "bcd", "xyz"]}
),
pd.DataFrame(),
],
)
@pytest.mark.parametrize(
"columns",
[
None,
["a"],
["c", "a"],
["b", "a", "c"],
[],
pd.Index(["c", "a"]),
cudf.Index(["c", "a"]),
["abc", "a"],
["column_not_exists1", "column_not_exists2"],
],
)
@pytest.mark.parametrize("index", [["abc", "def", "ghi"]])
def test_dataframe_constructor_columns(df, columns, index):
def assert_local_eq(actual, df, expected, host_columns):
check_index_type = False if expected.empty else True
if host_columns is not None and any(
col not in df.columns for col in host_columns
):
assert_eq(
expected,
actual,
check_dtype=False,
check_index_type=check_index_type,
)
else:
assert_eq(expected, actual, check_index_type=check_index_type)
gdf = cudf.from_pandas(df)
host_columns = (
columns.to_pandas() if isinstance(columns, cudf.Index) else columns
)
expected = pd.DataFrame(df, columns=host_columns, index=index)
actual = cudf.DataFrame(gdf, columns=columns, index=index)
assert_local_eq(actual, df, expected, host_columns)
@pytest.mark.parametrize(
"data",
[
{"a": [1, 2, 3], "b": [3.0, 4.0, 5.0], "c": [True, True, False]},
{"a": [1.0, 2.0, 3.0], "b": [3.0, 4.0, 5.0], "c": [True, True, False]},
{"a": [1, 2, 3], "b": [3, 4, 5], "c": [True, True, False]},
{"a": [1, 2, 3], "b": [True, True, False], "c": [False, True, False]},
{
"a": [1.0, 2.0, 3.0],
"b": [True, True, False],
"c": [False, True, False],
},
{"a": [1, 2, 3], "b": [3, 4, 5], "c": [2.0, 3.0, 4.0]},
{"a": [1, 2, 3], "b": [2.0, 3.0, 4.0], "c": [5.0, 6.0, 4.0]},
],
)
@pytest.mark.parametrize(
"aggs",
[
["min", "sum", "max"],
("min", "sum", "max"),
{"min", "sum", "max"},
"sum",
{"a": "sum", "b": "min", "c": "max"},
{"a": ["sum"], "b": ["min"], "c": ["max"]},
{"a": ("sum"), "b": ("min"), "c": ("max")},
{"a": {"sum"}, "b": {"min"}, "c": {"max"}},
{"a": ["sum", "min"], "b": ["sum", "max"], "c": ["min", "max"]},
{"a": ("sum", "min"), "b": ("sum", "max"), "c": ("min", "max")},
{"a": {"sum", "min"}, "b": {"sum", "max"}, "c": {"min", "max"}},
],
)
def test_agg_for_dataframes(data, aggs):
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
expect = pdf.agg(aggs).sort_index()
got = gdf.agg(aggs).sort_index()
assert_eq(expect, got, check_dtype=False)
@pytest.mark.parametrize("aggs", [{"a": np.sum, "b": np.min, "c": np.max}])
def test_agg_for_unsupported_function(aggs):
gdf = cudf.DataFrame(
{"a": [1, 2, 3], "b": [3.0, 4.0, 5.0], "c": [True, True, False]}
)
with pytest.raises(NotImplementedError):
gdf.agg(aggs)
@pytest.mark.parametrize("aggs", ["asdf"])
def test_agg_for_dataframe_with_invalid_function(aggs):
gdf = cudf.DataFrame(
{"a": [1, 2, 3], "b": [3.0, 4.0, 5.0], "c": [True, True, False]}
)
with pytest.raises(
AttributeError,
match=f"{aggs} is not a valid function for 'DataFrame' object",
):
gdf.agg(aggs)
@pytest.mark.parametrize("aggs", [{"a": "asdf"}])
def test_agg_for_series_with_invalid_function(aggs):
gdf = cudf.DataFrame(
{"a": [1, 2, 3], "b": [3.0, 4.0, 5.0], "c": [True, True, False]}
)
with pytest.raises(
AttributeError,
match=f"{aggs['a']} is not a valid function for 'Series' object",
):
gdf.agg(aggs)
@pytest.mark.parametrize(
"aggs",
[
"sum",
["min", "sum", "max"],
{"a": {"sum", "min"}, "b": {"sum", "max"}, "c": {"min", "max"}},
],
)
def test_agg_for_dataframe_with_string_columns(aggs):
gdf = cudf.DataFrame(
{"a": ["m", "n", "o"], "b": ["t", "u", "v"], "c": ["x", "y", "z"]},
index=["a", "b", "c"],
)
with pytest.raises(
NotImplementedError,
match=re.escape(
"DataFrame.agg() is not supported for "
"frames containing string columns"
),
):
gdf.agg(aggs)
@pytest.mark.parametrize(
"join", ["left"],
)
@pytest.mark.parametrize(
"overwrite", [True, False],
)
@pytest.mark.parametrize(
"errors", ["ignore"],
)
@pytest.mark.parametrize(
"data",
[
{"a": [1, 2, 3], "b": [3, 4, 5]},
{"e": [1.0, 2.0, 3.0], "d": [3.0, 4.0, 5.0]},
{"c": [True, False, False], "d": [False, True, True]},
{"g": [2.0, np.nan, 4.0], "n": [np.nan, np.nan, np.nan]},
{"d": [np.nan, np.nan, np.nan], "e": [np.nan, np.nan, np.nan]},
{"a": [1.0, 2, 3], "b": | pd.Series([4.0, 8.0, 3.0], index=[1, 2, 3]) | pandas.Series |
from collections import abc, deque
from decimal import Decimal
from io import StringIO
from warnings import catch_warnings
import numpy as np
from numpy.random import randn
import pytest
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
concat,
date_range,
read_csv,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.tests.extension.decimal import to_decimal
@pytest.fixture(params=[True, False])
def sort(request):
"""Boolean sort keyword for concat and DataFrame.append."""
return request.param
class TestConcatenate:
def test_concat_copy(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randint(0, 10, size=4).reshape(4, 1))
df3 = DataFrame({5: "foo"}, index=range(4))
# These are actual copies.
result = concat([df, df2, df3], axis=1, copy=True)
for b in result._mgr.blocks:
assert b.values.base is None
# These are the same.
result = concat([df, df2, df3], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is df._mgr.blocks[0].values.base
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
# Float block was consolidated.
df4 = DataFrame(np.random.randn(4, 1))
result = concat([df, df2, df3, df4], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is None
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
def test_concat_with_group_keys(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
# axis=0
df = DataFrame(np.random.randn(3, 4))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1])
exp_index = MultiIndex.from_arrays(
[[0, 0, 0, 1, 1, 1, 1], [0, 1, 2, 0, 1, 2, 3]]
)
expected = DataFrame(np.r_[df.values, df2.values], index=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1])
exp_index2 = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
expected = DataFrame(np.r_[df.values, df.values], index=exp_index2)
tm.assert_frame_equal(result, expected)
# axis=1
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df2.values], columns=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df.values], columns=exp_index2)
tm.assert_frame_equal(result, expected)
def test_concat_keys_specific_levels(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df.iloc[:, [0, 1]], df.iloc[:, [2]], df.iloc[:, [3]]]
level = ["three", "two", "one", "zero"]
result = concat(
pieces,
axis=1,
keys=["one", "two", "three"],
levels=[level],
names=["group_key"],
)
tm.assert_index_equal(result.columns.levels[0], Index(level, name="group_key"))
tm.assert_index_equal(result.columns.levels[1], Index([0, 1, 2, 3]))
assert result.columns.names == ["group_key", None]
def test_concat_dataframe_keys_bug(self, sort):
t1 = DataFrame(
{"value": Series([1, 2, 3], index=Index(["a", "b", "c"], name="id"))}
)
t2 = DataFrame({"value": Series([7, 8], index=Index(["a", "b"], name="id"))})
# it works
result = concat([t1, t2], axis=1, keys=["t1", "t2"], sort=sort)
assert list(result.columns) == [("t1", "value"), ("t2", "value")]
def test_concat_series_partial_columns_names(self):
# GH10698
foo = Series([1, 2], name="foo")
bar = Series([1, 2])
baz = Series([4, 5])
result = concat([foo, bar, baz], axis=1)
expected = DataFrame(
{"foo": [1, 2], 0: [1, 2], 1: [4, 5]}, columns=["foo", 0, 1]
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, keys=["red", "blue", "yellow"])
expected = DataFrame(
{"red": [1, 2], "blue": [1, 2], "yellow": [4, 5]},
columns=["red", "blue", "yellow"],
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, ignore_index=True)
expected = DataFrame({0: [1, 2], 1: [1, 2], 2: [4, 5]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("mapping", ["mapping", "dict"])
def test_concat_mapping(self, mapping, non_dict_mapping_subclass):
constructor = dict if mapping == "dict" else non_dict_mapping_subclass
frames = constructor(
{
"foo": DataFrame(np.random.randn(4, 3)),
"bar": DataFrame(np.random.randn(4, 3)),
"baz": DataFrame(np.random.randn(4, 3)),
"qux": DataFrame(np.random.randn(4, 3)),
}
)
sorted_keys = list(frames.keys())
result = concat(frames)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys)
tm.assert_frame_equal(result, expected)
result = concat(frames, axis=1)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys, axis=1)
tm.assert_frame_equal(result, expected)
keys = ["baz", "foo", "bar"]
result = concat(frames, keys=keys)
expected = concat([frames[k] for k in keys], keys=keys)
tm.assert_frame_equal(result, expected)
def test_concat_ignore_index(self, sort):
frame1 = DataFrame(
{"test1": ["a", "b", "c"], "test2": [1, 2, 3], "test3": [4.5, 3.2, 1.2]}
)
frame2 = DataFrame({"test3": [5.2, 2.2, 4.3]})
frame1.index = Index(["x", "y", "z"])
frame2.index = Index(["x", "y", "q"])
v1 = concat([frame1, frame2], axis=1, ignore_index=True, sort=sort)
nan = np.nan
expected = DataFrame(
[
[nan, nan, nan, 4.3],
["a", 1, 4.5, 5.2],
["b", 2, 3.2, 2.2],
["c", 3, 1.2, nan],
],
index=Index(["q", "x", "y", "z"]),
)
if not sort:
expected = expected.loc[["x", "y", "z", "q"]]
tm.assert_frame_equal(v1, expected)
@pytest.mark.parametrize(
"name_in1,name_in2,name_in3,name_out",
[
("idx", "idx", "idx", "idx"),
("idx", "idx", None, None),
("idx", None, None, None),
("idx1", "idx2", None, None),
("idx1", "idx1", "idx2", None),
("idx1", "idx2", "idx3", None),
(None, None, None, None),
],
)
def test_concat_same_index_names(self, name_in1, name_in2, name_in3, name_out):
# GH13475
indices = [
Index(["a", "b", "c"], name=name_in1),
Index(["b", "c", "d"], name=name_in2),
Index(["c", "d", "e"], name=name_in3),
]
frames = [
DataFrame({c: [0, 1, 2]}, index=i) for i, c in zip(indices, ["x", "y", "z"])
]
result = pd.concat(frames, axis=1)
exp_ind = Index(["a", "b", "c", "d", "e"], name=name_out)
expected = DataFrame(
{
"x": [0, 1, 2, np.nan, np.nan],
"y": [np.nan, 0, 1, 2, np.nan],
"z": [np.nan, np.nan, 0, 1, 2],
},
index=exp_ind,
)
tm.assert_frame_equal(result, expected)
def test_concat_multiindex_with_keys(self):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["first", "second"],
)
frame = DataFrame(
np.random.randn(10, 3),
index=index,
columns=Index(["A", "B", "C"], name="exp"),
)
result = concat([frame, frame], keys=[0, 1], names=["iteration"])
assert result.index.names == ("iteration",) + index.names
tm.assert_frame_equal(result.loc[0], frame)
tm.assert_frame_equal(result.loc[1], frame)
assert result.index.nlevels == 3
def test_concat_multiindex_with_none_in_index_names(self):
# GH 15787
index = pd.MultiIndex.from_product([[1], range(5)], names=["level1", None])
df = DataFrame({"col": range(5)}, index=index, dtype=np.int32)
result = concat([df, df], keys=[1, 2], names=["level2"])
index = pd.MultiIndex.from_product(
[[1, 2], [1], range(5)], names=["level2", "level1", None]
)
expected = DataFrame({"col": list(range(5)) * 2}, index=index, dtype=np.int32)
tm.assert_frame_equal(result, expected)
result = concat([df, df[:2]], keys=[1, 2], names=["level2"])
level2 = [1] * 5 + [2] * 2
level1 = [1] * 7
no_name = list(range(5)) + list(range(2))
tuples = list(zip(level2, level1, no_name))
index = pd.MultiIndex.from_tuples(tuples, names=["level2", "level1", None])
expected = DataFrame({"col": no_name}, index=index, dtype=np.int32)
tm.assert_frame_equal(result, expected)
def test_concat_keys_and_levels(self):
df = DataFrame(np.random.randn(1, 3))
df2 = DataFrame(np.random.randn(1, 4))
levels = [["foo", "baz"], ["one", "two"]]
names = ["first", "second"]
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
names=names,
)
expected = concat([df, df2, df, df2])
exp_index = MultiIndex(
levels=levels + [[0]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1], [0, 0, 0, 0]],
names=names + [None],
)
expected.index = exp_index
tm.assert_frame_equal(result, expected)
# no names
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
)
assert result.index.names == (None,) * 3
# no levels
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
names=["first", "second"],
)
assert result.index.names == ("first", "second", None)
tm.assert_index_equal(
result.index.levels[0], Index(["baz", "foo"], name="first")
)
def test_concat_keys_levels_no_overlap(self):
# GH #1406
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
msg = "Values not found in passed level"
with pytest.raises(ValueError, match=msg):
concat([df, df], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
msg = "Key one not in level"
with pytest.raises(ValueError, match=msg):
concat([df, df2], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
def test_concat_rename_index(self):
a = DataFrame(
np.random.rand(3, 3),
columns=list("ABC"),
index=Index(list("abc"), name="index_a"),
)
b = DataFrame(
np.random.rand(3, 3),
columns=list("ABC"),
index=Index(list("abc"), name="index_b"),
)
result = concat([a, b], keys=["key0", "key1"], names=["lvl0", "lvl1"])
exp = concat([a, b], keys=["key0", "key1"], names=["lvl0"])
names = list(exp.index.names)
names[1] = "lvl1"
exp.index.set_names(names, inplace=True)
tm.assert_frame_equal(result, exp)
assert result.index.names == exp.index.names
def test_crossed_dtypes_weird_corner(self):
columns = ["A", "B", "C", "D"]
df1 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="f8"),
"B": np.array([1, 2, 3, 4], dtype="i8"),
"C": np.array([1, 2, 3, 4], dtype="f8"),
"D": np.array([1, 2, 3, 4], dtype="i8"),
},
columns=columns,
)
df2 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="i8"),
"B": np.array([1, 2, 3, 4], dtype="f8"),
"C": np.array([1, 2, 3, 4], dtype="i8"),
"D": np.array([1, 2, 3, 4], dtype="f8"),
},
columns=columns,
)
appended = df1.append(df2, ignore_index=True)
expected = DataFrame(
np.concatenate([df1.values, df2.values], axis=0), columns=columns
)
tm.assert_frame_equal(appended, expected)
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
result = concat([df, df2], keys=["one", "two"], names=["first", "second"])
assert result.index.names == ("first", "second")
def test_dups_index(self):
# GH 4771
# single dtypes
df = DataFrame(
np.random.randint(0, 10, size=40).reshape(10, 4),
columns=["A", "A", "C", "C"],
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result.iloc[:, :4], df)
tm.assert_frame_equal(result.iloc[:, 4:], df)
result = concat([df, df], axis=0)
tm.assert_frame_equal(result.iloc[:10], df)
tm.assert_frame_equal(result.iloc[10:], df)
# multi dtypes
df = concat(
[
DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
),
],
axis=1,
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result.iloc[:, :6], df)
tm.assert_frame_equal(result.iloc[:, 6:], df)
result = concat([df, df], axis=0)
tm.assert_frame_equal(result.iloc[:10], df)
tm.assert_frame_equal(result.iloc[10:], df)
# append
result = df.iloc[0:8, :].append(df.iloc[8:])
tm.assert_frame_equal(result, df)
result = df.iloc[0:8, :].append(df.iloc[8:9]).append(df.iloc[9:10])
tm.assert_frame_equal(result, df)
expected = concat([df, df], axis=0)
result = df.append(df)
tm.assert_frame_equal(result, expected)
def test_with_mixed_tuples(self, sort):
# 10697
# columns have mixed tuples, so handle properly
df1 = DataFrame({"A": "foo", ("B", 1): "bar"}, index=range(2))
df2 = DataFrame({"B": "foo", ("B", 1): "bar"}, index=range(2))
# it works
concat([df1, df2], sort=sort)
def test_handle_empty_objects(self, sort):
df = DataFrame(np.random.randn(10, 4), columns=list("abcd"))
baz = df[:5].copy()
baz["foo"] = "bar"
empty = df[5:5]
frames = [baz, empty, empty, df[5:]]
concatted = concat(frames, axis=0, sort=sort)
expected = df.reindex(columns=["a", "b", "c", "d", "foo"])
expected["foo"] = expected["foo"].astype("O")
expected.loc[0:4, "foo"] = "bar"
tm.assert_frame_equal(concatted, expected)
# empty as first element with time series
# GH3259
df = DataFrame(
dict(A=range(10000)), index=date_range("20130101", periods=10000, freq="s")
)
empty = DataFrame()
result = concat([df, empty], axis=1)
tm.assert_frame_equal(result, df)
result = concat([empty, df], axis=1)
tm.assert_frame_equal(result, df)
result = concat([df, empty])
tm.assert_frame_equal(result, df)
result = concat([empty, df])
tm.assert_frame_equal(result, df)
def test_concat_mixed_objs(self):
# concat mixed series/frames
# G2385
# axis 1
index = date_range("01-Jan-2013", periods=10, freq="H")
arr = np.arange(10, dtype="int64")
s1 = Series(arr, index=index)
s2 = Series(arr, index=index)
df = DataFrame(arr.reshape(-1, 1), index=index)
expected = DataFrame(
np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 0]
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 1]
)
result = concat([s1, s2], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2]
)
result = concat([s1, s2, s1], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 5).reshape(-1, 5), index=index, columns=[0, 0, 1, 2, 3]
)
result = concat([s1, df, s2, s2, s1], axis=1)
tm.assert_frame_equal(result, expected)
# with names
s1.name = "foo"
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, 0]
)
result = concat([s1, df, s2], axis=1)
tm.assert_frame_equal(result, expected)
s2.name = "bar"
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, "bar"]
)
result = concat([s1, df, s2], axis=1)
tm.assert_frame_equal(result, expected)
# ignore index
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2]
)
result = concat([s1, df, s2], axis=1, ignore_index=True)
tm.assert_frame_equal(result, expected)
# axis 0
expected = DataFrame(
np.tile(arr, 3).reshape(-1, 1), index=index.tolist() * 3, columns=[0]
)
result = concat([s1, df, s2])
tm.assert_frame_equal(result, expected)
expected = DataFrame(np.tile(arr, 3).reshape(-1, 1), columns=[0])
result = concat([s1, df, s2], ignore_index=True)
tm.assert_frame_equal(result, expected)
def test_empty_dtype_coerce(self):
# xref to #12411
# xref to #12045
# xref to #11594
# see below
# 10571
df1 = DataFrame(data=[[1, None], [2, None]], columns=["a", "b"])
df2 = DataFrame(data=[[3, None], [4, None]], columns=["a", "b"])
result = concat([df1, df2])
expected = df1.dtypes
tm.assert_series_equal(result.dtypes, expected)
    def test_dtype_coercion(self):
# 12411
df = DataFrame({"date": [pd.Timestamp("20130101").tz_localize("UTC"), pd.NaT]})
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
# 12045
import datetime
df = DataFrame(
{"date": [datetime.datetime(2012, 1, 1), datetime.datetime(1012, 1, 2)]}
)
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
# 11594
df = DataFrame({"text": ["some words"] + [None] * 9})
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
def test_concat_series(self):
ts = tm.makeTimeSeries()
ts.name = "foo"
pieces = [ts[:5], ts[5:15], ts[15:]]
result = concat(pieces)
tm.assert_series_equal(result, ts)
assert result.name == ts.name
result = concat(pieces, keys=[0, 1, 2])
expected = ts.copy()
ts.index = DatetimeIndex(np.array(ts.index.values, dtype="M8[ns]"))
exp_codes = [np.repeat([0, 1, 2], [len(x) for x in pieces]), np.arange(len(ts))]
exp_index = MultiIndex(levels=[[0, 1, 2], ts.index], codes=exp_codes)
expected.index = exp_index
tm.assert_series_equal(result, expected)
    def test_concat_series_axis1(self, sort):
ts = tm.makeTimeSeries()
pieces = [ts[:-2], ts[2:], ts[2:-2]]
result = concat(pieces, axis=1)
expected = DataFrame(pieces).T
tm.assert_frame_equal(result, expected)
result = concat(pieces, keys=["A", "B", "C"], axis=1)
expected = DataFrame(pieces, index=["A", "B", "C"]).T
tm.assert_frame_equal(result, expected)
# preserve series names, #2489
s = Series(randn(5), name="A")
s2 = Series(randn(5), name="B")
result = concat([s, s2], axis=1)
expected = DataFrame({"A": s, "B": s2})
tm.assert_frame_equal(result, expected)
s2.name = None
result = concat([s, s2], axis=1)
tm.assert_index_equal(result.columns, Index(["A", 0], dtype="object"))
# must reindex, #2603
s = Series(randn(3), index=["c", "a", "b"], name="A")
s2 = Series(randn(4), index=["d", "a", "b", "c"], name="B")
result = concat([s, s2], axis=1, sort=sort)
expected = DataFrame({"A": s, "B": s2})
tm.assert_frame_equal(result, expected)
def test_concat_series_axis1_names_applied(self):
# ensure names argument is not ignored on axis=1, #23490
s = Series([1, 2, 3])
s2 = Series([4, 5, 6])
result = concat([s, s2], axis=1, keys=["a", "b"], names=["A"])
expected = DataFrame(
[[1, 4], [2, 5], [3, 6]], columns=Index(["a", "b"], name="A")
)
tm.assert_frame_equal(result, expected)
result = concat([s, s2], axis=1, keys=[("a", 1), ("b", 2)], names=["A", "B"])
expected = DataFrame(
[[1, 4], [2, 5], [3, 6]],
columns=MultiIndex.from_tuples([("a", 1), ("b", 2)], names=["A", "B"]),
)
tm.assert_frame_equal(result, expected)
def test_concat_single_with_key(self):
df = DataFrame(np.random.randn(10, 4))
result = concat([df], keys=["foo"])
expected = concat([df, df], keys=["foo", "bar"])
tm.assert_frame_equal(result, expected[:10])
def test_concat_exclude_none(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df[:5], None, None, df[5:]]
result = concat(pieces)
tm.assert_frame_equal(result, df)
with pytest.raises(ValueError, match="All objects passed were None"):
concat([None, None])
def test_concat_timedelta64_block(self):
from pandas import to_timedelta
rng = to_timedelta(np.arange(10), unit="s")
df = DataFrame({"time": rng})
result = concat([df, df])
assert (result.iloc[:10]["time"] == rng).all()
assert (result.iloc[10:]["time"] == rng).all()
def test_concat_keys_with_none(self):
# #1649
df0 = DataFrame([[10, 20, 30], [10, 20, 30], [10, 20, 30]])
result = concat(dict(a=None, b=df0, c=df0[:2], d=df0[:1], e=df0))
expected = concat(dict(b=df0, c=df0[:2], d=df0[:1], e=df0))
tm.assert_frame_equal(result, expected)
result = concat(
[None, df0, df0[:2], df0[:1], df0], keys=["a", "b", "c", "d", "e"]
)
expected = concat([df0, df0[:2], df0[:1], df0], keys=["b", "c", "d", "e"])
tm.assert_frame_equal(result, expected)
def test_concat_bug_1719(self):
ts1 = tm.makeTimeSeries()
ts2 = tm.makeTimeSeries()[::2]
# to join with union
# these two are of different length!
left = concat([ts1, ts2], join="outer", axis=1)
right = concat([ts2, ts1], join="outer", axis=1)
assert len(left) == len(right)
def test_concat_bug_2972(self):
ts0 = Series(np.zeros(5))
ts1 = Series(np.ones(5))
ts0.name = ts1.name = "same name"
result = concat([ts0, ts1], axis=1)
expected = DataFrame({0: ts0, 1: ts1})
expected.columns = ["same name", "same name"]
tm.assert_frame_equal(result, expected)
def test_concat_bug_3602(self):
# GH 3602, duplicate columns
df1 = DataFrame(
{
"firmNo": [0, 0, 0, 0],
"prc": [6, 6, 6, 6],
"stringvar": ["rrr", "rrr", "rrr", "rrr"],
}
)
df2 = DataFrame(
{"C": [9, 10, 11, 12], "misc": [1, 2, 3, 4], "prc": [6, 6, 6, 6]}
)
expected = DataFrame(
[
[0, 6, "rrr", 9, 1, 6],
[0, 6, "rrr", 10, 2, 6],
[0, 6, "rrr", 11, 3, 6],
[0, 6, "rrr", 12, 4, 6],
]
)
expected.columns = ["firmNo", "prc", "stringvar", "C", "misc", "prc"]
result = concat([df1, df2], axis=1)
tm.assert_frame_equal(result, expected)
def test_concat_inner_join_empty(self):
# GH 15328
df_empty = DataFrame()
df_a = DataFrame({"a": [1, 2]}, index=[0, 1], dtype="int64")
df_expected = DataFrame({"a": []}, index=[], dtype="int64")
for how, expected in [("inner", df_expected), ("outer", df_a)]:
result = pd.concat([df_a, df_empty], axis=1, join=how)
tm.assert_frame_equal(result, expected)
def test_concat_series_axis1_same_names_ignore_index(self):
dates = date_range("01-Jan-2013", "01-Jan-2014", freq="MS")[0:-1]
s1 = Series(randn(len(dates)), index=dates, name="value")
s2 = Series(randn(len(dates)), index=dates, name="value")
result = concat([s1, s2], axis=1, ignore_index=True)
expected = Index([0, 1])
tm.assert_index_equal(result.columns, expected)
def test_concat_iterables(self):
# GH8645 check concat works with tuples, list, generators, and weird
# stuff like deque and custom iterables
df1 = DataFrame([1, 2, 3])
df2 = DataFrame([4, 5, 6])
expected = DataFrame([1, 2, 3, 4, 5, 6])
tm.assert_frame_equal(concat((df1, df2), ignore_index=True), expected)
tm.assert_frame_equal(concat([df1, df2], ignore_index=True), expected)
tm.assert_frame_equal(
concat((df for df in (df1, df2)), ignore_index=True), expected
)
tm.assert_frame_equal(concat(deque((df1, df2)), ignore_index=True), expected)
class CustomIterator1:
def __len__(self) -> int:
return 2
def __getitem__(self, index):
try:
return {0: df1, 1: df2}[index]
except KeyError as err:
raise IndexError from err
tm.assert_frame_equal(pd.concat(CustomIterator1(), ignore_index=True), expected)
class CustomIterator2(abc.Iterable):
def __iter__(self):
yield df1
yield df2
tm.assert_frame_equal(pd.concat(CustomIterator2(), ignore_index=True), expected)
def test_concat_invalid(self):
# trying to concat a ndframe with a non-ndframe
df1 = tm.makeCustomDataframe(10, 2)
for obj in [1, dict(), [1, 2], (1, 2)]:
msg = (
f"cannot concatenate object of type '{type(obj)}'; "
"only Series and DataFrame objs are valid"
)
with pytest.raises(TypeError, match=msg):
concat([df1, obj])
def test_concat_invalid_first_argument(self):
df1 = tm.makeCustomDataframe(10, 2)
df2 = tm.makeCustomDataframe(10, 2)
msg = (
"first argument must be an iterable of pandas "
'objects, you passed an object of type "DataFrame"'
)
with pytest.raises(TypeError, match=msg):
concat(df1, df2)
# generator ok though
concat(DataFrame(np.random.rand(5, 5)) for _ in range(3))
# text reader ok
# GH6583
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
reader = read_csv(StringIO(data), chunksize=1)
result = concat(reader, ignore_index=True)
expected = read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
def test_concat_empty_series(self):
# GH 11082
s1 = Series([1, 2, 3], name="x")
s2 = Series(name="y", dtype="float64")
res = pd.concat([s1, s2], axis=1)
exp = DataFrame(
{"x": [1, 2, 3], "y": [np.nan, np.nan, np.nan]},
index=Index([0, 1, 2], dtype="O"),
)
tm.assert_frame_equal(res, exp)
s1 = Series([1, 2, 3], name="x")
s2 = Series(name="y", dtype="float64")
res = pd.concat([s1, s2], axis=0)
# name will be reset
exp = Series([1, 2, 3])
tm.assert_series_equal(res, exp)
# empty Series with no name
s1 = Series([1, 2, 3], name="x")
s2 = Series(name=None, dtype="float64")
res = pd.concat([s1, s2], axis=1)
exp = DataFrame(
{"x": [1, 2, 3], 0: [np.nan, np.nan, np.nan]},
columns=["x", 0],
index=Index([0, 1, 2], dtype="O"),
)
tm.assert_frame_equal(res, exp)
@pytest.mark.parametrize("tz", [None, "UTC"])
@pytest.mark.parametrize("values", [[], [1, 2, 3]])
def test_concat_empty_series_timelike(self, tz, values):
# GH 18447
first = Series([], dtype="M8[ns]").dt.tz_localize(tz)
dtype = None if values else np.float64
second = Series(values, dtype=dtype)
expected = DataFrame(
{
0: Series([pd.NaT] * len(values), dtype="M8[ns]").dt.tz_localize(tz),
1: values,
}
)
result = concat([first, second], axis=1)
tm.assert_frame_equal(result, expected)
def test_default_index(self):
# is_series and ignore_index
s1 = Series([1, 2, 3], name="x")
s2 = Series([4, 5, 6], name="y")
res = pd.concat([s1, s2], axis=1, ignore_index=True)
assert isinstance(res.columns, pd.RangeIndex)
exp = DataFrame([[1, 4], [2, 5], [3, 6]])
# use check_index_type=True to check the result have
# RangeIndex (default index)
tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True)
# is_series and all inputs have no names
s1 = Series([1, 2, 3])
s2 = Series([4, 5, 6])
res = pd.concat([s1, s2], axis=1, ignore_index=False)
assert isinstance(res.columns, pd.RangeIndex)
exp = DataFrame([[1, 4], [2, 5], [3, 6]])
exp.columns = pd.RangeIndex(2)
tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True)
# is_dataframe and ignore_index
df1 = DataFrame({"A": [1, 2], "B": [5, 6]})
df2 = DataFrame({"A": [3, 4], "B": [7, 8]})
res = pd.concat([df1, df2], axis=0, ignore_index=True)
exp = DataFrame([[1, 5], [2, 6], [3, 7], [4, 8]], columns=["A", "B"])
tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True)
res = pd.concat([df1, df2], axis=1, ignore_index=True)
exp = DataFrame([[1, 5, 3, 7], [2, 6, 4, 8]])
tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True)
def test_concat_multiindex_rangeindex(self):
# GH13542
# when multi-index levels are RangeIndex objects
# there is a bug in concat with objects of len 1
df = DataFrame(np.random.randn(9, 2))
df.index = MultiIndex(
levels=[pd.RangeIndex(3), pd.RangeIndex(3)],
codes=[np.repeat(np.arange(3), 3), np.tile(np.arange(3), 3)],
)
res = concat([df.iloc[[2, 3, 4], :], df.iloc[[5], :]])
exp = df.iloc[[2, 3, 4, 5], :]
tm.assert_frame_equal(res, exp)
def test_concat_multiindex_dfs_with_deepcopy(self):
# GH 9967
from copy import deepcopy
example_multiindex1 = pd.MultiIndex.from_product([["a"], ["b"]])
example_dataframe1 = DataFrame([0], index=example_multiindex1)
example_multiindex2 = pd.MultiIndex.from_product([["a"], ["c"]])
example_dataframe2 = DataFrame([1], index=example_multiindex2)
example_dict = {"s1": example_dataframe1, "s2": example_dataframe2}
expected_index = pd.MultiIndex(
levels=[["s1", "s2"], ["a"], ["b", "c"]],
codes=[[0, 1], [0, 0], [0, 1]],
names=["testname", None, None],
)
expected = DataFrame([[0], [1]], index=expected_index)
result_copy = pd.concat(deepcopy(example_dict), names=["testname"])
tm.assert_frame_equal(result_copy, expected)
result_no_copy = pd.concat(example_dict, names=["testname"])
tm.assert_frame_equal(result_no_copy, expected)
def test_categorical_concat_append(self):
cat = Categorical(["a", "b"], categories=["a", "b"])
vals = [1, 2]
df = DataFrame({"cats": cat, "vals": vals})
cat2 = Categorical(["a", "b", "a", "b"], categories=["a", "b"])
vals2 = [1, 2, 1, 2]
exp = DataFrame({"cats": cat2, "vals": vals2}, index=Index([0, 1, 0, 1]))
tm.assert_frame_equal(pd.concat([df, df]), exp)
tm.assert_frame_equal(df.append(df), exp)
# GH 13524 can concat different categories
        cat3 = Categorical(["a", "b"], categories=["a", "b", "c"])
#LC Jan/Feb 2022
# This module is part of the extensions to the project. Its purpose is to create a map of stations and relative water levels.
# This module is WIP
# Suggestions LC:
# Maybe try to implement importing directly to a pandas dataframe and stations?
# Maybe try to implement using a 'bubble map' with sliders for last x days
# Maybe try and change colour depending on flood risk - could wait for 2G to be done to do this
# Option to not view stations which have invalid ranges
# Option to not view stations with a relative water level below the tolerance
from numpy import integer
import pandas as pd
import plotly.express as px
from floodsystem.stationdata import update_water_levels
def generate_layer_map(stations, tol=0, colour = "black"):
update_water_levels(stations)
station_names = []
locations = []
relative_levels = []
lats = []
longs = []
towns = []
    # Keep only stations with a valid relative level in the plausible range [0, 5].
    # Building a new list avoids popping from the list while iterating over it,
    # which skips elements and misaligns the index counter.
    stations = [
        station for station in stations
        if station.relative_water_level() is not None
        and 0 <= station.relative_water_level() <= 5
    ]
for item in stations:
station_names.append(item.name)
locations.append(item.coord)
relative_levels.append(item.relative_water_level())
towns.append(item.town)
for item in locations:
lats.append(item[0])
longs.append(item[1])
d = {'Station Name': station_names, 'Latitude' : lats, 'Longitude' : longs, 'Relative Level': relative_levels, 'Town': towns }
    stations_data_frame = pd.DataFrame(data=d)
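    # Illustrative sketch (not part of the original, still-WIP module): one way to
    # render the assembled DataFrame as an interactive map with plotly express.
    # The zoom level, tile style and the choice to return the figure are assumptions;
    # the `tol` and `colour` parameters are not used by this sketch.
    fig = px.scatter_mapbox(
        stations_data_frame,
        lat="Latitude",
        lon="Longitude",
        color="Relative Level",
        hover_name="Station Name",
        hover_data=["Town"],
        zoom=5,
        mapbox_style="open-street-map",
    )
    return fig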
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""This script is used for measuring some coefficients of the molecules."""
import numpy as np
import pandas as pd
from rdkit import Chem, DataStructs
from rdkit.Chem import AllChem, Crippen, Descriptors as desc, Lipinski, MolSurf
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.preprocessing import StandardScaler as Scaler
from tqdm import tqdm
from drugex.core import util
def converage(fnames):
"""This method parsed log files of reinforcement learning process
Arguments:
fnames (list): List of log file paths
Returns:
        output (DataFrame): A table with two columns: LABEL (the index of the
            log file in `fnames`) and SCORE (the average reward of the batch of
            SMILES given by the environment at that step).
"""
xy = []
for i, fname in enumerate(fnames):
lines = open(fname).readlines()
for line in lines:
if not line.startswith('Epoch'): continue
# The average reward at current step in ith log file
score = float(line.split(' ')[3])
xy.append([i, score])
output = pd.DataFrame(xy, columns=['LABEL', 'SCORE'])
return output
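# Illustrative usage sketch (assumption, not part of the original script): plot the
# reward curves parsed by converage(). The log file names below are placeholders.
#
#     import matplotlib.pyplot as plt
#     scores = converage(['output/rl_run0.log', 'output/rl_run1.log'])
#     for label, group in scores.groupby('LABEL'):
#         plt.plot(group.index, group.SCORE, label='run %d' % label)
#     plt.xlabel('Training step')
#     plt.ylabel('Average reward')
#     plt.legend()
#     plt.show()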
def training_process(fname):
"""This method parsed log files of RNN training process
Arguments:
fname (str): log file paths of RNN training
Returns:
valid (ndarray): The validation rate at each epoch during the training process.
loss (ndarray): The value of loss function at each epoch during the training process.
"""
log = open(fname)
valid = []
loss = []
for line in log:
if not line.startswith('Epoch:'): continue
data = line.split(' ')
valid.append(float(data[-1]))
loss.append(float(data[-3]))
valid, loss = np.array(valid), np.array(loss)
return valid, loss
def logP_mw(fnames, is_active=False):
""" logP and molecular weight calculation for logP ~ MW chemical space visualization
Arguments:
        fnames (list): List of file paths to tables that contain CANONICAL_SMILES
            (and optionally LOGP and MWT columns holding the logP and molecular
            weight of each molecule).
is_active (bool, optional): selecting only active ligands (True) or all of the molecules (False)
if it is true, the molecule with PCHEMBL_VALUE >= 6.5 or SCORE > 0.5 will be selected.
(Default: False)
Returns:
df (DataFrame): The table contains three columns;
molecular weight, logP and index of file name in the fnames
"""
df = pd.DataFrame()
for i, fname in enumerate(fnames):
print(fname)
sub = pd.read_table(fname)
sub['LABEL'] = i
if 'PCHEMBL_VALUE' in sub.columns:
sub = sub[sub.PCHEMBL_VALUE >= (6.5 if is_active else 0)]
elif 'SCORE' in sub.columns:
sub = sub[sub.SCORE > (0.5 if is_active else 0)]
sub = sub.drop_duplicates(subset='CANONICAL_SMILES')
if not ('LOGP' in sub.columns and 'MWT' in sub.columns):
            # If the table does not contain LOGP and MWT,
# it will calculate these coefficients with RDKit.
logp, mwt = [], []
for i, row in sub.iterrows():
try:
mol = Chem.MolFromSmiles(row.CANONICAL_SMILES)
x, y = desc.MolWt(mol), Crippen.MolLogP(mol)
logp.append(y)
mwt.append(x)
except:
sub = sub.drop(i)
print(row.CANONICAL_SMILES)
sub['LOGP'], sub['MWT'] = logp, mwt
df = df.append(sub[['MWT', 'LOGP', 'LABEL']])
return df
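# Illustrative usage sketch (assumption): scatter-plot the logP ~ MW chemical space
# returned by logP_mw(). The file names below are placeholders.
#
#     import matplotlib.pyplot as plt
#     space = logP_mw(['data/chembl_set.txt', 'output/sampled_set.txt'], is_active=True)
#     for label, group in space.groupby('LABEL'):
#         plt.scatter(group.MWT, group.LOGP, s=2, label='file %d' % label)
#     plt.xlabel('Molecular weight')
#     plt.ylabel('logP')
#     plt.legend()
#     plt.show()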
def dimension(fnames, fp='ECFP', is_active=False, alg='PCA', maximum=int(1e5)):
""" Dimension reduction analysis it contains two algorithms: PCA and t-SNE,
and two different descriptors: ECFP6 and PhysChem
Arguments:
fnames (list): List of file paths that contains CANONICAL_SMILES and SCORE (or PCHEMBL_VALUE).
fp (str, optional): The descriptors for each molecule, either ECFP6 or PhysChem (Default: 'ECFP')
is_active (bool, optional): selecting only active ligands (True) or all of the molecules (False)
if it is true, the molecule with PCHEMBL_VALUE >= 6.5 or SCORE > 0.5 will be selected.
(Default: False)
alg (str, optional): Dimension reduction algorithms, either 'PCA' or 't-SNE' (Default: 'PCA')
        maximum (int, optional): Dimension reduction on a large dataset is extremely
            time- and resource-consuming; if the dataset in one file is larger than this
            threshold, at most `maximum` samples are randomly selected (Default: 100,000)
Returns:
df (DataFrame): the table contains two columns, component 1 and 2.
"""
    df = pd.DataFrame()
import argparse
import numpy as np
import pandas as pd
import sys
import datetime as dt
from dateutil.parser import parse
from Kernel import Kernel
from util import util
from util.order import LimitOrder
from util.oracle.SparseMeanRevertingOracle import SparseMeanRevertingOracle
from util.oracle.ExternalFileOracle import ExternalFileOracle
from model.LatencyModel import LatencyModel
from agent.ExchangeAgent import ExchangeAgent
from agent.NoiseAgent import NoiseAgent
from agent.ValueAgent import ValueAgent
from agent.market_makers.AdaptiveMarketMakerAgent import AdaptiveMarketMakerAgent
from agent.examples.MomentumAgent import MomentumAgent
from agent.execution.TWAPExecutionAgent import TWAPExecutionAgent
from agent.execution.VWAPExecutionAgent import VWAPExecutionAgent
from agent.execution.POVExecutionAgent import POVExecutionAgent
########################################################################################################################
############################################### GENERAL CONFIG #########################################################
parser = argparse.ArgumentParser(description='Detailed options for the config.')
parser.add_argument('-c',
'--config',
required=True,
help='Name of config file to execute')
parser.add_argument('-t',
'--ticker',
required=True,
help='Ticker (symbol) to use for simulation')
parser.add_argument('-d', '--historical-date',
required=True,
type=parse,
help='historical date being simulated in format YYYYMMDD.')
parser.add_argument('-f',
'--fundamental-file-path',
required=False,
help="Path to external fundamental file.")
parser.add_argument('-e',
'--execution_agents',
action='store_true',
help='Flag to add the execution agents')
parser.add_argument('-s',
'--seed',
type=int,
default=None,
help='numpy.random.seed() for simulation')
parser.add_argument('-l',
'--log_dir',
default=None,
help='Log directory name (default: unix timestamp at program start)')
parser.add_argument('-v',
'--verbose',
action='store_true',
help='Maximum verbosity!')
parser.add_argument('--config_help',
action='store_true',
help='Print argument options for this config file')
args, remaining_args = parser.parse_known_args()
if args.config_help:
parser.print_help()
sys.exit()
seed = args.seed # Random seed specification on the command line.
if not seed: seed = int(pd.Timestamp.now().timestamp() * 1000000) % (2 ** 32 - 1)
np.random.seed(seed)
util.silent_mode = not args.verbose
LimitOrder.silent_mode = not args.verbose
simulation_start_time = dt.datetime.now()
print("Simulation Start Time: {}".format(simulation_start_time))
print("Configuration seed: {}".format(seed))
######################## Agents Config #########################################################################
# Historical date to simulate.
historical_date = pd.to_datetime(args.historical_date)
mkt_open = historical_date + pd.to_timedelta('09:30:00')
mkt_close = historical_date + pd.to_timedelta('11:30:00')
"""
Prepare training and testing datasets as CSV dictionaries 2.0
Created on 04/26/2019; modified on 11/06/2019
@author: RH
"""
import os
import pandas as pd
import sklearn.utils as sku
import numpy as np
import re
# get all full paths of images
def image_ids_in(root_dir, ignore=['.DS_Store', 'dict.csv', 'all.csv']):
ids = []
for id in os.listdir(root_dir):
if id in ignore:
print('Skipping ID:', id)
else:
ids.append(id)
return ids
# Get intersection of 2 lists
def intersection(lst1, lst2):
lst3 = [value for value in lst1 if value in lst2]
return lst3
def tile_ids_in(inp):
ids = []
try:
for id in os.listdir(inp['path']):
if '_{}.png'.format(str(inp['sldnum'])) in id:
ids.append([inp['slide'], inp['level'], inp['path']+'/'+id, inp['BMI'], inp['age'], inp['label']])
except FileNotFoundError:
print('Ignore:', inp['path'])
return ids
# pair tiles of 10x, 5x, 2.5x of the same area
def paired_tile_ids_in(patient, slide, tumor, label, root_dir):
dira = os.path.isdir(root_dir + 'level1')
dirb = os.path.isdir(root_dir + 'level2')
dirc = os.path.isdir(root_dir + 'level3')
if dira and dirb and dirc:
fac = 1000
ids = []
for level in range(1, 4):
dirr = root_dir + 'level{}'.format(str(level))
for id in os.listdir(dirr):
if '.png' in id:
x = int(float(id.split('x-', 1)[1].split('-', 1)[0]) / fac)
y = int(float(re.split('.p', id.split('y-', 1)[1])[0]) / fac)
ids.append([patient, slide, tumor, label, level, dirr + '/' + id, x, y])
ids = pd.DataFrame(ids, columns=['Patient_ID', 'Slide_ID', 'Tumor', 'label', 'level', 'path', 'x', 'y'])
idsa = ids.loc[ids['level'] == 1]
idsa = idsa.drop(columns=['level'])
idsa = idsa.rename(index=str, columns={"path": "L1path"})
idsb = ids.loc[ids['level'] == 2]
idsb = idsb.drop(columns=['Patient_ID', 'Slide_ID', 'Tumor', 'label', 'level'])
idsb = idsb.rename(index=str, columns={"path": "L2path"})
idsc = ids.loc[ids['level'] == 3]
idsc = idsc.drop(columns=['Patient_ID', 'Slide_ID', 'Tumor', 'label', 'level'])
idsc = idsc.rename(index=str, columns={"path": "L3path"})
idsa = pd.merge(idsa, idsb, on=['x', 'y'], how='left', validate="many_to_many")
idsa['x'] = idsa['x'] - (idsa['x'] % 2)
idsa['y'] = idsa['y'] - (idsa['y'] % 2)
idsa = pd.merge(idsa, idsc, on=['x', 'y'], how='left', validate="many_to_many")
idsa = idsa.drop(columns=['x', 'y'])
idsa = idsa.dropna()
idsa = sku.shuffle(idsa)
else:
print('Pass: ', root_dir)
idsa = pd.DataFrame(columns=['Patient_ID', 'Slide_ID', 'Tumor', 'label', 'L1path', 'L2path', 'L3path'])
return idsa
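# Note (assumption about the expected on-disk layout): paired_tile_ids_in() expects
#   <root_dir>level1/  (10x tiles), <root_dir>level2/  (5x tiles), <root_dir>level3/  (2.5x tiles),
# with tile file names carrying coordinates such as '..._x-12000-y-34000.png'.
# Tiles are paired across levels by their coarse grid position (coordinates divided
# by 1000, and rounded down to even numbers before matching level 3).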
# Prepare label at per patient level
def big_image_sum(label_col, path, ref_file, pdmd='tumor', exclude=None):
ref = pd.read_csv(ref_file, header=0)
big_images = []
ref = ref.loc[ref[label_col].notna()]
for idx, row in ref.iterrows():
big_images.append([row['Patient_ID'], row['Slide_ID'], row['Tumor'],
path + "/{}/{}/{}/".format(str(row['Tumor']), str(row['Patient_ID']),
row['Slide_ID'].split('-')[-1]), row[label_col]])
datapd = pd.DataFrame(big_images, columns=['Patient_ID', 'Slide_ID', 'Tumor', 'path', 'label'])
datapd = datapd.dropna()
if exclude:
datapd = datapd[~datapd['Tumor'].isin(exclude)]
if pdmd != 'origin':
rm = []
for tu in list(datapd.Tumor.unique()):
for lb in list(datapd.label.unique()):
if datapd.loc[(datapd['Tumor'] == tu) & (datapd['label'] == lb)].shape[0] < 3:
rm.append(tu)
datapd = datapd[~datapd['Tumor'].isin(rm)]
print('Remove rare case cancer types if any: ', rm, flush=True)
return datapd
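# Illustrative usage sketch (assumption): build the per-patient table and split it
# into train/validation/test tile lists. The paths, label column and cut ratio
# below are placeholders.
#
#     labels = big_image_sum('label', '../tiles', '../ref/labels.csv', pdmd='tumor')
#     set_sep(labels, path='../split', cut=0.3)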
# Separate into training, validation and testing; each tumor type keeps the same separation ratio on big images
# test and train csv files contain tiles' path.
def set_sep(alll, path, cut=0.3):
trlist = []
telist = []
valist = []
for tm in list(alll.Tumor.unique()):
sub = alll[alll['Tumor'] == tm]
unq = list(sub.Patient_ID.unique())
np.random.shuffle(unq)
validation = unq[:np.max([int(len(unq) * cut / 2), 1])]
valist.append(sub[sub['Patient_ID'].isin(validation)])
test = unq[np.max([int(len(unq) * cut / 2), 1]):np.max([int(len(unq) * cut), 2])]
telist.append(sub[sub['Patient_ID'].isin(test)])
train = unq[np.max([int(len(unq) * cut), 2]):]
trlist.append(sub[sub['Patient_ID'].isin(train)])
test = pd.concat(telist)
train = pd.concat(trlist)
validation = pd.concat(valist)
test.to_csv(path + '/te_sample_raw.csv', header=True, index=False)
train.to_csv(path + '/tr_sample_raw.csv', header=True, index=False)
validation.to_csv(path + '/va_sample_raw.csv', header=True, index=False)
test_tiles = pd.DataFrame(columns=['Patient_ID', 'Slide_ID', 'Tumor', 'label', 'L1path', 'L2path', 'L3path'])
train_tiles = pd.DataFrame(columns=['Patient_ID', 'Slide_ID', 'Tumor', 'label', 'L1path', 'L2path', 'L3path'])
validation_tiles = pd.DataFrame(columns=['Patient_ID', 'Slide_ID', 'Tumor', 'label', 'L1path', 'L2path', 'L3path'])
for idx, row in test.iterrows():
tile_ids = paired_tile_ids_in(row['Patient_ID'], row['Slide_ID'], row['Tumor'], row['label'], row['path'])
test_tiles = pd.concat([test_tiles, tile_ids])
for idx, row in train.iterrows():
tile_ids = paired_tile_ids_in(row['Patient_ID'], row['Slide_ID'], row['Tumor'], row['label'], row['path'])
        train_tiles = pd.concat([train_tiles, tile_ids])
import numpy as np
from datetime import timedelta
from distutils.version import LooseVersion
import pandas as pd
import pandas.util.testing as tm
from pandas import to_timedelta
from pandas.util.testing import assert_series_equal, assert_frame_equal
from pandas import (Series, Timedelta, DataFrame, Timestamp, TimedeltaIndex,
timedelta_range, date_range, DatetimeIndex, Int64Index,
_np_version_under1p10, Float64Index, Index, tslib)
from pandas.tests.test_base import Ops
class TestTimedeltaIndexOps(Ops):
def setUp(self):
super(TestTimedeltaIndexOps, self).setUp()
mask = lambda x: isinstance(x, TimedeltaIndex)
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = []
def test_ops_properties(self):
self.check_ops_properties(['days', 'hours', 'minutes', 'seconds',
'milliseconds'])
self.check_ops_properties(['microseconds', 'nanoseconds'])
def test_asobject_tolist(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'),
Timedelta('3 days'), Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), pd.NaT,
timedelta(days=4)], name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'), pd.NaT,
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
# monotonic
idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timedelta('1 days')),
self.assertEqual(idx.max(), Timedelta('3 days')),
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = TimedeltaIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
td = TimedeltaIndex(np.asarray(dr))
self.assertEqual(np.min(td), Timedelta('16815 days'))
self.assertEqual(np.max(td), Timedelta('16820 days'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, td, out=0)
self.assertEqual(np.argmin(td), 0)
self.assertEqual(np.argmax(td), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, td, out=0)
def test_round(self):
td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min')
elt = td[1]
expected_rng = TimedeltaIndex([
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 01:00:00'),
Timedelta('16801 days 02:00:00'),
Timedelta('16801 days 02:00:00'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(td.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with self.assertRaisesRegexp(ValueError, msg):
td.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, td.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_representation(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')"""
exp2 = ("TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', "
"freq='D')")
exp3 = ("TimedeltaIndex(['1 days', '2 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp4 = ("TimedeltaIndex(['1 days', '2 days', '3 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp5 = ("TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', "
"'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)")
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """Series([], dtype: timedelta64[ns])"""
exp2 = """0 1 days
dtype: timedelta64[ns]"""
exp3 = """0 1 days
1 2 days
dtype: timedelta64[ns]"""
exp4 = """0 1 days
1 2 days
2 3 days
dtype: timedelta64[ns]"""
exp5 = """0 1 days 00:00:01
1 2 days 00:00:00
2 3 days 00:00:00
dtype: timedelta64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex: 0 entries
Freq: D"""
exp2 = """TimedeltaIndex: 1 entries, 1 days to 1 days
Freq: D"""
exp3 = """TimedeltaIndex: 2 entries, 1 days to 2 days
Freq: D"""
exp4 = """TimedeltaIndex: 3 entries, 1 days to 3 days
Freq: D"""
exp5 = ("TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days "
"00:00:00")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = idx.summary()
self.assertEqual(result, expected)
def test_add_iadd(self):
# only test adding/sub offsets as + is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng + delta
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng + 1
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub_isub(self):
# only test adding/sub offsets as - is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng - delta
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng - 1
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
idx = TimedeltaIndex(['1 day', '2 day'])
msg = "cannot subtract a datelike from a TimedeltaIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx - Timestamp('2011-01-01')
result = Timestamp('2011-01-01') + idx
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
tm.assert_index_equal(result, expected)
def test_ops_compat(self):
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
rng = timedelta_range('1 days', '10 days', name='foo')
# multiply
for offset in offsets:
self.assertRaises(TypeError, lambda: rng * offset)
# divide
expected = Int64Index((np.arange(10) + 1) * 12, name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected, exact=False)
# divide with nats
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = Float64Index([12, np.nan, 24], name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected)
        # don't allow division by NaT (maybe we could allow it in the future)
self.assertRaises(TypeError, lambda: rng / pd.NaT)
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
self.assertRaises(TypeError, lambda: tdi - dt)
self.assertRaises(TypeError, lambda: tdi - dti)
self.assertRaises(TypeError, lambda: td - dt)
self.assertRaises(TypeError, lambda: td - dti)
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
self.assertEqual(result, expected)
self.assertIsInstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
self.assertRaises(TypeError, lambda: dt_tz - ts)
self.assertRaises(TypeError, lambda: dt_tz - dt)
self.assertRaises(TypeError, lambda: dt_tz - ts_tz2)
self.assertRaises(TypeError, lambda: dt - dt_tz)
self.assertRaises(TypeError, lambda: ts - dt_tz)
self.assertRaises(TypeError, lambda: ts_tz2 - ts)
self.assertRaises(TypeError, lambda: ts_tz2 - dt)
self.assertRaises(TypeError, lambda: ts_tz - ts_tz2)
# with dti
self.assertRaises(TypeError, lambda: dti - ts_tz)
self.assertRaises(TypeError, lambda: dti_tz - ts)
self.assertRaises(TypeError, lambda: dti_tz - ts_tz2)
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'H']:
idx = pd.TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
self.assertRaises(ValueError, lambda: tdi + dti[0:1])
self.assertRaises(ValueError, lambda: tdi[0:1] + dti)
# random indexes
self.assertRaises(TypeError, lambda: tdi + Int64Index([1, 2, 3]))
# this is a union!
# self.assertRaises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
self.assertEqual(result, expected)
result = td + dt
expected = Timestamp('20130102')
self.assertEqual(result, expected)
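    # NaT comparisons: == and ordered comparisons against NaT are always False,
    # while != is always True, which is what the next test asserts.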
def test_comp_nat(self):
left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')])
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
idx = timedelta_range('1 days 09:00:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = TimedeltaIndex(np.repeat(idx.values, range(1, len(idx) + 1)))
exp_idx = timedelta_range('1 days 18:00:00', freq='-1H', periods=10)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = timedelta_range('1 days 09:00:00', freq='H', periods=10)
tm.assert_index_equal(idx.unique(), expected)
idx = TimedeltaIndex(['1 days 09:00:00', '1 days 09:00:00',
'1 days 09:00:00', '1 days 08:00:00',
'1 days 08:00:00', pd.NaT])
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00'])
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00',
pd.NaT])
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(TimedeltaIndex, ([0, 1, 0], [0, 0, -1], [0, -1, -1],
['00:01:00', '00:01:00', '00:02:00'],
['00:01:00', '00:01:00', '00:00:01'])):
tm.assertIn(idx[0], idx)
def test_unknown_attribute(self):
# GH 9680
tdi = pd.timedelta_range(start=0, periods=10, freq='1s')
ts = pd.Series(np.random.normal(size=10), index=tdi)
self.assertNotIn('foo', ts.__dict__.keys())
self.assertRaises(AttributeError, lambda: ts.foo)
def test_order(self):
# GH 10295
idx1 = TimedeltaIndex(['1 day', '2 day', '3 day'], freq='D',
name='idx')
idx2 = TimedeltaIndex(
['1 hour', '2 hour', '3 hour'], freq='H', name='idx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, idx[::-1])
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
idx1 = TimedeltaIndex(['1 hour', '3 hour', '5 hour',
'2 hour ', '1 hour'], name='idx1')
exp1 = TimedeltaIndex(['1 hour', '1 hour', '2 hour',
'3 hour', '5 hour'], name='idx1')
idx2 = TimedeltaIndex(['1 day', '3 day', '5 day',
'2 day', '1 day'], name='idx2')
# TODO(wesm): unused?
# exp2 = TimedeltaIndex(['1 day', '1 day', '2 day',
# '3 day', '5 day'], name='idx2')
# idx3 = TimedeltaIndex([pd.NaT, '3 minute', '5 minute',
# '2 minute', pd.NaT], name='idx3')
# exp3 = TimedeltaIndex([pd.NaT, pd.NaT, '2 minute', '3 minute',
# '5 minute'], name='idx3')
for idx, expected in [(idx1, exp1), (idx1, exp1), (idx1, exp1)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx[0]
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx[0:5]
expected = pd.timedelta_range('1 day', '5 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.timedelta_range('1 day', '9 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.timedelta_range('12 day', '24 day', freq='3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = TimedeltaIndex(['5 day', '4 day', '3 day',
'2 day', '1 day'],
freq='-1D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx.take([0])
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx.take([-1])
self.assertEqual(result, pd.Timedelta('31 day'))
result = idx.take([0, 1, 2])
expected = pd.timedelta_range('1 day', '3 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.timedelta_range('1 day', '5 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.timedelta_range('8 day', '2 day', freq='-3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = TimedeltaIndex(['4 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = TimedeltaIndex(['29 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_take_invalid_kwargs(self):
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['D', '3D', '-3D', 'H', '2H', '-2H', 'T', '2T', 'S', '-3S'
]:
idx = pd.timedelta_range('1', freq=freq, periods=10)
result = pd.TimedeltaIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
def test_nat_new(self):
idx = pd.timedelta_range('1', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.TimedeltaIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
idx = pd.TimedeltaIndex([], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
idx = pd.TimedeltaIndex(['5 hours', '6 hours', '9 hours'], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.TimedeltaIndex(['8 hours', '9 hours', '12 hours'], name='xxx')
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = | pd.TimedeltaIndex(['2 hours', '3 hours', '6 hours'], name='xxx') | pandas.TimedeltaIndex |
import numpy as np
import pandas as pd
import pytest
from pydrograph.baseflow import IHmethod
@pytest.fixture(scope='function')
def test_data(test_data_path):
data = pd.read_csv(test_data_path / 'UV_04087088_Discharge_20071001_ab.csv')
data.index = pd.to_datetime(data.date)
return data
@pytest.mark.parametrize('freq', ('D', '6H'))
@pytest.mark.parametrize('interp_semilog', (False, True))
@pytest.mark.parametrize('block_length', [1, 2, 3])
def test_IHmethod(test_data, block_length, interp_semilog, freq):
results = IHmethod(test_data.Q, block_length=block_length, tp=0.9,
freq=freq,
interp_semilog=interp_semilog)
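    # every identified block minimum should be bounded above by the observed
    # discharge at the same timestep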
minimum_points = ~results.block_Qmin.isna()
assert np.all(results.loc[minimum_points, 'block_Qmin'] <= results.loc[minimum_points, 'Q'])
@pytest.mark.parametrize('data', (pytest.param( | pd.Series() | pandas.Series |
import poseconnect.utils
import poseconnect.defaults
import smc_kalman
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import tqdm
from uuid import uuid4
import logging
import time
import itertools
import functools
import copy
logger = logging.getLogger(__name__)
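# track_poses_3d ties the tracking pipeline together: ingest 3D poses, run the
# Kalman-filter-based tracker frame by frame, drop tracks with too few poses,
# and join the resulting pose_track_3d_id values back onto the input poses.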
def track_poses_3d(
poses_3d,
max_match_distance=poseconnect.defaults.TRACKING_MAX_MATCH_DISTANCE,
max_iterations_since_last_match=poseconnect.defaults.TRACKING_MAX_ITERATIONS_SINCE_LAST_MATCH,
centroid_position_initial_sd=poseconnect.defaults.TRACKING_CENTROID_POSITION_INITIAL_SD,
centroid_velocity_initial_sd=poseconnect.defaults.TRACKING_CENTROID_VELOCITY_INITIAL_SD,
reference_delta_t_seconds=poseconnect.defaults.TRACKING_REFERENCE_DELTA_T_SECONDS,
reference_velocity_drift=poseconnect.defaults.TRACKING_REFERENCE_VELOCITY_DRIFT,
position_observation_sd=poseconnect.defaults.TRACKING_POSITION_OBSERVATION_SD,
num_poses_per_track_min=poseconnect.defaults.TRACKING_NUM_POSES_PER_TRACK_MIN,
progress_bar=poseconnect.defaults.PROGRESS_BAR,
notebook=poseconnect.defaults.NOTEBOOK
):
poses_3d = poseconnect.utils.ingest_poses_3d(poses_3d)
pose_tracks_3d = update_pose_tracks_3d(
poses_3d=poses_3d,
pose_tracks_3d=None,
max_match_distance=max_match_distance,
max_iterations_since_last_match=max_iterations_since_last_match,
centroid_position_initial_sd=centroid_position_initial_sd,
centroid_velocity_initial_sd=centroid_velocity_initial_sd,
reference_delta_t_seconds=reference_delta_t_seconds,
reference_velocity_drift=reference_velocity_drift,
position_observation_sd=position_observation_sd,
progress_bar=progress_bar,
notebook=notebook
)
if num_poses_per_track_min is not None:
pose_tracks_3d.filter(
num_poses_min=num_poses_per_track_min,
inplace=True
)
poses_3d_with_tracks = (
poses_3d
.join(
pose_tracks_3d.output_df(),
how='inner'
)
)
return poses_3d_with_tracks
def update_pose_tracks_3d(
poses_3d,
pose_tracks_3d=None,
max_match_distance=poseconnect.defaults.TRACKING_MAX_MATCH_DISTANCE,
max_iterations_since_last_match=poseconnect.defaults.TRACKING_MAX_ITERATIONS_SINCE_LAST_MATCH,
centroid_position_initial_sd=poseconnect.defaults.TRACKING_CENTROID_POSITION_INITIAL_SD,
centroid_velocity_initial_sd=poseconnect.defaults.TRACKING_CENTROID_VELOCITY_INITIAL_SD,
reference_delta_t_seconds=poseconnect.defaults.TRACKING_REFERENCE_DELTA_T_SECONDS,
reference_velocity_drift=poseconnect.defaults.TRACKING_REFERENCE_VELOCITY_DRIFT,
position_observation_sd=poseconnect.defaults.TRACKING_POSITION_OBSERVATION_SD,
progress_bar=poseconnect.defaults.PROGRESS_BAR,
notebook=poseconnect.defaults.NOTEBOOK
):
if len(poses_3d) == 0:
return pose_tracks_3d
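    # Bootstrap: when no tracker state exists yet, seed one track per pose at the
    # earliest timestamp, then feed the remaining frames through update_df.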
if pose_tracks_3d is None:
initial_timestamp = poses_3d['timestamp'].min()
initial_pose_3d_ids = poses_3d.loc[
poses_3d['timestamp'] == initial_timestamp
].index.values.tolist()
initial_keypoint_coordinates_3d = poses_3d.loc[
poses_3d['timestamp'] == initial_timestamp,
'keypoint_coordinates_3d'
].values.tolist()
initial_poses_3d = dict(zip(initial_pose_3d_ids, initial_keypoint_coordinates_3d))
pose_tracks_3d = PoseTracks3D(
timestamp=initial_timestamp,
poses_3d=initial_poses_3d,
centroid_position_initial_sd=centroid_position_initial_sd,
centroid_velocity_initial_sd=centroid_velocity_initial_sd,
reference_delta_t_seconds=reference_delta_t_seconds,
reference_velocity_drift=reference_velocity_drift,
position_observation_sd=position_observation_sd
)
pose_tracks_3d.update_df(
poses_3d=poses_3d.loc[poses_3d['timestamp'] != initial_timestamp],
progress_bar=progress_bar,
notebook=notebook
)
else:
pose_tracks_3d.update_df(
poses_3d=poses_3d,
progress_bar=progress_bar,
notebook=notebook
)
return pose_tracks_3d
def interpolate_pose_tracks_3d(
poses_3d_with_tracks,
frames_per_second=poseconnect.defaults.FRAMES_PER_SECOND
):
poses_3d_with_tracks = poseconnect.utils.ingest_poses_3d_with_tracks(poses_3d_with_tracks)
poses_3d_new_list=list()
for pose_track_3d_id, pose_track in poses_3d_with_tracks.groupby('pose_track_3d_id'):
poses_3d_new_track = interpolate_pose_track(
pose_track,
frames_per_second=frames_per_second
)
poses_3d_new_track['pose_track_3d_id'] = pose_track_3d_id
poses_3d_new_list.append(poses_3d_new_track)
poses_3d_new = pd.concat(poses_3d_new_list)
poses_3d_with_tracks_interpolated= pd.concat((
poses_3d_with_tracks,
poses_3d_new
))
poses_3d_with_tracks_interpolated.sort_values('timestamp', inplace=True)
return poses_3d_with_tracks_interpolated
def interpolate_pose_track(
pose_track_3d,
frames_per_second=poseconnect.defaults.FRAMES_PER_SECOND
):
if not isinstance(frames_per_second, int):
raise ValueError('Only integer frame rates currently supported')
if not 1000 % frames_per_second == 0:
raise ValueError('Only frame periods with integer number of milliseconds currently supported')
frame_period_milliseconds = 1000//frames_per_second
if pose_track_3d['timestamp'].duplicated().any():
raise ValueError('Pose data for single pose track contains duplicate timestamps')
pose_track_3d = pose_track_3d.copy()
pose_track_3d.sort_values('timestamp', inplace=True)
old_time_index = pd.DatetimeIndex(pose_track_3d['timestamp'])
combined_time_index = pd.date_range(
start=pose_track_3d['timestamp'].min(),
end=pose_track_3d['timestamp'].max(),
freq='{}ms'.format(frame_period_milliseconds),
name='timestamp'
)
new_time_index = combined_time_index.difference(old_time_index)
old_num_poses = len(old_time_index)
combined_num_poses = len(combined_time_index)
new_num_poses = len(new_time_index)
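    # Flatten each (num_keypoints x 3) pose into a single row, reindex onto the
    # full frame grid, interpolate linearly in time, then reshape back and keep
    # only the timestamps missing from the original track.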
keypoints_flattened_df = pd.DataFrame(
np.stack(pose_track_3d['keypoint_coordinates_3d']).reshape((old_num_poses, -1)),
index=old_time_index
)
keypoints_flattened_interpolated_df = keypoints_flattened_df.reindex(combined_time_index).interpolate(method='time')
keypoints_flattened_interpolated_array = keypoints_flattened_interpolated_df.values
keypoints_interpolated_array = keypoints_flattened_interpolated_array.reshape((combined_num_poses, -1, 3))
keypoints_interpolated_array_unstacked = [keypoints_interpolated_array[i] for i in range(keypoints_interpolated_array.shape[0])]
poses_3d_interpolated = pd.Series(
keypoints_interpolated_array_unstacked,
index=combined_time_index,
name='keypoint_coordinates_3d'
).to_frame()
poses_3d_new = poses_3d_interpolated.reindex(new_time_index)
pose_3d_ids_new = [uuid4().hex for _ in range(len(poses_3d_new))]
poses_3d_new['pose_3d_id'] = pose_3d_ids_new
poses_3d_new = poses_3d_new.reset_index().set_index('pose_3d_id')
return poses_3d_new
class PoseTracks3D:
def __init__(
self,
timestamp,
poses_3d,
max_match_distance=poseconnect.defaults.TRACKING_MAX_MATCH_DISTANCE,
max_iterations_since_last_match=poseconnect.defaults.TRACKING_MAX_ITERATIONS_SINCE_LAST_MATCH,
centroid_position_initial_sd=poseconnect.defaults.TRACKING_CENTROID_POSITION_INITIAL_SD,
centroid_velocity_initial_sd=poseconnect.defaults.TRACKING_CENTROID_VELOCITY_INITIAL_SD,
reference_delta_t_seconds=poseconnect.defaults.TRACKING_REFERENCE_DELTA_T_SECONDS,
reference_velocity_drift=poseconnect.defaults.TRACKING_REFERENCE_VELOCITY_DRIFT,
position_observation_sd=poseconnect.defaults.TRACKING_POSITION_OBSERVATION_SD
):
self.max_match_distance = max_match_distance
self.max_iterations_since_last_match = max_iterations_since_last_match
self.centroid_position_initial_sd = centroid_position_initial_sd
self.centroid_velocity_initial_sd = centroid_velocity_initial_sd
self.reference_delta_t_seconds = reference_delta_t_seconds
self.reference_velocity_drift = reference_velocity_drift
self.position_observation_sd = position_observation_sd
self.active_tracks = dict()
self.inactive_tracks = dict()
for pose_3d_id, keypoint_coordinates_3d in poses_3d.items():
pose_track_3d = PoseTrack3D(
timestamp=timestamp,
pose_3d_id = pose_3d_id,
keypoint_coordinates_3d=keypoint_coordinates_3d,
centroid_position_initial_sd=self.centroid_position_initial_sd,
centroid_velocity_initial_sd=self.centroid_velocity_initial_sd,
reference_delta_t_seconds=self.reference_delta_t_seconds,
reference_velocity_drift=self.reference_velocity_drift,
position_observation_sd=self.position_observation_sd
)
self.active_tracks[pose_track_3d.pose_track_3d_id] = pose_track_3d
def update_df(
self,
poses_3d,
progress_bar=poseconnect.defaults.PROGRESS_BAR,
notebook=poseconnect.defaults.NOTEBOOK
):
timestamps = np.sort(poses_3d['timestamp'].unique())
if progress_bar:
if notebook:
timestamp_iterator = tqdm.notebook.tqdm(timestamps)
else:
timestamp_iterator = tqdm.tqdm(timestamps)
else:
timestamp_iterator = timestamps
for current_timestamp in timestamp_iterator:
current_pose_3d_ids = poses_3d.loc[
poses_3d['timestamp'] == current_timestamp
].index.values.tolist()
current_keypoint_coordinates_3d = poses_3d.loc[
poses_3d['timestamp'] == current_timestamp,
'keypoint_coordinates_3d'
].values.tolist()
current_poses_3d = dict(zip(current_pose_3d_ids, current_keypoint_coordinates_3d))
self.update(
timestamp=current_timestamp,
poses_3d=current_poses_3d
)
def update(
self,
timestamp,
poses_3d
):
self.predict(
timestamp=timestamp
)
self.incorporate_observations(
timestamp=timestamp,
poses_3d=poses_3d
)
def predict(
self,
timestamp
):
for pose_track_3d in self.active_tracks.values():
pose_track_3d.predict(timestamp)
def incorporate_observations(
self,
timestamp,
poses_3d
):
matches = self.match_observations_to_pose_tracks_3d(
poses_3d=poses_3d
)
matched_pose_tracks_3d = set(matches.keys())
matched_poses = set(matches.values())
unmatched_pose_tracks_3d = set(self.active_tracks.keys()) - matched_pose_tracks_3d
unmatched_poses = set(poses_3d.keys()) - matched_poses
for pose_track_3d_id, pose_3d_id in matches.items():
self.active_tracks[pose_track_3d_id].iterations_since_last_match = 0
self.active_tracks[pose_track_3d_id].incorporate_observation(
pose_3d_id = pose_3d_id,
keypoint_coordinates_3d = poses_3d[pose_3d_id],
)
for pose_track_3d_id in unmatched_pose_tracks_3d:
self.active_tracks[pose_track_3d_id].iterations_since_last_match += 1
if self.active_tracks[pose_track_3d_id].iterations_since_last_match > self.max_iterations_since_last_match:
self.inactive_tracks[pose_track_3d_id] = self.active_tracks.pop(pose_track_3d_id)
for pose_3d_id in unmatched_poses:
pose_track_3d = PoseTrack3D(
timestamp=timestamp,
pose_3d_id=pose_3d_id,
keypoint_coordinates_3d=poses_3d[pose_3d_id],
centroid_position_initial_sd=self.centroid_position_initial_sd,
centroid_velocity_initial_sd=self.centroid_velocity_initial_sd,
reference_delta_t_seconds=self.reference_delta_t_seconds,
reference_velocity_drift=self.reference_velocity_drift,
position_observation_sd=self.position_observation_sd
)
self.active_tracks[pose_track_3d.pose_track_3d_id] = pose_track_3d
def match_observations_to_pose_tracks_3d(
self,
poses_3d
):
pose_track_3d_ids = self.active_tracks.keys()
pose_3d_ids = poses_3d.keys()
distances = pd.DataFrame(
index = pose_track_3d_ids,
columns = pose_3d_ids,
dtype='float'
)
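        # Fill pairwise track-to-pose centroid distances (cells stay NaN when the
        # distance exceeds max_match_distance), then keep only mutual
        # nearest-neighbour pairs below.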
for pose_track_3d_id, pose_3d_id in itertools.product(pose_track_3d_ids, pose_3d_ids):
track_position = self.active_tracks[pose_track_3d_id].centroid_distribution.mean[:3]
observation_position = np.nanmean(poses_3d[pose_3d_id], axis=0)
distance = np.linalg.norm(
np.subtract(
track_position,
observation_position
)
)
if distance < self.max_match_distance:
distances.loc[pose_track_3d_id, pose_3d_id] = distance
best_track_for_each_pose = distances.idxmin(axis=0)
best_pose_for_each_track = distances.idxmin(axis=1)
matches = dict(
set(zip(best_pose_for_each_track.index, best_pose_for_each_track.values)) &
set(zip(best_track_for_each_pose.values, best_track_for_each_pose.index))
)
return matches
def filter(
self,
num_poses_min=poseconnect.defaults.TRACKING_NUM_POSES_PER_TRACK_MIN,
inplace=False
):
if not inplace:
new_pose_tracks_3d = copy.deepcopy(self)
else:
new_pose_tracks_3d = self
new_pose_tracks_3d.active_tracks = dict(filter(
lambda key_value_tuple: key_value_tuple[1].num_poses() >= num_poses_min,
new_pose_tracks_3d.active_tracks.items()
))
new_pose_tracks_3d.inactive_tracks = dict(filter(
lambda key_value_tuple: key_value_tuple[1].num_poses() >= num_poses_min,
new_pose_tracks_3d.inactive_tracks.items()
))
if not inplace:
return new_pose_tracks_3d
def extract_pose_tracks_3d(
self,
poses_3d
):
input_index_name = poses_3d.index.name
poses_3d_with_tracks = poses_3d.join(
self.output_df(),
how='inner'
)
poses_3d_with_tracks.index.name = input_index_name
return poses_3d_with_tracks
def output(self):
output = {pose_track_3d_id: pose_track_3d.output() for pose_track_3d_id, pose_track_3d in self.tracks().items()}
return output
def output_df(self):
df = pd.concat(
[pose_track_3d.output_df() for pose_track_3d in self.tracks().values()]
)
return df
def tracks(self):
return {**self.active_tracks, **self.inactive_tracks}
def plot_trajectories(
self,
pose_track_3d_ids,
track_label_lookup=None,
fig_width_inches=8.0,
fig_height_inches=10.5,
show=True
):
if track_label_lookup is None:
track_label_lookup = {pose_track_3d_id: pose_track_3d_id[:2] for pose_track_3d_id in pose_track_3d_ids}
fig, axes = plt.subplots(3, 1, sharex=True)
for pose_track_3d_id in pose_track_3d_ids:
for axis_index, axis_name in enumerate(['x', 'y', 'z']):
self.tracks()[pose_track_3d_id].draw_trajectory(
axis_index=axis_index,
axis_name=axis_name,
axis_object=axes[axis_index],
track_label_lookup=track_label_lookup
)
axes[0].legend(loc='upper left', bbox_to_anchor=(1.0, 1.0))
axes[2].set_xlabel('Time')
fig.autofmt_xdate()
fig.set_size_inches(fig_width_inches, fig_height_inches)
if show:
plt.show()
class PoseTrack3D:
def __init__(
self,
timestamp,
pose_3d_id,
keypoint_coordinates_3d,
centroid_position_initial_sd=poseconnect.defaults.TRACKING_CENTROID_POSITION_INITIAL_SD,
centroid_velocity_initial_sd=poseconnect.defaults.TRACKING_CENTROID_VELOCITY_INITIAL_SD,
reference_delta_t_seconds=poseconnect.defaults.TRACKING_REFERENCE_DELTA_T_SECONDS,
reference_velocity_drift=poseconnect.defaults.TRACKING_REFERENCE_VELOCITY_DRIFT,
position_observation_sd=poseconnect.defaults.TRACKING_POSITION_OBSERVATION_SD
):
keypoint_coordinates_3d = np.asarray(keypoint_coordinates_3d)
if keypoint_coordinates_3d.ndim != 2:
raise ValueError('Keypoint coordinate array should be two dimensional (Number of keypoints x 3)')
centroid_position = np.nanmean(keypoint_coordinates_3d, axis=0)
        self.pose_track_3d_id = uuid4().hex
self.initial_timestamp = timestamp
self.latest_timestamp = timestamp
self.pose_3d_ids = [pose_3d_id]
self.centroid_distribution = smc_kalman.GaussianDistribution(
mean=np.concatenate((centroid_position.reshape((3,)), np.repeat(0.0, 3))),
covariance=np.diag(np.concatenate((
np.repeat(centroid_position_initial_sd**2, 3),
np.repeat(centroid_velocity_initial_sd**2, 3)
)))
)
self.reference_delta_t_seconds = reference_delta_t_seconds
self.reference_velocity_drift = reference_velocity_drift
self.position_observation_sd = position_observation_sd
self.iterations_since_last_match = 0
self.centroid_distribution_trajectory = {
'timestamp': [self.latest_timestamp],
'observed_centroid': [centroid_position],
'mean': [self.centroid_distribution.mean],
'covariance': [self.centroid_distribution.covariance]
}
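    # predict() / incorporate_observation() run the usual Kalman predict/update
    # cycle on the centroid state (3D position + velocity). constant_velocity_model
    # is assumed to be defined elsewhere in this module and to build the
    # linear-Gaussian transition/observation model for the given time step.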
def predict(
self,
timestamp
):
delta_t_seconds = (timestamp - self.latest_timestamp).total_seconds()
self.centroid_distribution = self.centroid_distribution.predict(
linear_gaussian_model=constant_velocity_model(
delta_t_seconds=delta_t_seconds,
reference_delta_t_seconds=self.reference_delta_t_seconds,
reference_velocity_drift=self.reference_velocity_drift,
position_observation_sd=self.position_observation_sd
)
)
self.latest_timestamp=timestamp
self.centroid_distribution_trajectory['timestamp'].append(self.latest_timestamp)
self.centroid_distribution_trajectory['observed_centroid'].append(np.array([np.nan, np.nan, np.nan]))
self.centroid_distribution_trajectory['mean'].append(self.centroid_distribution.mean)
self.centroid_distribution_trajectory['covariance'].append(self.centroid_distribution.covariance)
def incorporate_observation(
self,
pose_3d_id,
keypoint_coordinates_3d
):
keypoint_coordinates_3d = np.asarray(keypoint_coordinates_3d)
if keypoint_coordinates_3d.ndim != 2:
raise ValueError('Keypoint coordinate array should be two dimensional (Number of keypoints x 3)')
centroid_position = np.nanmean(keypoint_coordinates_3d, axis=0)
self.pose_3d_ids.append(pose_3d_id)
self.centroid_distribution = self.centroid_distribution.incorporate_observation(
linear_gaussian_model=constant_velocity_model(
delta_t_seconds=None,
reference_delta_t_seconds=self.reference_delta_t_seconds,
reference_velocity_drift=self.reference_velocity_drift,
position_observation_sd=self.position_observation_sd
),
observation_vector=centroid_position
)
self.centroid_distribution_trajectory['observed_centroid'][-1] = centroid_position
self.centroid_distribution_trajectory['mean'][-1] = self.centroid_distribution.mean
self.centroid_distribution_trajectory['covariance'][-1] = self.centroid_distribution.covariance
def num_poses(
self
):
return(len(self.pose_3d_ids))
def centroid_distribution_trajectory_df(self):
df = pd.DataFrame({
'timestamp': self.centroid_distribution_trajectory['timestamp'],
'observed_centroid': self.centroid_distribution_trajectory['observed_centroid'],
'position': [mean[:3] for mean in self.centroid_distribution_trajectory['mean']],
'velocity': [mean[3:] for mean in self.centroid_distribution_trajectory['mean']],
'covariance': self.centroid_distribution_trajectory['covariance']
})
df.set_index('timestamp', inplace=True)
return df
def output(self):
output = {
'start': pd.to_datetime(self.initial_timestamp).to_pydatetime(),
'end': | pd.to_datetime(self.latest_timestamp) | pandas.to_datetime |
import pandas as pd
# Tools for machine learning
import pickle
import time
import xgboost as xgb
from sklearn.model_selection import train_test_split
matches = | pd.read_csv('data/seasons_merged.csv') | pandas.read_csv |
import os
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from .. import get_meta
@pytest.fixture(scope="module") # type: ignore
def postgres_url() -> str:
conn = os.environ["POSTGRES_URL"]
return conn
def test_get_meta(postgres_url: str) -> None:
query = "SELECT * FROM test_table limit 10"
df = get_meta(
postgres_url,
query,
)
expected = pd.DataFrame(
data={
"test_int": | pd.Series([], dtype="Int64") | pandas.Series |
import re
from collections import defaultdict
from typing import List, Dict, Optional, Callable, Tuple
import numpy as np
import pandas as pd
from tqdm import tqdm
from lexsubgen.datasets.nlu import NLUDatasetReader
from lexsubgen.subst_generator import SubstituteGenerator
from lexsubgen.utils.augmentation import (
get_slot_positions,
pick_random_target,
SpaceTokenizer,
)
from lexsubgen.utils.batch_reader import BatchReader
class Augmenter:
def __init__(
self,
substitute_generator: SubstituteGenerator,
max_num_augmented_utters: int = 5,
max_substitute_usage: int = 3,
num_replacements: int = 1,
possibilities_proportion: float = 0.5,
max_num_possibilities: int = None,
tokenizer: Optional[Callable] = None,
slot_tokenizer: Optional[Callable] = None,
batch_size: int = 50,
verbose: bool = False,
):
"""
        Class for generating augmented utterances using a Substitute Generator.
It can be used in 2 cases:
1. Augment arbitrary utterances with method 'augment_utterances'.
2. Augment whole dataset (like SNIPS) with method 'augment_dataset'.
Args:
            substitute_generator: Object that generates possible substitutes
            max_num_augmented_utters: Maximum number of augmented utterances generated from one utterance
            max_substitute_usage: Maximum number of times a single substitute may be reused for one utterance
            num_replacements: Number of words to replace in each augmented version of the utterance
possibilities_proportion: Fraction of tokens in utterance to be candidates to replace
max_num_possibilities: Maximum number of tokens to be candidates to replace
tokenizer: Tokenizer for tokenizing input utterance
slot_tokenizer: Tokenizer for tokenizing slot tags sequence
batch_size: Number of samples in batch for substitute generator
verbose: Bool flag for verbosity
"""
self.substitute_generator = substitute_generator
self.tokenizer = tokenizer or SpaceTokenizer()
self.slot_tokenizer = slot_tokenizer or SpaceTokenizer()
self.max_num_augmented_utters = max_num_augmented_utters
self.max_substitute_usage = max_substitute_usage
self.num_replacements = num_replacements
self.possibilities_proportion = possibilities_proportion
self.max_num_possibilities = max_num_possibilities
self.batch_size = batch_size
self.verbose = verbose
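    # Rough usage sketch (generator and inputs are hypothetical, shown for illustration only):
    #   augmenter = Augmenter(substitute_generator=my_generator, num_replacements=1)
    #   new_utts, new_slots = augmenter.augment_utterances(
    #       ["play some jazz music"], ["O O B-genre O"])
    #   # both return values are dicts keyed by the utterance index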
def generate_possible_substitutes(
self, utterances_tokens: List[List[str]], target_ids: List[int]
) -> List[List[Dict]]:
"""
Function used to generate substitutes for given utterances on chosen indices.
Args:
utterances_tokens: list of tokenized utterances
target_ids: list of chosen indices
Returns:
possible_substitutes: for each utterance return list of possible substitutes with their probabilities
"""
possible_substitutes = list()
generated = self.substitute_generator.generate_substitutes(
utterances_tokens, target_ids, return_probs=True
)
utterances_substitutes, word2id, probabilities = generated
for utterance_idx, substitutes in enumerate(utterances_substitutes):
substitute_ids = [word2id[s] for s in substitutes]
substitute_probabilities = probabilities[utterance_idx, substitute_ids]
possible_substitutes.append(
[
{"word": word, "p": prob, "num_used": 0}
for word, prob in zip(substitutes, substitute_probabilities)
]
)
return possible_substitutes
def apply_possible_substitutes(
self,
utterances: List[List[str]],
slots: List[List[str]],
possible_substitutes: List[Dict[int, List]],
) -> Tuple[Dict, Dict]:
"""
Generate augmented utterances with given substitutes in possible positions
Args:
utterances: Tokenized utterances
slots: Slot tags in BIO-format
possible_substitutes: List of possible substitutes for each utterance.
List element is a Dictionary that maps possible positions (for substitution) to generated substitutes.
Returns:
augmented_utterances: Dict that maps utterance number to generated utterances
augmented_slots: Dict of corresponding tags
"""
augmented_utterances, augmented_slots = defaultdict(list), defaultdict(list)
for sample_idx, (utterance, tags, substitutes) in enumerate(
zip(utterances, slots, possible_substitutes)
):
target_ids = np.array(list(substitutes.keys()))
num_augmented = 0
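            # Keep sampling replacement positions and substitutes until either the
            # per-utterance quota (max_num_augmented_utters) is reached or no
            # substitute is still available under the max_substitute_usage limit.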
while num_augmented < self.max_num_augmented_utters:
# Pick positions to replace tokens
mask = pick_random_target(
len(target_ids), proportion=1.0, max_num=self.num_replacements
)
replacement_ids = target_ids[mask].tolist()
# For each picked position pick substitutes from possible substitutes
replacement_words = []
for idx in replacement_ids:
available_substitutes = [
(i, s["word"], s["p"])
for i, s in enumerate(substitutes[idx])
if s["used"] <= self.max_substitute_usage
]
if not available_substitutes:
break
substitutes_ids, substitute_words, substitute_probs = list(
zip(*available_substitutes)
)
substitute_probs = np.array(substitute_probs)
substitute_probs /= substitute_probs.sum()
picked_subst_idx = np.random.choice(
substitutes_ids, size=1, p=substitute_probs
)[0]
substitutes[idx][picked_subst_idx]["used"] += 1
replacement_words.append(substitute_words[picked_subst_idx])
if not replacement_words:
break
new_tokens, new_slots = utterance.copy(), tags.copy()
for idx, word in zip(replacement_ids, replacement_words):
if re.search("[-_/]", word):
word_tokens = re.split("[-_/]", word)
left_context, right_context = (
new_tokens[:idx],
new_tokens[idx + 1 :],
)
new_tokens = left_context + word_tokens + right_context
left_tags, right_tags = tags[:idx], tags[idx + 1 :]
target_slots = [tags[idx]]
if tags[idx].lower().startswith("b-"):
target_slots.extend(
[
"i-" + tags[idx][2:]
for _ in range(len(word_tokens) - 1)
]
)
else:
target_slots.extend([tags[idx]] * (len(word_tokens) - 1))
new_slots = left_tags + target_slots + right_tags
else:
new_tokens[idx] = word
augmented_utterances[sample_idx].append(" ".join(new_tokens))
augmented_slots[sample_idx].append(" ".join(new_slots))
num_augmented += 1
return augmented_utterances, augmented_slots
def augment_utterances(
self, utterances: List[str], slots: Optional[List[str]] = None
) -> Tuple[Dict, Dict]:
"""
        Main function which performs augmentation on the given utterances.
        Additionally, the function can take slot BIO-tags.
        If slots are given, only words tagged with B- or I- slots are replaced.
Args:
utterances: List of input utterances,
slots: Sequences of slots for each input utterance.
Length (in tokens) of utterance and corresponding slot sequence must be equal.
Returns:
augmented_utterances: Dict that maps utterance number to generated utterances
augmented_slots: Dict of corresponding tags
"""
# Tokenize input utterance to pass it to generator
utterances = [self.tokenizer(utterance) for utterance in utterances]
# Get possible positions for generating substitutes
if slots:
slots = [self.slot_tokenizer(s) for s in slots]
possible_positions = get_slot_positions(slots)
else:
slots = [["O"] * len(utterance) for utterance in utterances]
possible_positions = [np.arange(len(utterance)) for utterance in utterances]
# Pick target positions from possible positions
possible_substitutes = []
tokens_lists_for_generator, target_ids_for_generator = [], []
for idx, positions in enumerate(possible_positions):
mask = pick_random_target(
sequence_length=len(positions),
proportion=self.possibilities_proportion,
max_num=self.max_num_possibilities,
)
picked_ids = positions[mask].tolist()
possible_substitutes.append({idx: None for idx in picked_ids})
tokens_lists_for_generator.extend([utterances[idx]] * len(picked_ids))
target_ids_for_generator.extend(picked_ids)
# Generate possible substitutions on picked target positions
batch_reader = BatchReader(
tokens_lists_for_generator,
target_ids_for_generator,
batch_size=self.batch_size,
)
utterance_idx = 0
for tokens, targets in batch_reader:
generated = self.generate_possible_substitutes(tokens, targets)
for substitutes, target_idx in zip(generated, targets):
if possible_substitutes[utterance_idx].get(target_idx, True):
utterance_idx += 1
possible_substitutes[utterance_idx][target_idx] = substitutes
return self.apply_possible_substitutes(utterances, slots, possible_substitutes)
def augment_dataset(self, dataset_name: str, merge_with_origin: bool = True):
"""
Args:
            dataset_name: name of the dataset to augment. It is used to create the dataset reader.
            merge_with_origin: bool flag.
                If True, the method merges the augmented utterances with the originals in the resulting DataFrame.
Returns:
augmented_dataset: pandas DataFrame with augmented utterances.
DataFrame has 3 columns - 'utterance', 'intent', 'slots'
"""
reader = NLUDatasetReader(dataset_name)
dataset = reader.read_dataset()
utterances = dataset.utterance.tolist()
slots = dataset.slots.tolist()
intents = dataset.intent.tolist()
augmented_dataset = | pd.DataFrame(columns=dataset.columns) | pandas.DataFrame |
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.keys import Keys
import requests
import time
from datetime import datetime
import pandas as pd
from urllib import parse
from config import ENV_VARIABLE
from os.path import getsize
fold_path = "./crawler_data/"
page_Max = 100
def stripID(url, wantStrip):
loc = url.find(wantStrip)
length = len(wantStrip)
return url[loc+length:]
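# e.g. stripID("https://shop.example/products?id=123", "id=") returns "123"
# (illustrative URL only)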
def Kklee():
shop_id = 13
name = 'kklee'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page; merged into dfAll when the page changes
    dfAll = pd.DataFrame()  # holds all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.kklee.co/products?page=" + \
str(p) + "&sort_by=&order_by=&limit=24"
        # if the page number is past the last page (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//a[%i]/div[@class='Product-info']/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='col-xs-12 ProductList-list']/a[%i]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
                page_id = page_id.replace("/products/", "", 1)
find_href = chrome.find_element_by_xpath(
"//a[%i]/div[1]/div[1]" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 25):
p += 1
continue
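            # price parsing: first try "sale price + original price", then fall
            # back to sale price only; skip the item if neither pattern matches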
try:
sale_price = chrome.find_element_by_xpath(
"//a[%i]/div[@class='Product-info']/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = chrome.find_element_by_xpath(
"//a[%i]/div[@class='Product-info']/div[3]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//a[%i]/div[@class='Product-info']/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Wishbykorea():
shop_id = 14
name = 'wishbykorea'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page; merged into dfAll when the page changes
    dfAll = pd.DataFrame()  # holds all scraped rows
close = 0
while True:
if(close == 1):
chrome.quit()
break
url = "https://www.wishbykorea.com/collection-727&pgno=" + str(p)
        # if the page number is past the last page (not found), break out of the loop
try:
chrome.get(url)
print(url)
except:
break
time.sleep(1)
i = 1
while(i < 17):
try:
title = chrome.find_element_by_xpath(
"//div[@class='collection_item'][%i]/div/div/label" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='collection_item'][%i]/a[@href]" % (i,)).get_attribute('href')
page_id = page_link.replace("https://www.wishbykorea.com/collection-view-", "").replace("&ca=727", "")
find_href = chrome.find_element_by_xpath(
"//div[@class='collection_item'][%i]/a/div" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip('")')
except:
i += 1
if(i == 17):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='collection_item'][%i]/div[@class='collection_item_info']/div[2]/label" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = ""
except:
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='collection_item'][%i]/div[@class='collection_item_info']/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = ""
except:
i += 1
if(i == 17):
p += 1
continue
if(sale_price == "0"):
i += 1
if(i == 17):
p += 1
continue
i += 1
if(i == 17):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Aspeed():
shop_id = 15
name = 'aspeed'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page; merged into dfAll when the page changes
    dfAll = pd.DataFrame()  # holds all scraped rows
close = 0
while True:
if(close == 1):
chrome.quit()
break
url = "https://www.aspeed.co/products?page=" + \
str(p) + "&sort_by=&order_by=&limit=72"
        # if the page number is past the last page (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 73):
try:
title = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
                page_id = page_id.replace("/products/", "", 1)
find_href = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a/div[1]/div[1]" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 73):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a/div[2]/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a/div[2]/div/div[2]/div[2]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a/div[2]/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = ""
except:
i += 1
if(i == 73):
p += 1
continue
i += 1
if(i == 73):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Openlady():
shop_id = 17
name = 'openlady'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page; merged into dfAll when the page changes
    dfAll = pd.DataFrame()  # holds all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.openlady.tw/item.html?&id=157172&page=" + \
str(p)
        # if the page number is past the last page (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 17):
try:
title = chrome.find_element_by_xpath(
"//li[@class='item_block item_block_y'][%i]/div[@class='item_text']/p[@class='item_name']/a[@class='mymy_item_link']" % (i,)).text
page_link = chrome.find_element_by_xpath(
"//li[@class='item_block item_block_y'][%i]/div[@class='item_text']/p[@class='item_name']/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.query
page_id = page_id.replace("&id=", "")
except:
close += 1
break
try:
pic_link = chrome.find_element_by_xpath(
"//li[@class='item_block item_block_y'][%i]/div[@class='item_img']/a[@class='mymy_item_link']/img[@src]" % (i,)).get_attribute("src")
except:
i += 1
if(i == 17):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[@class='item_block item_block_y'][%i]/div[@class='item_text']/p[@class='item_amount']/span[2]" % (i,)).text
sale_price = sale_price.strip('NT$ ')
ori_price = chrome.find_element_by_xpath(
"//li[@class='item_block item_block_y'][%i]/div[@class='item_text']/p[@class='item_amount']/span[1]" % (i,)).text
ori_price = ori_price.strip('NT$ ')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[@class='item_block item_block_y'][%i]/div[@class='item_text']/p[@class='item_amount']/span[1]" % (i,)).text
sale_price = sale_price.strip('NT$ ')
ori_price = ""
except:
i += 1
if(i == 17):
p += 1
continue
i += 1
if(i == 17):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = | pd.concat([dfAll, df]) | pandas.concat |
# -*- coding: utf-8 -*-
import click
import logging
from pathlib import Path
# from dotenv import find_dotenv, load_dotenv
import requests
from bs4 import BeautifulSoup
import numpy as np
import pandas as pd
import datetime
import yfinance as yf
from pandas_datareader import data as pdr
from flask import current_app
from stk_predictor.extensions import db
@click.command()
@click.argument('input_filepath', type=click.Path(exists=True))
@click.argument('output_filepath', type=click.Path())
def main(input_filepath, output_filepath):
""" Runs data processing scripts to turn raw data from (../raw) into
cleaned data ready to be analyzed (saved in ../processed).
"""
logger = logging.getLogger(__name__)
logger.info('making final data set from raw data')
def get_ticker_from_yahoo(ticker, start_date, end_date):
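    # Download daily bars from Yahoo, keep only Close/Volume, merge with the
    # history already stored in the `aapl` table, de-duplicate on trading_date,
    # and write the combined frame back to the database.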
yf.pdr_override()
try:
new_trading_df = pdr.get_data_yahoo(
ticker, start_date, end_date, interval='1d')
new_trading_df = new_trading_df.drop(
['Open', 'High', 'Low', 'Adj Close'], axis=1)
new_trading_df = new_trading_df.dropna('index')
new_trading_df = new_trading_df.reset_index()
new_trading_df.columns = ['trading_date',
'intraday_close', 'intraday_volumes']
his_trading_df = pd.read_sql('aapl', db.engine, index_col='id')
df = pd.concat([his_trading_df, new_trading_df]
).drop_duplicates('trading_date')
df = df.sort_values(by='trading_date')
df = df.reset_index(drop=True)
if len(df) > 0:
df.to_sql("aapl", db.engine, if_exists='replace', index_label='id')
return df
else:
# t = pd.read_sql('aapl', db.engine, index_col='id')
return None
except Exception as ex:
raise RuntimeError(
"Catch Excetion when retrieve data from Yahoo...", ex)
return None
def get_news_from_finviz(ticker):
"""Request news headline from finviz, according to
company ticker's name
Parameters
-----------
ticker: str
the stock ticker name
Return
----------
df : pd.DataFrame
return the latest 2 days news healines.
"""
current_app.logger.info("Job >> Enter Finviz news scrape step...")
base_url = 'https://finviz.com/quote.ashx?t={}'.format(ticker)
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) \
AppleWebKit/537.36 (KHTML, like Gecko) \
Chrome/50.0.2661.102 Safari/537.36'
}
parsed_news = []
try:
res = requests.get(base_url, headers=headers)
if res.status_code == 200:
texts = res.text
soup = BeautifulSoup(texts)
news_tables = soup.find(id="news-table")
for x in news_tables.findAll('tr'):
text = x.a.get_text()
date_scrape = x.td.text.split()
if len(date_scrape) == 1:
time = date_scrape[0]
else:
date = date_scrape[0]
time = date_scrape[1]
parsed_news.append([date, time, text])
# filter the recent day news
df = pd.DataFrame(parsed_news, columns=['date', 'time', 'texts'])
df['date'] = pd.to_datetime(df.date).dt.date
one_day_period = (datetime.datetime.today() -
datetime.timedelta(days=1)).date()
df_sub = df[df.date >= one_day_period]
return df_sub
else:
raise RuntimeError("HTTP response Error {}".format(
res.status_code)) from None
except Exception as ex:
current_app.logger.info("Exception in scrape Finviz.", ex)
raise RuntimeError("Exception in scrape Finviz.") from ex
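# Usage sketch (added for illustration, not part of the original module): how the
# scraper above is typically called. 'AAPL' is an arbitrary example ticker; the call
# needs network access and, because of current_app.logger, an active Flask app context.
def _example_finviz_headlines():
    headlines = get_news_from_finviz('AAPL')
    # 'headlines' is a DataFrame with columns ['date', 'time', 'texts'],
    # filtered to roughly the last day of items.
    return headlines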
def prepare_trading_dataset(df):
"""Prepare the trading data set.
Time series analysis incoporate previous data for future prediction,
We need to retrieve historical data to generate features.
Parameters
-----------
df: DataFrame
the stock ticker trading data, including trading-date, close-price, volumes
window: int, default = 400
feature engineer windows size. Using at most 400 trading days to construct
features.
Return
----------
array_lstm : np.array
return the array with 3 dimensions shape -> [samples, 1, features]
"""
if len(df) == 0:
raise RuntimeError(
"Encounter Error in >>make_dataset.prepare_trading_dataset<<... \
Did not catch any news.") from None
else:
df['log_ret_1d'] = np.log(df['intraday_close'] / df['intraday_close'].shift(1))
df['log_ret_1w'] = pd.Series(df['log_ret_1d']).rolling(window=5).sum()
df['log_ret_2w'] = pd.Series(df['log_ret_1d']).rolling(window=10).sum()
df['log_ret_3w'] = pd.Series(df['log_ret_1d']).rolling(window=15).sum()
df['log_ret_4w'] = pd.Series(df['log_ret_1d']).rolling(window=20).sum()
df['log_ret_8w'] = pd.Series(df['log_ret_1d']).rolling(window=40).sum()
df['log_ret_12w'] = pd.Series(df['log_ret_1d']).rolling(window=60).sum()
df['log_ret_16w'] = pd.Series(df['log_ret_1d']).rolling(window=80).sum()
df['log_ret_20w'] = pd.Series(df['log_ret_1d']).rolling(window=100).sum()
df['log_ret_24w'] = pd.Series(df['log_ret_1d']).rolling(window=120).sum()
df['log_ret_28w'] = pd.Series(df['log_ret_1d']).rolling(window=140).sum()
df['log_ret_32w'] = pd.Series(df['log_ret_1d']).rolling(window=160).sum()
df['log_ret_36w'] = pd.Series(df['log_ret_1d']).rolling(window=180).sum()
df['log_ret_40w'] = pd.Series(df['log_ret_1d']).rolling(window=200).sum()
df['log_ret_44w'] = pd.Series(df['log_ret_1d']).rolling(window=220).sum()
df['log_ret_48w'] = pd.Series(df['log_ret_1d']).rolling(window=240).sum()
df['log_ret_52w'] = pd.Series(df['log_ret_1d']).rolling(window=260).sum()
df['log_ret_56w'] = pd.Series(df['log_ret_1d']).rolling(window=280).sum()
df['log_ret_60w'] = pd.Series(df['log_ret_1d']).rolling(window=300).sum()
df['log_ret_64w'] = pd.Series(df['log_ret_1d']).rolling(window=320).sum()
df['log_ret_68w'] = pd.Series(df['log_ret_1d']).rolling(window=340).sum()
df['log_ret_72w'] = pd.Series(df['log_ret_1d']).rolling(window=360).sum()
df['log_ret_76w'] = pd.Series(df['log_ret_1d']).rolling(window=380).sum()
df['log_ret_80w'] = pd.Series(df['log_ret_1d']).rolling(window=400).sum()
df['vol_1w'] = pd.Series(df['log_ret_1d']).rolling(window=5).std()*np.sqrt(5)
df['vol_2w'] = pd.Series(df['log_ret_1d']).rolling(window=10).std()*np.sqrt(10)
df['vol_3w'] = pd.Series(df['log_ret_1d']).rolling(window=15).std()*np.sqrt(15)
df['vol_4w'] = pd.Series(df['log_ret_1d']).rolling(window=20).std()*np.sqrt(20)
df['vol_8w'] = pd.Series(df['log_ret_1d']).rolling(window=40).std()*np.sqrt(40)
df['vol_12w'] = pd.Series(df['log_ret_1d']).rolling(window=60).std()*np.sqrt(60)
df['vol_16w'] = pd.Series(df['log_ret_1d']).rolling(window=80).std()*np.sqrt(80)
df['vol_20w'] = pd.Series(df['log_ret_1d']).rolling(window=100).std()*np.sqrt(100)
df['vol_24w'] = pd.Series(df['log_ret_1d']).rolling(window=120).std()*np.sqrt(120)
df['vol_28w'] = pd.Series(df['log_ret_1d']).rolling(window=140).std()*np.sqrt(140)
        df['vol_32w'] = pd.Series(df['log_ret_1d']).rolling(window=160).std()*np.sqrt(160)
import requests
import json
import pandas as pd
import datetime
import asyncio
import os
from sklearn.preprocessing import Normalizer
import logging
import sys
from dotenv import load_dotenv
from csv import writer
load_dotenv() # Load environment variables
def gen_daily_data(name, lat, lon, t_start, t_end, component_names):
'''
Generate data for a given gas/particulate for a given city over a set time period and write to csv
@params:
name: Name of location
lat, lon: Latitude and longitude in decimal degrees
t_start, t_end: Starting and ending epoch in Unix time
'''
# Connect to endpoint and load data
endpoint = 'http://api.openweathermap.org/data/2.5/air_pollution/history?lat={LAT}&lon={LON}&start={START}&end={END}&appid={KEY}'.format(
LAT=lat,
LON=lon,
START=t_start,
END=t_end,
KEY=os.getenv('OPEN_WEATHER_KEY')
)
page = requests.get(url=endpoint)
content = json.loads(page.content)
df = pd.json_normalize(content)
# List all records
ls = df['list'][0]
df_size = len(ls)
# Hourly time series list of each entry; daily total of each entry
ts_list = []
component_dict = {}
total = 0
# Find the daily averages of each component; merge into component list
for component in component_names:
for i in range(df_size):
total+=ls[i]['components'][component]
# Average for each day
if (i%24 == 0):
daily_value = round(total/24, 5)
ts_list.append(daily_value)
total = 0
# Each time series list goes into a new dict entry
component_dict[component] = ts_list
# Clear list for next iteration
ts_list = []
return component_dict
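# Usage sketch (added for illustration, not part of the original script): the city,
# coordinates and epochs below are hypothetical, the call needs network access, and
# OPEN_WEATHER_KEY must be set in the environment.
def _example_gen_daily_data():
    start = 1609459200                  # 2021-01-01 00:00 UTC (hypothetical)
    end = start + 7 * 86400             # one week later
    components = ['co', 'no2', 'o3', 'pm2_5']
    daily = gen_daily_data(name='Boston', lat=42.3601, lon=-71.0589,
                           t_start=start, t_end=end, component_names=components)
    # 'daily' maps each component name to its list of daily averages
    return daily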
def unit_norm(file_in, file_out, component_names, dims):
'''
Unit Normalize the gas data on a 0-1 scale and save to file
@params:
file_in: File containing input data
file_out: Destination to output unit normalized data
component_names: List of component gases
dims: Specify number of dimensions
'''
for i, component in enumerate(component_names):
print('---------- Normalizing component {} ----------'.format(component))
try:
df = pd.read_csv(file_in[i])
except:
logging.error('Unit normalization failed, input file not recognized')
sys.exit(1)
# Features list and removal of city, lat, lon
features = list(df.columns.values)
del features[:3]
del features[-1]
# y value list using last day of 7-month data
y = df.loc[:, ['{}_2021_06_06'.format(component)]].values
# Normalize x values; save in data frame
x = df.loc[:, features].values
x = Normalizer().fit_transform(x)
dfx = pd.DataFrame(x)
# Create columns names for each dimension
norm_labels = ['dim_{}'.format(i) for i in range(1, dims+2)]
dfx.columns = norm_labels
# Write to file
write_data = pd.DataFrame(data=dfx)
write_data.to_csv(path_or_buf=file_out[i], index=False)
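# Usage sketch (added for illustration; all paths and the dims value are hypothetical):
# normalize the CO and NO2 feature matrices. Note that dims+1 column labels are
# generated, so dims has to match the width of the normalized feature matrix minus one.
def _example_unit_norm():
    components = ['co', 'no2']
    inputs = ['data/co_data.csv', 'data/no2_data.csv']
    outputs = ['data/co_norm.csv', 'data/no2_norm.csv']
    unit_norm(inputs, outputs, components, dims=190)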
def gen_cols(t_start, t_end, component):
'''
Get list of column names based on the number of entries (each daily average of data will be one column)
Derive number of entries from start and end
@params:
t_start, t_end: Start and end times in unix time
element: String to represent the gas/particulate name to use
'''
# Change in epoch to number of hours gets us total entries
num_entries = int((t_end - t_start) / 86400)
time_step = t_start
col_names = ['city', 'lat', 'lon']
for i in range(num_entries):
# Convert daily interval to human-readable
timedate = datetime.datetime.fromtimestamp(time_step)
time_string = timedate.strftime('{COMPONENT}_%Y_%m_%d'.format(COMPONENT=component))
# Increment time_step
time_step+=86400
# Append col to list
col_names.append(time_string)
return col_names
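# Worked example (added for illustration; epochs are hypothetical): three days of
# daily columns for pm2_5. The exact date strings depend on the local timezone,
# since gen_cols formats them with datetime.fromtimestamp.
def _example_gen_cols():
    t0 = 1609502400                     # 2021-01-01 12:00 UTC
    cols = gen_cols(t0, t0 + 3 * 86400, 'pm2_5')
    # -> ['city', 'lat', 'lon', 'pm2_5_2021_01_01', 'pm2_5_2021_01_02', 'pm2_5_2021_01_03']
    return cols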
def batch_request(f_name, t_start, t_end, city_df, component_names):
'''
Places a batch request for gas/particulate levels on every minute
@params:
        f_name: List of file names to write to, one per component
        t_start, t_end: Start and end times in unix time
        city_df: Data frame containing city information
        component_names: List of component gases
'''
# Set up event loop
loop = asyncio.get_event_loop()
try:
loop.run_until_complete(request_buffer(f_name, t_start, t_end, city_df, component_names))
except KeyboardInterrupt:
pass
finally:
print('Exiting loop')
loop.close()
async def request_buffer(f_name, t_start, t_end, city_df, component_names):
''' Async function to buffer API requests to 60/min
@params:
        f_name: List of file names to write to, one per component
        t_start, t_end: Start and end times in unix time
        city_df: Data frame containing city information
        component_names: List of component gases
'''
curr_index = 0
df_size = len(city_df)
# Run batch request every minute
while (curr_index <= curr_index+60):
print('\nPlacing batch request...')
# Loop through 60 cities from current index
for i in range(curr_index, curr_index+60):
city_name = city_df.iloc[i][0]
city_lat = city_df.iloc[i][1]
city_lon = city_df.iloc[i][2]
city_info = [city_name]
# Retrieve particulate dict
entry = gen_daily_data(name=city_name, lat=city_lat, lon=city_lon, t_start=t_start, t_end=t_end, component_names=component_names)
# Cycle through each component and write to file
for i, component in enumerate(component_names):
# Write entry to file
with open(f_name[i], 'a', newline='') as f_open:
writer_obj = writer(f_open)
city_info+=entry[component]
writer_obj.writerow(city_info)
f_open.close()
print('Request placed, index updated to: ', curr_index)
# Check if current position has reached the end of file
if curr_index < df_size:
# Increment index to next batch of 60
curr_index+=60
await asyncio.sleep(90)
else:
# Otherwise, return from event loop
break
# Finish running
return 0
def main():
'''
Main function to pull data csv data from OpenWeather
Function call order:
main() > batch_request() > request_buffer() > gen_daily_data()
'''
# Retrieve data for list of cities
    city_df = pd.read_csv(filepath_or_buffer=f"{os.environ['HOME']}/github_repos/Pollution-Autoencoders/data/other/city_lat_lon.csv")
import os
import shutil
import uuid
import pandas
from installed_clients.DataFileUtilClient import DataFileUtil
from installed_clients.PangenomeAPIClient import PanGenomeAPI
class PangenomeDownload:
def __init__(self, config):
self.cfg = config
self.scratch = config['scratch']
self.pga = PanGenomeAPI(os.environ['SDK_CALLBACK_URL'])
self.dfu = DataFileUtil(os.environ['SDK_CALLBACK_URL'])
@staticmethod
def validate_params(params, expected={"workspace_name", "pangenome_name"}):
expected = set(expected)
pkeys = set(params)
if expected - pkeys:
raise ValueError("Required keys {} not in supplied parameters"
.format(", ".join(expected - pkeys)))
def to_tsv(self, params):
files = {}
working_dir = os.path.join(self.scratch,
'pangenome-download-'+str(uuid.uuid4()))
os.makedirs(working_dir)
pg_id, id_name_map, genome_df = self.make_genomes_df(
params['pangenome_ref'])
files['genomes_path'] = os.path.join(working_dir, pg_id + "_Genomes.tsv")
genome_df.to_csv(files['genomes_path'], sep="\t")
ortho_df = self.make_ortholog_df(params['pangenome_ref'], id_name_map)
files['orthologs_path'] = os.path.join(working_dir,
pg_id + "_Orthologs.tsv")
ortho_df.to_csv(files['orthologs_path'], sep="\t")
return pg_id, files
def to_excel(self, params):
files = {}
working_dir = os.path.join(self.scratch,
'pangenome-download-' + str(uuid.uuid4()))
os.makedirs(working_dir)
pg_id, id_name_map, genome_df = self.make_genomes_df(
params['pangenome_ref'])
files['path'] = os.path.join(working_dir, pg_id + ".xlsx")
        writer = pandas.ExcelWriter(files['path'])
import nltk.data
import pandas as pd
import argparse
import os
def section_start(lines, section=' IMPRESSION'):
"""Finds line index that is the start of the section."""
for idx, line in enumerate(lines):
if line.startswith(section):
return idx
return -1
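# Small illustration (added for clarity; the report lines are hypothetical):
# the IMPRESSION section starts at index 2, so section_start returns 2.
def _example_section_start():
    report_lines = ['FINAL REPORT',
                    ' FINDINGS: Lungs are clear.',
                    ' IMPRESSION: No acute cardiopulmonary process.']
    return section_start(report_lines)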
def generate_whole_report_impression_csv(df, split, dir):
"""Generates a csv containing report impressions."""
df_imp = df.copy()
for index, row in df_imp.iterrows():
report = row['report'].splitlines()
impression_idx = section_start(report)
impression_and_findings_idx = section_start(report, section=' FINDINGS AND IMPRESSION:')
seperator = ''
if impression_idx != -1:
impression = seperator.join(report[impression_idx:]).replace('IMPRESSION:', '').replace('\n', '').strip()
elif impression_and_findings_idx != -1:
impression = seperator.join(report[impression_and_findings_idx:]).replace('FINDINGS AND IMPRESSION:', '').replace('\n', '').strip()
else:
impression = ''
df_imp.at[index,'report']= impression
out_name = f'mimic_{split}_impressions.csv'
out_path = os.path.join(dir, out_name)
df_imp.to_csv(out_path, index=False)
def generate_sentence_level_impression_csv(df, split, dir, tokenizer):
"""Generates a csv containing all impression sentences."""
df_imp = []
for index, row in df.iterrows():
report = row['report'].splitlines()
impression_idx = section_start(report)
impression_and_findings_idx = section_start(report, section=' FINDINGS AND IMPRESSION:')
seperator = ''
if impression_idx != -1:
impression = seperator.join(report[impression_idx:]).replace('IMPRESSION:', '').replace('\n', '').strip()
elif impression_and_findings_idx != -1:
impression = seperator.join(report[impression_and_findings_idx:]).replace('FINDINGS AND IMPRESSION:', '').replace('\n', '').strip()
else:
impression = ''
for sent_index, sent in enumerate(split_sentences(impression, tokenizer)):
df_imp.append([row['dicom_id'], row['study_id'], row['subject_id'], sent_index, sent])
    df_imp = pd.DataFrame(df_imp, columns=['dicom_id', 'study_id', 'subject_id', 'sentence_id', 'report'])
# -*- coding: utf-8 -*-
"""
docstring goes here.
:copyright: Copyright 2014 by the Elephant team, see AUTHORS.txt.
:license: Modified BSD, see LICENSE.txt for details.
"""
from __future__ import division, print_function
import unittest
from itertools import chain
from neo.test.generate_datasets import fake_neo
import numpy as np
from numpy.testing.utils import assert_array_equal
import quantities as pq
try:
import pandas as pd
from pandas.util.testing import assert_frame_equal, assert_index_equal
except ImportError:
HAVE_PANDAS = False
else:
import elephant.pandas_bridge as ep
HAVE_PANDAS = True
@unittest.skipUnless(HAVE_PANDAS, 'requires pandas')
class MultiindexFromDictTestCase(unittest.TestCase):
def test__multiindex_from_dict(self):
inds = {'test1': 6.5,
'test2': 5,
'test3': 'test'}
targ = pd.MultiIndex(levels=[[6.5], [5], ['test']],
labels=[[0], [0], [0]],
names=['test1', 'test2', 'test3'])
res0 = ep._multiindex_from_dict(inds)
self.assertEqual(targ.levels, res0.levels)
self.assertEqual(targ.names, res0.names)
self.assertEqual(targ.labels, res0.labels)
def _convert_levels(levels):
"""Convert a list of levels to the format pandas returns for a MultiIndex.
Parameters
----------
levels : list
The list of levels to convert.
Returns
-------
list
        The levels in `list` converted to values like what pandas will give.
"""
levels = list(levels)
for i, level in enumerate(levels):
if hasattr(level, 'lower'):
try:
level = unicode(level)
except NameError:
pass
elif hasattr(level, 'date'):
levels[i] = pd.DatetimeIndex(data=[level])
continue
elif level is None:
levels[i] = pd.Index([])
continue
levels[i] = pd.Index([level])
return levels
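# Quick illustration (added for clarity, not part of the original test module):
# scalars become single-element Index objects and None becomes an empty Index,
# matching what pandas reports for MultiIndex levels.
def _example_convert_levels():
    converted = _convert_levels([5, 'spam', None])
    # -> roughly [Index([5]), Index(['spam']), Index([])], depending on the pandas version
    return converted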
@unittest.skipUnless(HAVE_PANDAS, 'requires pandas')
class ConvertValueSafeTestCase(unittest.TestCase):
def test__convert_value_safe__float(self):
targ = 5.5
value = targ
res = ep._convert_value_safe(value)
self.assertIs(res, targ)
def test__convert_value_safe__str(self):
targ = 'test'
value = targ
res = ep._convert_value_safe(value)
self.assertIs(res, targ)
def test__convert_value_safe__bytes(self):
targ = 'test'
value = b'test'
res = ep._convert_value_safe(value)
self.assertEqual(res, targ)
def test__convert_value_safe__numpy_int_scalar(self):
targ = 5
value = np.array(5)
res = ep._convert_value_safe(value)
self.assertEqual(res, targ)
self.assertFalse(hasattr(res, 'dtype'))
def test__convert_value_safe__numpy_float_scalar(self):
targ = 5.
value = np.array(5.)
res = ep._convert_value_safe(value)
self.assertEqual(res, targ)
self.assertFalse(hasattr(res, 'dtype'))
def test__convert_value_safe__numpy_unicode_scalar(self):
targ = u'test'
value = np.array('test', dtype='U')
res = ep._convert_value_safe(value)
self.assertEqual(res, targ)
self.assertFalse(hasattr(res, 'dtype'))
def test__convert_value_safe__numpy_str_scalar(self):
targ = u'test'
value = np.array('test', dtype='S')
res = ep._convert_value_safe(value)
self.assertEqual(res, targ)
self.assertFalse(hasattr(res, 'dtype'))
def test__convert_value_safe__quantity_scalar(self):
targ = (10., 'ms')
value = 10. * pq.ms
res = ep._convert_value_safe(value)
self.assertEqual(res, targ)
self.assertFalse(hasattr(res[0], 'dtype'))
self.assertFalse(hasattr(res[0], 'units'))
@unittest.skipUnless(HAVE_PANDAS, 'requires pandas')
class SpiketrainToDataframeTestCase(unittest.TestCase):
def test__spiketrain_to_dataframe__parents_empty(self):
obj = fake_neo('SpikeTrain', seed=0)
res0 = ep.spiketrain_to_dataframe(obj)
res1 = ep.spiketrain_to_dataframe(obj, child_first=True)
res2 = ep.spiketrain_to_dataframe(obj, child_first=False)
res3 = ep.spiketrain_to_dataframe(obj, parents=True)
res4 = ep.spiketrain_to_dataframe(obj, parents=True,
child_first=True)
res5 = ep.spiketrain_to_dataframe(obj, parents=True,
child_first=False)
res6 = ep.spiketrain_to_dataframe(obj, parents=False)
res7 = ep.spiketrain_to_dataframe(obj, parents=False, child_first=True)
res8 = ep.spiketrain_to_dataframe(obj, parents=False,
child_first=False)
targvalues = pq.Quantity(obj.magnitude, units=obj.units)
targvalues = targvalues.rescale('s').magnitude[np.newaxis].T
targindex = np.arange(len(targvalues))
attrs = ep._extract_neo_attrs_safe(obj, parents=True, child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(1, len(res3.columns))
self.assertEqual(1, len(res4.columns))
self.assertEqual(1, len(res5.columns))
self.assertEqual(1, len(res6.columns))
self.assertEqual(1, len(res7.columns))
self.assertEqual(1, len(res8.columns))
self.assertEqual(len(obj), len(res0.index))
self.assertEqual(len(obj), len(res1.index))
self.assertEqual(len(obj), len(res2.index))
self.assertEqual(len(obj), len(res3.index))
self.assertEqual(len(obj), len(res4.index))
self.assertEqual(len(obj), len(res5.index))
self.assertEqual(len(obj), len(res6.index))
self.assertEqual(len(obj), len(res7.index))
self.assertEqual(len(obj), len(res8.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targvalues, res3.values)
assert_array_equal(targvalues, res4.values)
assert_array_equal(targvalues, res5.values)
assert_array_equal(targvalues, res6.values)
assert_array_equal(targvalues, res7.values)
assert_array_equal(targvalues, res8.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
assert_array_equal(targindex, res2.index)
assert_array_equal(targindex, res3.index)
assert_array_equal(targindex, res4.index)
assert_array_equal(targindex, res5.index)
assert_array_equal(targindex, res6.index)
assert_array_equal(targindex, res7.index)
assert_array_equal(targindex, res8.index)
self.assertEqual(['spike_number'], res0.index.names)
self.assertEqual(['spike_number'], res1.index.names)
self.assertEqual(['spike_number'], res2.index.names)
self.assertEqual(['spike_number'], res3.index.names)
self.assertEqual(['spike_number'], res4.index.names)
self.assertEqual(['spike_number'], res5.index.names)
self.assertEqual(['spike_number'], res6.index.names)
self.assertEqual(['spike_number'], res7.index.names)
self.assertEqual(['spike_number'], res8.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
self.assertEqual(keys, res3.columns.names)
self.assertEqual(keys, res4.columns.names)
self.assertEqual(keys, res5.columns.names)
self.assertEqual(keys, res6.columns.names)
self.assertEqual(keys, res7.columns.names)
self.assertEqual(keys, res8.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res3.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res4.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res5.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res6.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res7.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res8.columns.levels):
assert_index_equal(value, level)
def test__spiketrain_to_dataframe__noparents(self):
blk = fake_neo('Block', seed=0)
obj = blk.list_children_by_class('SpikeTrain')[0]
res0 = ep.spiketrain_to_dataframe(obj, parents=False)
res1 = ep.spiketrain_to_dataframe(obj, parents=False,
child_first=True)
res2 = ep.spiketrain_to_dataframe(obj, parents=False,
child_first=False)
targvalues = pq.Quantity(obj.magnitude, units=obj.units)
targvalues = targvalues.rescale('s').magnitude[np.newaxis].T
targindex = np.arange(len(targvalues))
attrs = ep._extract_neo_attrs_safe(obj, parents=False,
child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(len(obj), len(res0.index))
self.assertEqual(len(obj), len(res1.index))
self.assertEqual(len(obj), len(res2.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
assert_array_equal(targindex, res2.index)
self.assertEqual(['spike_number'], res0.index.names)
self.assertEqual(['spike_number'], res1.index.names)
self.assertEqual(['spike_number'], res2.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
def test__spiketrain_to_dataframe__parents_childfirst(self):
blk = fake_neo('Block', seed=0)
obj = blk.list_children_by_class('SpikeTrain')[0]
res0 = ep.spiketrain_to_dataframe(obj)
res1 = ep.spiketrain_to_dataframe(obj, child_first=True)
res2 = ep.spiketrain_to_dataframe(obj, parents=True)
res3 = ep.spiketrain_to_dataframe(obj, parents=True, child_first=True)
targvalues = pq.Quantity(obj.magnitude, units=obj.units)
targvalues = targvalues.rescale('s').magnitude[np.newaxis].T
targindex = np.arange(len(targvalues))
attrs = ep._extract_neo_attrs_safe(obj, parents=True, child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(1, len(res3.columns))
self.assertEqual(len(obj), len(res0.index))
self.assertEqual(len(obj), len(res1.index))
self.assertEqual(len(obj), len(res2.index))
self.assertEqual(len(obj), len(res3.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targvalues, res3.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
assert_array_equal(targindex, res2.index)
assert_array_equal(targindex, res3.index)
self.assertEqual(['spike_number'], res0.index.names)
self.assertEqual(['spike_number'], res1.index.names)
self.assertEqual(['spike_number'], res2.index.names)
self.assertEqual(['spike_number'], res3.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
self.assertEqual(keys, res3.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res3.columns.levels):
assert_index_equal(value, level)
def test__spiketrain_to_dataframe__parents_parentfirst(self):
blk = fake_neo('Block', seed=0)
obj = blk.list_children_by_class('SpikeTrain')[0]
res0 = ep.spiketrain_to_dataframe(obj, child_first=False)
res1 = ep.spiketrain_to_dataframe(obj, parents=True, child_first=False)
targvalues = pq.Quantity(obj.magnitude, units=obj.units)
targvalues = targvalues.rescale('s').magnitude[np.newaxis].T
targindex = np.arange(len(targvalues))
attrs = ep._extract_neo_attrs_safe(obj, parents=True,
child_first=False)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(len(obj), len(res0.index))
self.assertEqual(len(obj), len(res1.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
self.assertEqual(['spike_number'], res0.index.names)
self.assertEqual(['spike_number'], res1.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
@unittest.skipUnless(HAVE_PANDAS, 'requires pandas')
class EventToDataframeTestCase(unittest.TestCase):
def test__event_to_dataframe__parents_empty(self):
obj = fake_neo('Event', seed=42)
res0 = ep.event_to_dataframe(obj)
res1 = ep.event_to_dataframe(obj, child_first=True)
res2 = ep.event_to_dataframe(obj, child_first=False)
res3 = ep.event_to_dataframe(obj, parents=True)
res4 = ep.event_to_dataframe(obj, parents=True, child_first=True)
res5 = ep.event_to_dataframe(obj, parents=True, child_first=False)
res6 = ep.event_to_dataframe(obj, parents=False)
res7 = ep.event_to_dataframe(obj, parents=False, child_first=True)
res8 = ep.event_to_dataframe(obj, parents=False, child_first=False)
targvalues = obj.labels[:len(obj.times)][np.newaxis].T.astype('U')
targindex = obj.times[:len(obj.labels)].rescale('s').magnitude
attrs = ep._extract_neo_attrs_safe(obj, parents=True, child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(1, len(res3.columns))
self.assertEqual(1, len(res4.columns))
self.assertEqual(1, len(res5.columns))
self.assertEqual(1, len(res6.columns))
self.assertEqual(1, len(res7.columns))
self.assertEqual(1, len(res8.columns))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res1.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res2.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res3.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res4.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res5.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res6.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res7.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res8.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targvalues, res3.values)
assert_array_equal(targvalues, res4.values)
assert_array_equal(targvalues, res5.values)
assert_array_equal(targvalues, res6.values)
assert_array_equal(targvalues, res7.values)
assert_array_equal(targvalues, res8.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
assert_array_equal(targindex, res2.index)
assert_array_equal(targindex, res3.index)
assert_array_equal(targindex, res4.index)
assert_array_equal(targindex, res5.index)
assert_array_equal(targindex, res6.index)
assert_array_equal(targindex, res7.index)
assert_array_equal(targindex, res8.index)
self.assertEqual(['times'], res0.index.names)
self.assertEqual(['times'], res1.index.names)
self.assertEqual(['times'], res2.index.names)
self.assertEqual(['times'], res3.index.names)
self.assertEqual(['times'], res4.index.names)
self.assertEqual(['times'], res5.index.names)
self.assertEqual(['times'], res6.index.names)
self.assertEqual(['times'], res7.index.names)
self.assertEqual(['times'], res8.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
self.assertEqual(keys, res3.columns.names)
self.assertEqual(keys, res4.columns.names)
self.assertEqual(keys, res5.columns.names)
self.assertEqual(keys, res6.columns.names)
self.assertEqual(keys, res7.columns.names)
self.assertEqual(keys, res8.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res3.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res4.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res5.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res6.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res7.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res8.columns.levels):
assert_index_equal(value, level)
def test__event_to_dataframe__noparents(self):
blk = fake_neo('Block', seed=42)
obj = blk.list_children_by_class('Event')[0]
res0 = ep.event_to_dataframe(obj, parents=False)
res1 = ep.event_to_dataframe(obj, parents=False, child_first=False)
res2 = ep.event_to_dataframe(obj, parents=False, child_first=True)
targvalues = obj.labels[:len(obj.times)][np.newaxis].T.astype('U')
targindex = obj.times[:len(obj.labels)].rescale('s').magnitude
attrs = ep._extract_neo_attrs_safe(obj, parents=False,
child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res1.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res2.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
assert_array_equal(targindex, res2.index)
self.assertEqual(['times'], res0.index.names)
self.assertEqual(['times'], res1.index.names)
self.assertEqual(['times'], res2.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
def test__event_to_dataframe__parents_childfirst(self):
blk = fake_neo('Block', seed=42)
obj = blk.list_children_by_class('Event')[0]
res0 = ep.event_to_dataframe(obj)
res1 = ep.event_to_dataframe(obj, child_first=True)
res2 = ep.event_to_dataframe(obj, parents=True)
res3 = ep.event_to_dataframe(obj, parents=True, child_first=True)
targvalues = obj.labels[:len(obj.times)][np.newaxis].T.astype('U')
targindex = obj.times[:len(obj.labels)].rescale('s').magnitude
attrs = ep._extract_neo_attrs_safe(obj, parents=True, child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(1, len(res3.columns))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res1.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res2.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res3.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targvalues, res3.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
assert_array_equal(targindex, res2.index)
assert_array_equal(targindex, res3.index)
self.assertEqual(['times'], res0.index.names)
self.assertEqual(['times'], res1.index.names)
self.assertEqual(['times'], res2.index.names)
self.assertEqual(['times'], res3.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
self.assertEqual(keys, res3.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res3.columns.levels):
assert_index_equal(value, level)
def test__event_to_dataframe__parents_parentfirst(self):
blk = fake_neo('Block', seed=42)
obj = blk.list_children_by_class('Event')[0]
res0 = ep.event_to_dataframe(obj, child_first=False)
res1 = ep.event_to_dataframe(obj, parents=True, child_first=False)
targvalues = obj.labels[:len(obj.times)][np.newaxis].T.astype('U')
targindex = obj.times[:len(obj.labels)].rescale('s').magnitude
attrs = ep._extract_neo_attrs_safe(obj, parents=True,
child_first=False)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res1.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
self.assertEqual(['times'], res0.index.names)
self.assertEqual(['times'], res1.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
for value, level in zip(values, res0.columns.levels):
            assert_index_equal(value, level)
__author__ = 'lucabasa'
__version__ = '1.2.0'
__status__ = 'development'
import pandas as pd
import numpy as np
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.model_selection import StratifiedShuffleSplit, train_test_split
from sklearn.pipeline import Pipeline
from sklearn.base import clone
from tubesml.model_inspection import get_coef, get_feature_importance, get_pdp
from tubesml.base import BaseTransformer
def grid_search(data, target, estimator, param_grid, scoring, cv, random=False):
'''
Calls a grid or a randomized search over a parameter grid
:param data: pandas DataFrame.
Data to tune the hyperparameters
:param target: numpy array or pandas Series.
Target column
:param estimator: sklearn compatible estimator.
It must have a ``predict`` method and a ``get_params`` method.
It can be a Pipeline.
:param param_grid: dict.
Dictionary of the parameter space to explore.
In case the ``estimator`` is a pipeline, provide the keys in the format ``step__param``.
:param scoring: string.
Scoring metric for the grid search, see the sklearn documentation for the available options.
:param cv: KFold object or int.
For cross-validation.
    :param random: bool or int, default=False.
        If truthy, runs a RandomizedSearchCV instead of a GridSearchCV, using the value as the number of sampled parameter settings (``n_iter``).
:return: a dataframe with the results for each configuration
:return: a dictionary with the best parameters
:return: the best (fitted) estimator
'''
if random:
grid = RandomizedSearchCV(estimator=estimator, param_distributions=param_grid, cv=cv, scoring=scoring,
n_iter=random, n_jobs=-1, random_state=434, return_train_score=True, error_score='raise')
else:
grid = GridSearchCV(estimator=estimator, param_grid=param_grid, error_score='raise',
cv=cv, scoring=scoring, n_jobs=-1, return_train_score=True)
    pd.options.mode.chained_assignment = None  # temporarily silence pandas' chained-assignment warning
tmp = data.copy()
grid = grid.fit(tmp, target)
pd.options.mode.chained_assignment = 'warn'
result = pd.DataFrame(grid.cv_results_).sort_values(by='mean_test_score',
ascending=False).reset_index()
del result['params']
times = [col for col in result.columns if col.endswith('_time')]
params = [col for col in result.columns if col.startswith('param_')]
result = result[params + ['mean_train_score', 'std_train_score', 'mean_test_score', 'std_test_score'] + times]
return result, grid.best_params_, grid.best_estimator_
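# Usage sketch (added for illustration, not part of the original module): tune a small
# ridge pipeline. The step name 'model', the grid values and the scoring string are
# example choices, and X is assumed to be a pandas DataFrame.
def _example_grid_search(X, y):
    from sklearn.linear_model import Ridge
    from sklearn.model_selection import KFold
    pipe = Pipeline([('transf', BaseTransformer()), ('model', Ridge())])
    param_grid = {'model__alpha': [0.1, 1.0, 10.0]}
    folds = KFold(n_splits=5, shuffle=True, random_state=434)
    result, best_params, best_estimator = grid_search(X, y, pipe, param_grid,
                                                      scoring='neg_mean_squared_error',
                                                      cv=folds)
    return result, best_params, best_estimator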
def cv_score(data, target, estimator, cv, imp_coef=False, pdp=None, predict_proba=False, early_stopping=None, eval_metric=None, verbose=False):
'''
Train and test a pipeline in kfold cross validation
:param data: pandas DataFrame.
Data to tune the hyperparameters.
:param target: numpy array or pandas Series.
Target column.
:param estimator: sklearn compatible estimator.
It must have a ``predict`` method and a ``get_params`` method.
It can be a Pipeline. If it is not a Pipeline, it will be made one for compatibility with other functionalities.
:param cv: KFold object.
For cross-validation, the estimates will be done across these folds.
:param imp_coef: bool, default=False.
If True, returns the feature importance or the coefficient values averaged across the folds, with standard deviation on the mean.
:param pdp: string or list, default=None.
If not None, returns the partial dependence of the given features averaged across the folds, with standard deviation on the mean.
The partial dependence of 2 features simultaneously is not supported.
:param predict_proba: bool, default=False.
If True, calls the ``predict_proba`` method instead of the ``predict`` one.
    :param early_stopping: int, default=None.
        If provided, it is used as the number of early stopping rounds within the folds for the estimators that support it.
:param eval_metric: str, default=None.
The evaluation metric to use for early stopping.
:param verbose: bool or int, default=False.
Level of verbosity for early stopping.
:return oof: pd.Series with the out of fold predictions for the entire train set.
:return rep_res: A dictionary with additional results. If ``imp_coef=True``, it contains a pd.DataFrame with the coefficients or
feature importances of the estimator, it can be found under the key ``feat_imp``. If ``early_stopping=True``, it contains a list
with the best iteration number per fold, it can be found under the key ``iterations``. If ``pdp`` is not ``None``, it contains a
pd.DataFrame with the partial dependence of the given features, it can be found under the key ``pdp``
'''
oof = np.zeros(len(data))
train = data.copy()
rep_res = {}
feat_df = pd.DataFrame()
iteration = []
feat_pdp = pd.DataFrame()
try: # If estimator is not a pipeline, make a pipeline
estimator.steps
except AttributeError:
estimator = Pipeline([('transf', BaseTransformer()), ('model', estimator)])
for n_fold, (train_index, test_index) in enumerate(cv.split(train.values)):
trn_data = train.iloc[train_index, :]
val_data = train.iloc[test_index, :]
trn_target = pd.Series(target.iloc[train_index].values.ravel())
val_target = pd.Series(target.iloc[test_index].values.ravel())
# create model and transform pipelines
transf_pipe = clone(Pipeline(estimator.steps[:-1]))
model = clone(estimator.steps[-1][1]) # it creates issues with match_cols in dummy otherwise
# Transform the data for the model
trn_data = transf_pipe.fit_transform(trn_data, trn_target)
val_data = transf_pipe.transform(val_data)
if early_stopping:
# Fit the model with early stopping
model.fit(trn_data, trn_target,
eval_set=[(trn_data, trn_target), (val_data, val_target)],
early_stopping_rounds=early_stopping,
eval_metric=eval_metric,
verbose=verbose)
#store iteration used
try:
iteration.append(model.best_iteration)
except AttributeError:
iteration.append(model.best_iteration_)
else:
model.fit(trn_data, trn_target)
if predict_proba:
oof[test_index] = model.predict_proba(val_data)[:,1]
else:
oof[test_index] = model.predict(val_data).ravel()
if imp_coef:
feats = trn_data.columns
try:
fold_df = get_coef(model, feats)
except (AttributeError, KeyError):
fold_df = get_feature_importance(model, feats)
fold_df['fold'] = n_fold + 1
feat_df = pd.concat([feat_df, fold_df], axis=0)
if pdp is not None:
pdp_set = transf_pipe.transform(train) # to have the same data ranges in each fold
# The pdp will still be different by fold
if isinstance(pdp, str):
pdp = [pdp]
fold_pdp = []
for feat in pdp:
if isinstance(feat, tuple): # 2-way pdp is not supported as we can't take a good average
continue
fold_tmp = get_pdp(model, feat, pdp_set)
fold_tmp['fold'] = n_fold + 1
fold_pdp.append(fold_tmp)
            fold_pdp = pd.concat(fold_pdp, axis=0)
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import tree
import pydotplus
from sklearn.model_selection import train_test_split # Import train_test_split function
from sklearn import metrics #Import scikit-learn metrics module for accuracy calculation
import time
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
def MySupportVectorMachine():
    # Create the classifier
    classifier = SVC(C=10, break_ties=False, cache_size=200, class_weight=None, coef0=0.0,
                     decision_function_shape='ovr', degree=3, gamma='auto', kernel='rbf',
                     max_iter=-1, probability=False, random_state=1, shrinking=True, tol=0.001,
                     verbose=False)
    # Train the classifier
    classifier.fit(X_train, y_train)
    # And now predict on the test set
    predicted = classifier.predict(X_test)
    # Rename the labels for the confusion matrix
    labels = ("Female","Male")
    positions = (0,1)
    # Print the results
    print("Classification report for classifier %s:\n%s\n"
          % (classifier, metrics.classification_report(y_test, predicted)))
    disp = metrics.plot_confusion_matrix(classifier, X_test, y_test, cmap="OrRd")
    disp.figure_.suptitle("Confusion Matrix")
    print("Confusion matrix:\n%s" % disp.confusion_matrix)
    # Print the time taken to run the classifier
    print ("\nElapsed time: {:.2f}m\n".format((time.time()-start_time)/60))
    # Display the confusion matrix
    plt.xticks(positions,labels)
    plt.yticks(positions,labels)
    plt.savefig('OutputSVM/ConfusionMatrix.png', bbox_inches='tight')
    plt.show()
    # Print the accuracy
    from sklearn.metrics import accuracy_score
    print("Accuracy: ")
    print(accuracy_score(y_test, predicted))
def MyDecisionTree():
    #The decision tree classifier
    clf = tree.DecisionTreeClassifier(criterion = "gini", max_depth= 13)
    # Train the decision tree
    clf_train = clf.fit(X_train, y_train)
    # Predict the response for the dataset
    y_pred = clf.predict(X_test)
    # Model accuracy: evaluate the model
    print("Accuracy:",metrics.accuracy_score(y_test, y_pred))
    # Export the decision tree in DOT format using GraphViz
    dot_data = tree.export_graphviz(clf_train, out_file=None, feature_names=X_train.columns.values,
                                    class_names=['Female', 'Male'], rounded=True, filled=True) #Gini decides which attribute/feature should be placed at the root node, which features will act as internal nodes or leaf nodes
    # Build the decision tree graph from the DOT data
    graph = pydotplus.graph_from_dot_data(dot_data)
    # Save the resulting decision tree as a PNG
    test2 = graph.write_png("OutputDT/GraphDecisionTree.png")
    print ("\nElapsed time: {:.2f}m\n".format((time.time()-start_time)/60))
def MyNearestNeighbors():
    #NearestNeighbors classifier
    classifier = KNeighborsClassifier(n_neighbors=9)
    # Train the classifier
    clf_train = classifier.fit(X_train, y_train)
    # Predict the response for the dataset
    y_pred = classifier.predict(X_test)
    labels = ("Female","Male")
    positions = (0,1)
    # Print the results
    print("Classification report for classifier %s:\n%s\n"
          % (classifier, metrics.classification_report(y_test, y_pred)))
    disp = metrics.plot_confusion_matrix(classifier, X_test, y_test, cmap=plt.cm.Blues)
    disp.figure_.suptitle("Confusion Matrix")
    print("Confusion matrix:\n%s" % disp.confusion_matrix)
    # Model accuracy: evaluate the model
    print("Accuracy:", metrics.accuracy_score(y_test, y_pred))
    print("\nElapsed time: {:.2f}m\n".format((time.time() - start_time) / 60))
    # Display the confusion matrix
    plt.xticks(positions,labels)
    plt.yticks(positions,labels)
    plt.savefig('OutputKNN/ConfusionMatrixBalancedWeb_n9.png', bbox_inches='tight')
    plt.show()
# Function that creates a balanced dataset
def CreateBalanced4c4s(dfconc):
    # Create a csv matching dataset4c4s with the added "Gender" column
    dfconc.to_csv("DatasetCelebA/Dataset4c4sBalanced1000.csv", header = False, index = False)
    # Read the csv just created in order to run the manipulation steps
    DFbalanced = pd.read_csv("DatasetCelebA/Dataset4c4sBalanced1000.csv",header = None)
    # Keep in a dataframe all rows with gender equal to 1 (male)
    dfBalanceM = DFbalanced.loc[DFbalanced[64] == 1]
    # Keep in a dataframe all rows with gender equal to -1 (female)
    dfBalanceF = DFbalanced.loc[DFbalanced[64] == -1]
    # Drop the excess rows of the female dataframe (relative to the male dataframe)
    dfBalanceF = dfBalanceF.iloc[0:432]
    # Concatenate the two dataframes, which now have the same number of rows
    DFbalanced = pd.concat([dfBalanceM,dfBalanceF], axis = 0)
    # Write the corresponding csv
    DFbalanced.to_csv("DatasetCelebA/Dataset4c4sBalanced1000.csv", header = False, index = False)
# Function that splits the balanced dataset into train and test sets
def ExecOnBalanced():
    # Read the balanced dataset
    dataframe = pd.read_csv("DatasetCelebA/Dataset4c4sBalanced1000.csv", header=None)
    # Rename column 64 to Gender.
    dataframe = dataframe.rename(columns={dataframe.columns[64]: "Gender"})  # -1 female, 1 male
    # Get the feature variables
    feature_cols = list(dataframe.columns.values)
    X = feature_cols[1:len(feature_cols) - 1]
    X = dataframe[X]
    # Get the target variable
    y = dataframe.Gender
    # Split the dataframe into train and test sets
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3,random_state=1)  # 70% training and 30% test
    return X_train,X_test,y_train,y_test
# Function that reads the CSVs the experiments run on
def ReadCSV():
    # Load the two datasets
    dataframe = pd.read_csv("DatasetCelebA/dataset4c4s1000.csv", header=None)
    feature = pd.read_csv("DatasetCelebA/list_attr_celeba.csv")
    # Take the feature column for gender.
    feat = feature.iloc[0:1000, 21]
    df_X = pd.DataFrame(feat)
    # Rename the column from Male to Gender.
    rename = df_X.rename(columns={"Male": "Gender"})  # -1 female, 1 male
    # Concatenate the two dataframes into a single one. Not needed for the balanced dataset
    dfconc = pd.concat([dataframe, rename], axis=1, sort=False)
#!/usr/bin/env python
"""
This module will read sas7bdat files using pure Python (2.7+, 3+).
No SAS software required!
"""
from __future__ import division, absolute_import, print_function,\
unicode_literals
import atexit
import csv
import logging
import math
import os
import platform
import struct
import sys
from codecs import open
from datetime import datetime, timedelta
import six
xrange = six.moves.range
__all__ = ['SAS7BDAT']
def _debug(t, v, tb):
if hasattr(sys, 'ps1') or not sys.stderr.isatty():
sys.__excepthook__(t, v, tb)
else:
import pdb
import traceback
traceback.print_exception(t, v, tb)
print()
pdb.pm()
os._exit(1)
def _get_color_emit(prefix, fn):
# This doesn't work on Windows since Windows doesn't support
# the ansi escape characters
def _new(handler):
levelno = handler.levelno
if levelno >= logging.CRITICAL:
color = '\x1b[31m' # red
elif levelno >= logging.ERROR:
color = '\x1b[31m' # red
elif levelno >= logging.WARNING:
color = '\x1b[33m' # yellow
elif levelno >= logging.INFO:
color = '\x1b[32m' # green or normal
elif levelno >= logging.DEBUG:
color = '\x1b[35m' # pink
else:
color = '\x1b[0m' # normal
try:
handler.msg = '%s[%s] %s%s' % (
color, prefix, handler.msg, '\x1b[0m'
)
except UnicodeDecodeError:
handler.msg = '%s[%s] %s%s' % (
color, prefix, handler.msg.decode('utf-8'), '\x1b[0m'
)
return fn(handler)
return _new
class ParseError(Exception):
pass
class Decompressor(object):
def __init__(self, parent):
self.parent = parent
def decompress_row(self, offset, length, result_length, page):
raise NotImplementedError
@staticmethod
def to_ord(int_or_str):
if isinstance(int_or_str, int):
return int_or_str
return ord(int_or_str)
@staticmethod
def to_chr(int_or_str):
py2 = six.PY2
if isinstance(int_or_str, (bytes, bytearray)):
return int_or_str
if py2:
return chr(int_or_str)
return bytes([int_or_str])
class RLEDecompressor(Decompressor):
"""
Decompresses data using the Run Length Encoding algorithm
"""
def decompress_row(self, offset, length, result_length, page):
b = self.to_ord
c = self.to_chr
current_result_array_index = 0
result = []
i = 0
for j in xrange(length):
if i != j:
continue
control_byte = b(page[offset + i]) & 0xF0
end_of_first_byte = b(page[offset + i]) & 0x0F
if control_byte == 0x00:
if i != (length - 1):
count_of_bytes_to_copy = (
(b(page[offset + i + 1]) & 0xFF) +
64 +
end_of_first_byte * 256
)
start = offset + i + 2
end = start + count_of_bytes_to_copy
result.append(c(page[start:end]))
i += count_of_bytes_to_copy + 1
current_result_array_index += count_of_bytes_to_copy
elif control_byte == 0x40:
copy_counter = (
end_of_first_byte * 16 +
(b(page[offset + i + 1]) & 0xFF)
)
for _ in xrange(copy_counter + 18):
result.append(c(page[offset + i + 2]))
current_result_array_index += 1
i += 2
elif control_byte == 0x60:
for _ in xrange(end_of_first_byte * 256 +
(b(page[offset + i + 1]) & 0xFF) + 17):
result.append(c(0x20))
current_result_array_index += 1
i += 1
elif control_byte == 0x70:
for _ in xrange(end_of_first_byte * 256 +
(b(page[offset + i + 1]) & 0xFF) + 17):
result.append(c(0x00))
current_result_array_index += 1
i += 1
elif control_byte == 0x80:
count_of_bytes_to_copy = min(end_of_first_byte + 1,
length - (i + 1))
start = offset + i + 1
end = start + count_of_bytes_to_copy
result.append(c(page[start:end]))
i += count_of_bytes_to_copy
current_result_array_index += count_of_bytes_to_copy
elif control_byte == 0x90:
count_of_bytes_to_copy = min(end_of_first_byte + 17,
length - (i + 1))
start = offset + i + 1
end = start + count_of_bytes_to_copy
result.append(c(page[start:end]))
i += count_of_bytes_to_copy
current_result_array_index += count_of_bytes_to_copy
elif control_byte == 0xA0:
count_of_bytes_to_copy = min(end_of_first_byte + 33,
length - (i + 1))
start = offset + i + 1
end = start + count_of_bytes_to_copy
result.append(c(page[start:end]))
i += count_of_bytes_to_copy
current_result_array_index += count_of_bytes_to_copy
elif control_byte == 0xB0:
count_of_bytes_to_copy = min(end_of_first_byte + 49,
length - (i + 1))
start = offset + i + 1
end = start + count_of_bytes_to_copy
result.append(c(page[start:end]))
i += count_of_bytes_to_copy
current_result_array_index += count_of_bytes_to_copy
elif control_byte == 0xC0:
for _ in xrange(end_of_first_byte + 3):
result.append(c(page[offset + i + 1]))
current_result_array_index += 1
i += 1
elif control_byte == 0xD0:
for _ in xrange(end_of_first_byte + 2):
result.append(c(0x40))
current_result_array_index += 1
elif control_byte == 0xE0:
for _ in xrange(end_of_first_byte + 2):
result.append(c(0x20))
current_result_array_index += 1
elif control_byte == 0xF0:
for _ in xrange(end_of_first_byte + 2):
result.append(c(0x00))
current_result_array_index += 1
else:
self.parent.logger.error('unknown control byte: %s',
control_byte)
i += 1
result = b''.join(result)
if len(result) != result_length:
self.parent.logger.error('unexpected result length: %d != %d' %
(len(result), result_length))
return result
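# Minimal self-check of two of the RLE control bytes handled above (added for
# illustration, not part of the original module). The fake parent only needs a
# logger attribute, which decompress_row uses solely on its error paths.
def _rle_decompressor_example():
    class _FakeParent(object):
        logger = logging.getLogger('rle-example')
    dec = RLEDecompressor(_FakeParent())
    # 0x70 fills with NUL bytes: count = 0 * 256 + 0x03 + 17 = 20
    assert dec.decompress_row(0, 2, 20, b'\x70\x03') == b'\x00' * 20
    # 0x80 copies the next (end_of_first_byte + 1) = 3 bytes verbatim
    assert dec.decompress_row(0, 4, 3, b'\x82abc') == b'abc'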
class RDCDecompressor(Decompressor):
"""
Decompresses data using the Ross Data Compression algorithm
http://collaboration.cmc.ec.gc.ca/science/rpn/biblio/ddj/Website/
articles/CUJ/1992/9210/ross/ross.htm
"""
def decompress_row(self, offset, length, result_length, page):
src_row = [self.to_ord(x) for x in page[offset:offset + length]]
out_row = [0] * result_length
ctrl_mask = 0
ctrl_bits = 0
src_offset = 0
out_offset = 0
# process each item in src_row
while src_offset < (len(src_row) - 2):
# get new load of control bits if needed
ctrl_mask = ctrl_mask >> 1
if ctrl_mask == 0:
ctrl_bits = (src_row[src_offset] << 8) +\
src_row[src_offset + 1]
src_offset += 2
ctrl_mask = 0x8000
# just copy this char if control bit is zero
if (ctrl_bits & ctrl_mask) == 0:
out_row[out_offset] = src_row[src_offset]
out_offset += 1
src_offset += 1
continue
# undo the compression code
cmd = (src_row[src_offset] >> 4) & 0x0F
cnt = src_row[src_offset] & 0x0F
src_offset += 1
if cmd == 0: # short rle
cnt += 3
for k in xrange(cnt):
out_row[out_offset + k] = src_row[src_offset]
out_offset += cnt
src_offset += 1
elif cmd == 1: # long rle
cnt += src_row[src_offset] << 4
cnt += 19
src_offset += 1
for k in xrange(cnt):
out_row[out_offset + k] = src_row[src_offset]
out_offset += cnt
src_offset += 1
elif cmd == 2: # long pattern
ofs = cnt + 3
ofs += src_row[src_offset] << 4
src_offset += 1
cnt = src_row[src_offset]
src_offset += 1
cnt += 16
for k in xrange(cnt):
out_row[out_offset + k] = out_row[out_offset - ofs + k]
out_offset += cnt
elif cmd >= 3 and cmd <= 15: # short pattern
ofs = cnt + 3
ofs += src_row[src_offset] << 4
src_offset += 1
for k in xrange(cmd):
out_row[out_offset + k] = out_row[out_offset - ofs + k]
out_offset += cmd
else:
self.parent.logger.error(
'unknown marker %s at offset %s', src_row[src_offset],
src_offset
)
break
return b''.join([self.to_chr(x) for x in out_row])
class SAS7BDAT(object):
"""
SAS7BDAT(path[, log_level[, extra_time_format_strings[, \
extra_date_time_format_strings[, extra_date_format_strings[, \
fh=fh]]]]]) -> \
SAS7BDAT object
Open a SAS7BDAT file or use an existing file handle.
The log level are standard logging levels (defaults to logging.INFO).
If your sas7bdat file uses non-standard format strings for time, datetime,
or date values, pass those strings into the constructor using the
appropriate kwarg.
The file will be opened from the path supplied, unless a file handle
is supplied. The file handle should be opened in binary mode for
correct operation.
"""
_open_files = []
RLE_COMPRESSION = b'SASYZCRL'
RDC_COMPRESSION = b'SASYZCR2'
COMPRESSION_LITERALS = set([
RLE_COMPRESSION, RDC_COMPRESSION
])
DECOMPRESSORS = {
RLE_COMPRESSION: RLEDecompressor,
RDC_COMPRESSION: RDCDecompressor
}
TIME_FORMAT_STRINGS = set([
'TIME'
])
DATE_TIME_FORMAT_STRINGS = set([
'DATETIME'
])
DATE_FORMAT_STRINGS = set([
'YYMMDD', 'MMDDYY', 'DDMMYY', 'DATE', 'JULIAN', 'MONYY'
])
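    # Usage sketch (illustrative; 'example.sas7bdat' is a hypothetical file name):
    #
    #     with SAS7BDAT('example.sas7bdat') as f:
    #         for row in f:
    #             print(row)
    #
    # Iteration yields one list of values per row; the first row holds the column
    # names unless skip_header=True was passed to the constructor.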
def __init__(self, path, log_level=logging.INFO,
extra_time_format_strings=None,
extra_date_time_format_strings=None,
extra_date_format_strings=None,
skip_header=False,
encoding='utf8',
encoding_errors='ignore',
align_correction=True,
fh=None, strip_whitespace_from_strings=True):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
if log_level == logging.DEBUG:
sys.excepthook = _debug
self.path = path
self.endianess = None
self.u64 = False
self.logger = self._make_logger(level=log_level)
self._update_format_strings(
self.TIME_FORMAT_STRINGS, extra_time_format_strings
)
self._update_format_strings(
self.DATE_TIME_FORMAT_STRINGS, extra_date_time_format_strings
)
self._update_format_strings(
self.DATE_FORMAT_STRINGS, extra_date_format_strings
)
self.skip_header = skip_header
self.strip_whitespace_from_strings = strip_whitespace_from_strings
self.encoding = encoding
self.encoding_errors = encoding_errors
self.align_correction = align_correction
self._file = fh or open(self.path, 'rb')
self._open_files.append(self._file)
self.cached_page = None
self.current_page_type = None
self.current_page_block_count = None
self.current_page_subheaders_count = None
self.current_file_position = 0
self.current_page_data_subheader_pointers = []
self.current_row = []
self.column_names_strings = []
self.column_names = []
self.column_types = []
self.column_data_offsets = []
self.column_data_lengths = []
self.columns = []
self.header = SASHeader(self)
self.properties = self.header.properties
self.header.parse_metadata()
self.logger.debug('\n%s', self.header)
self._iter = self.readlines()
def __repr__(self):
"""
x.__repr__() <==> repr(x)
"""
return 'SAS7BDAT file: %s' % os.path.basename(self.path)
def __enter__(self):
"""
__enter__() -> self.
"""
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""
__exit__(*excinfo) -> None. Closes the file.
"""
self.close()
def __iter__(self):
"""
x.__iter__() <==> iter(x)
"""
return self.readlines()
def _update_format_strings(self, var, format_strings):
if format_strings is not None:
if isinstance(format_strings, str):
var.add(format_strings)
elif isinstance(format_strings, (set, list, tuple)):
var.update(set(format_strings))
else:
raise NotImplementedError
def close(self):
"""
close() -> None or (perhaps) an integer. Close the file.
A closed file cannot be used for further I/O operations.
close() may be called more than once without error.
Some kinds of file objects (for example, opened by popen())
may return an exit status upon closing.
"""
return self._file.close()
def _make_logger(self, level=logging.INFO):
"""
Create a custom logger with the specified properties.
"""
logger = logging.getLogger(self.path)
logger.setLevel(level)
fmt = '%(message)s'
stream_handler = logging.StreamHandler()
if platform.system() != 'Windows':
stream_handler.emit = _get_color_emit(
os.path.basename(self.path),
stream_handler.emit
)
else:
fmt = '[%s] %%(message)s' % os.path.basename(self.path)
formatter = logging.Formatter(fmt, '%y-%m-%d %H:%M:%S')
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
return logger
def _read_bytes(self, offsets_to_lengths):
result = {}
if not self.cached_page:
for offset, length in six.iteritems(offsets_to_lengths):
skipped = 0
while skipped < (offset - self.current_file_position):
seek = offset - self.current_file_position - skipped
skipped += seek
self._file.seek(seek, 0)
tmp = self._file.read(length)
if len(tmp) < length:
self.logger.error(
'failed to read %s bytes from sas7bdat file', length
)
self.current_file_position = offset + length
result[offset] = tmp
else:
for offset, length in six.iteritems(offsets_to_lengths):
result[offset] = self.cached_page[offset:offset + length]
return result
def _read_val(self, fmt, raw_bytes, size):
if fmt == 'i' and self.u64 and size == 8:
fmt = 'q'
newfmt = fmt
if fmt == 's':
newfmt = '%ds' % min(size, len(raw_bytes))
elif fmt in set(['number', 'datetime', 'date', 'time']):
newfmt = 'd'
if len(raw_bytes) != size:
size = len(raw_bytes)
if size < 8:
if self.endianess == 'little':
raw_bytes = b''.join([b'\x00' * (8 - size), raw_bytes])
else:
raw_bytes += b'\x00' * (8 - size)
size = 8
if self.endianess == 'big':
newfmt = '>%s' % newfmt
else:
newfmt = '<%s' % newfmt
val = struct.unpack(str(newfmt), raw_bytes[:size])[0]
if fmt == 's':
val = val.strip(b'\x00')
if self.strip_whitespace_from_strings:
val = val.strip()
elif math.isnan(val):
val = None
elif fmt == 'datetime':
val = datetime(1960, 1, 1) + timedelta(seconds=val)
elif fmt == 'time':
val = (datetime(1960, 1, 1) + timedelta(seconds=val)).time()
elif fmt == 'date':
try:
val = (datetime(1960, 1, 1) + timedelta(days=val)).date()
except OverflowError:
# Some data sets flagged with a date format are actually
# stored as datetime values
val = datetime(1960, 1, 1) + timedelta(seconds=val)
return val
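    # Illustrative check of the epoch handling above (not in the original
    # source): SAS stores dates as days since 1960-01-01, so a raw value of
    # 19724.0 with a DATE format decodes to date(2014, 1, 1), i.e.
    #   datetime(1960, 1, 1) + timedelta(days=19724) == datetime(2014, 1, 1)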
def readlines(self):
"""
readlines() -> generator which yields lists of values, each a line
from the file.
Possible values in the list are None, string, float, datetime.datetime,
datetime.date, and datetime.time.
"""
bit_offset = self.header.PAGE_BIT_OFFSET
subheader_pointer_length = self.header.SUBHEADER_POINTER_LENGTH
row_count = self.header.properties.row_count
current_row_in_file_index = 0
current_row_on_page_index = 0
if not self.skip_header:
yield [x.name.decode(self.encoding, self.encoding_errors)
for x in self.columns]
if not self.cached_page:
self._file.seek(self.properties.header_length)
self._read_next_page()
while current_row_in_file_index < row_count:
current_row_in_file_index += 1
current_page_type = self.current_page_type
if current_page_type == self.header.PAGE_META_TYPE:
try:
current_subheader_pointer =\
self.current_page_data_subheader_pointers[
current_row_on_page_index
]
except IndexError:
self._read_next_page()
current_row_on_page_index = 0
else:
current_row_on_page_index += 1
cls = self.header.SUBHEADER_INDEX_TO_CLASS.get(
self.header.DATA_SUBHEADER_INDEX
)
if cls is None:
raise NotImplementedError
cls(self).process_subheader(
current_subheader_pointer.offset,
current_subheader_pointer.length
)
if current_row_on_page_index ==\
len(self.current_page_data_subheader_pointers):
self._read_next_page()
current_row_on_page_index = 0
elif current_page_type in self.header.PAGE_MIX_TYPE:
if self.align_correction:
align_correction = (
bit_offset + self.header.SUBHEADER_POINTERS_OFFSET +
self.current_page_subheaders_count *
subheader_pointer_length
) % 8
else:
align_correction = 0
offset = (
bit_offset + self.header.SUBHEADER_POINTERS_OFFSET +
align_correction + self.current_page_subheaders_count *
subheader_pointer_length + current_row_on_page_index *
self.properties.row_length
)
try:
self.current_row = self._process_byte_array_with_data(
offset,
self.properties.row_length
)
except:
self.logger.exception(
'failed to process data (you might want to try '
'passing align_correction=%s to the SAS7BDAT '
'constructor)' % (not self.align_correction)
)
raise
current_row_on_page_index += 1
if current_row_on_page_index == min(
self.properties.row_count,
self.properties.mix_page_row_count
):
self._read_next_page()
current_row_on_page_index = 0
elif current_page_type == self.header.PAGE_DATA_TYPE:
self.current_row = self._process_byte_array_with_data(
bit_offset + self.header.SUBHEADER_POINTERS_OFFSET +
current_row_on_page_index *
self.properties.row_length,
self.properties.row_length
)
current_row_on_page_index += 1
if current_row_on_page_index == self.current_page_block_count:
self._read_next_page()
current_row_on_page_index = 0
else:
self.logger.error('unknown page type: %s', current_page_type)
yield self.current_row
def _read_next_page(self):
self.current_page_data_subheader_pointers = []
self.cached_page = self._file.read(self.properties.page_length)
if len(self.cached_page) <= 0:
return
if len(self.cached_page) != self.properties.page_length:
self.logger.error(
'failed to read complete page from file (read %s of %s bytes)',
len(self.cached_page), self.properties.page_length
)
self.header.read_page_header()
if self.current_page_type == self.header.PAGE_META_TYPE:
self.header.process_page_metadata()
if self.current_page_type not in [
self.header.PAGE_META_TYPE,
self.header.PAGE_DATA_TYPE
] + self.header.PAGE_MIX_TYPE:
self._read_next_page()
def _process_byte_array_with_data(self, offset, length):
row_elements = []
if self.properties.compression and length < self.properties.row_length:
decompressor = self.DECOMPRESSORS.get(
self.properties.compression
)
source = decompressor(self).decompress_row(
offset, length, self.properties.row_length,
self.cached_page
)
offset = 0
else:
source = self.cached_page
for i in xrange(self.properties.column_count):
length = self.column_data_lengths[i]
if length == 0:
break
start = offset + self.column_data_offsets[i]
end = offset + self.column_data_offsets[i] + length
temp = source[start:end]
if self.columns[i].type == 'number':
if self.column_data_lengths[i] <= 2:
row_elements.append(self._read_val(
'h', temp, length
))
else:
fmt = self.columns[i].format
if not fmt:
row_elements.append(self._read_val(
'number', temp, length
))
elif fmt in self.TIME_FORMAT_STRINGS:
row_elements.append(self._read_val(
'time', temp, length
))
elif fmt in self.DATE_TIME_FORMAT_STRINGS:
row_elements.append(self._read_val(
'datetime', temp, length
))
elif fmt in self.DATE_FORMAT_STRINGS:
row_elements.append(self._read_val(
'date', temp, length
))
else:
row_elements.append(self._read_val(
'number', temp, length
))
else: # string
row_elements.append(self._read_val(
's', temp, length
).decode(self.encoding, self.encoding_errors))
return row_elements
def convert_file(self, out_file, delimiter=',', step_size=100000,
encoding=None):
"""
        convert_file(out_file[, delimiter[, step_size[, encoding]]]) -> None
        A convenience method to convert a SAS7BDAT file into a delimited
        text file. Defaults to comma separated. The step_size parameter
        is used to show progress on longer-running conversions.
"""
delimiter = str(delimiter)
self.logger.debug('saving as: %s', out_file)
out_f = None
success = True
try:
if out_file == '-':
out_f = sys.stdout
else:
out_f = open(out_file, 'w', encoding=encoding)
out = csv.writer(out_f, lineterminator='\n', delimiter=delimiter)
i = 0
for i, line in enumerate(self, 1):
if len(line) != (self.properties.column_count or 0):
msg = 'parsed line into %s columns but was ' \
'expecting %s.\n%s' %\
(len(line), self.properties.column_count, line)
self.logger.error(msg)
success = False
if self.logger.level == logging.DEBUG:
raise ParseError(msg)
break
if not i % step_size:
self.logger.info(
'%.1f%% complete',
float(i) / self.properties.row_count * 100.0
)
try:
out.writerow(line)
except IOError:
                    self.logger.warning('wrote %s lines before interruption', i)
success = False
break
self.logger.info('\u27f6 [%s] wrote %s of %s lines',
os.path.basename(out_file), i - 1,
self.properties.row_count or 0)
except Exception:
            self.logger.exception('failed to convert %s', out_file)
success = False
finally:
if out_f is not None:
out_f.close()
return success
def to_data_frame(self):
"""
to_data_frame() -> pandas.DataFrame object
A convenience method to convert a SAS7BDAT file into a pandas
DataFrame.
"""
import pandas as pd
data = list(self.readlines())
return | pd.DataFrame(data[1:], columns=data[0]) | pandas.DataFrame |
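# A minimal usage sketch for the reader defined above; the file names are
# placeholders for illustration and are not part of the original source.
if __name__ == '__main__':
    # iterate rows lazily; the first yielded row is the list of column names
    # unless skip_header=True was passed to the constructor
    with SAS7BDAT('example.sas7bdat') as handle:
        for row in handle:
            print(row)
    # convert straight to a delimited text file ('-' would write to stdout)
    SAS7BDAT('example.sas7bdat').convert_file('example.csv', delimiter=',')
    # or materialize everything into a pandas DataFrame
    frame = SAS7BDAT('example.sas7bdat').to_data_frame()
    print(frame.head())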
#!/usr/bin/env python
# coding: utf-8
# > Note: KNN is a memory-based model, which means it memorizes patterns rather than generalizing. It is a simple yet powerful technique and competes with SOTA models like BERT4Rec.
# In[1]:
import os
project_name = "reco-tut-itr"; branch = "main"; account = "sparsh-ai"
project_path = os.path.join('/content', project_name)
if not os.path.exists(project_path):
get_ipython().system(u'cp /content/drive/MyDrive/mykeys.py /content')
import mykeys
get_ipython().system(u'rm /content/mykeys.py')
path = "/content/" + project_name;
get_ipython().system(u'mkdir "{path}"')
get_ipython().magic(u'cd "{path}"')
import sys; sys.path.append(path)
get_ipython().system(u'git config --global user.email "<EMAIL>"')
get_ipython().system(u'git config --global user.name "reco-tut"')
get_ipython().system(u'git init')
get_ipython().system(u'git remote add origin https://"{mykeys.git_token}":[email protected]/"{account}"/"{project_name}".git')
get_ipython().system(u'git pull origin "{branch}"')
get_ipython().system(u'git checkout main')
else:
get_ipython().magic(u'cd "{project_path}"')
# In[2]:
import os
import numpy as np
import pandas as pd
import scipy.sparse
from scipy.spatial.distance import correlation
# In[13]:
df = pd.read_parquet('./data/silver/rating.parquet.gz')
df.info()
# In[16]:
df2 = pd.read_parquet('./data/silver/items.parquet.gz')
df2.info()
# In[17]:
df = | pd.merge(df, df2, on='itemId') | pandas.merge |
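# In[ ]:
# Hedged sketch (not part of the original notebook): the `correlation` import
# above is Pearson correlation *distance* (1 - r), which a memory-based,
# user-based KNN recommender can use directly. The column names `userId` and
# `rating` below are assumptions; only `itemId` is confirmed by the merge.
rating_matrix = pd.pivot_table(df, values='rating', index='userId', columns='itemId')
def pearson_sim(u, v):
    # similarity over items co-rated by both users; NaN marks unrated items
    mask = ~np.isnan(u) & ~np.isnan(v)
    if mask.sum() < 2:
        return 0.0
    # scipy's `correlation` returns a distance, so convert back to similarity
    return 1 - correlation(u[mask], v[mask])
# illustrative call: similarity between the first two users in the matrix
print(pearson_sim(rating_matrix.values[0], rating_matrix.values[1]))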