prompt (string, lengths 19 to 1.03M) | completion (string, lengths 4 to 2.12k) | api (string, lengths 8 to 90) |
---|---|---|
import pandas as pd
import glob
from string import ascii_uppercase
from functools import partial
## Environmental Variables + Medical condition
from ..processing.base_processing import read_ethnicity_data
from ..environment_processing.base_processing import path_features , path_predictions, path_inputs_env, path_target_residuals, ETHNICITY_COLS, path_input_env_inputed, path_input_env
from ..environment_processing.disease_processing import read_infectious_diseases_data, read_infectious_disease_antigens_data
from ..environment_processing.FamilyHistory import read_family_history_data
from ..environment_processing.HealthAndMedicalHistory import read_breathing_data, read_cancer_screening_data, read_chest_pain_data, read_claudication_data, read_eye_history_data, \
read_general_health_data, read_general_pain_data, read_hearing_data, read_medication_data, read_mouth_teeth_data
from ..environment_processing.LifestyleAndEnvironment import read_alcohol_data, read_diet_data, read_electronic_devices_data, read_physical_activity_data, read_sexual_factors_data,\
read_sleep_data, read_sun_exposure_data, read_smoking_data
from ..environment_processing.PsychosocialFactors import read_mental_health_data, read_social_support_data
from ..environment_processing.SocioDemographics import read_education_data, read_employment_data, read_household_data, read_other_sociodemographics_data
from ..environment_processing.HealthRelatedOutcomes import read_medical_diagnoses_data
from ..environment_processing.EarlyLifeFactors import read_early_life_factors_data
## Biomarkers as environmental predictors
from ..processing.anthropometry_processing import read_anthropometry_impedance_data, read_anthropometry_body_size_data
from ..processing.arterial_stiffness_processing import read_arterial_stiffness_data
from ..processing.biochemestry_processing import read_blood_biomarkers_data, read_urine_biomarkers_data, read_blood_count_data
from ..processing.blood_pressure_processing import read_blood_pressure_data
#from ..processing.body_composition_processing import read_body_composition_data
#from ..processing.bone_composition_processing import read_bone_composition_data
from ..processing.brain_processing import read_grey_matter_volumes_data, read_subcortical_volumes_data, read_brain_dMRI_weighted_means_data
from ..processing.carotid_ultrasound_processing import read_carotid_ultrasound_data
from ..processing.ecg_processing import read_ecg_at_rest_data
from ..processing.eye_processing import read_eye_acuity_data, read_eye_autorefraction_data, read_eye_intraocular_pressure_data
from ..processing.heart_processing import read_heart_PWA_data, read_heart_size_data
from ..processing.spirometry_processing import read_spirometry_data
from ..processing.bone_densitometry_processing import read_bone_densitometry_data
from ..processing.hand_grip_strength_processing import read_hand_grip_strength_data
from ..processing.hearing_tests_processing import read_hearing_test_data
from ..processing.cognitive_tests_processing import read_reaction_time_data, read_matrix_pattern_completion_data, read_tower_rearranging_data, \
read_symbol_digit_substitution_data, read_paired_associative_learning_data, \
read_prospective_memory_data, read_numeric_memory_data, read_fluid_intelligence_data, read_trail_making_data , \
read_pairs_matching_data
dict_target_to_instance_and_id = {"Brain" : (2, 100),
"UrineAndBlood" : (0, 100079),
"HeartPWA" : (2, 128),
"Heart" : (2, 102),
"Eye" : (0, 100013),
"EyeIntraoculaPressure" : (0, 100015),
"AnthropometryImpedance" : (0, 100008),
"BrainGreyMatterVolumes" : (2, 1101),
"AnthropometryBodySize" : (0, 100010),
"UrineBiochemestry" : (0, 100083),
"ArterialAndBloodPressure" : (0, 'Custom'),
"Spirometry" : (0, 100020),
"ECGAtRest" : (2, 12657),
"EyeAutorefraction" : (0, 100014),
"ArterialStiffness" : (0, 100007),
"BloodCount" : (0, 100081),
"BrainSubcorticalVolumes" : (2, 1102),
"EyeAcuity" : (0, 100017),
"HeartSize" : (2, 133),
"BloodPressure" : (0, 100011),
"SpiroAndArterialAndBp" : (0, 'Custom'),
"HeartImages" : (2, 'Alan'),
"BloodBiochemestry" : (0, 17518),
"Blood" : (0, 100080),
"Anthropometry" : (0, 100008),
"LiverImages" : (2, 'Alan')
}
map_envdataset_to_dataloader_and_field = {
## Environmental
'Alcohol' : (read_alcohol_data, 100051),
'Diet' : (read_diet_data, 100052),
'Education' : (read_education_data, 100063),
'ElectronicDevices' : (read_electronic_devices_data, 100053),
'Employment' : (read_employment_data, 100064),
'FamilyHistory' : (read_family_history_data, 100034),
'Eyesight' : (read_eye_history_data, 100041),
'Mouth' : (read_mouth_teeth_data, 100046),
'GeneralHealth' : (read_general_health_data, 100042),
'Breathing' : (read_breathing_data, 100037),
'Claudification' : (read_claudication_data, 100038),
'GeneralPain' : (read_general_pain_data, 100048),
'ChestPain' : (read_chest_pain_data, 100039),
'CancerScreening' : (read_cancer_screening_data, 100040),
'Medication': (read_medication_data, 100045),
'Hearing' : (read_hearing_data, 100043),
'Household' : (read_household_data, 100066),
'MentalHealth' : (read_mental_health_data, 100060),
'OtherSociodemographics' : (read_other_sociodemographics_data, 100067),
'PhysicalActivityQuestionnaire' : (read_physical_activity_data, 100054),
'SexualFactors' : (read_sexual_factors_data, 100056),
'Sleep' : (read_sleep_data, 100057),
'SocialSupport' : (read_social_support_data, 100061),
'SunExposure' : (read_sun_exposure_data, 100055),
'EarlyLifeFactors' : (read_early_life_factors_data, 100033),
'Smoking' : (read_smoking_data, 3466),
## Biomarkers
# Anthropometry
'AnthropometryImpedance' : (read_anthropometry_impedance_data, 100008),
'AnthropometryBodySize' : (read_anthropometry_body_size_data, 100010),
# Arterial Stiffness
'ArterialStiffness' : (read_arterial_stiffness_data, 100007),
# Urine And Blood
'BloodBiochemistry' : (read_blood_biomarkers_data, 17518),
'BloodCount' : (read_blood_count_data, 100081),
'UrineBiochemistry' : (read_urine_biomarkers_data, 100083),
# BloodPressure
'BloodPressure' : (read_blood_pressure_data, 100011),
# Brain
'BrainGreyMatterVolumes' : (read_grey_matter_volumes_data, 1101),
'BrainSubcorticalVolumes' : (read_subcortical_volumes_data, 1102),
'BraindMRIWeightedMeans' : (read_brain_dMRI_weighted_means_data, 135),
# carotid_ultrasound
'CarotidUltrasound' : (read_carotid_ultrasound_data, 101),
# ECG
'ECGAtRest' : (read_ecg_at_rest_data, 12657),
# Eye
'EyeAcuity' : (read_eye_acuity_data, 100017),
'EyeAutorefraction' : (read_eye_autorefraction_data, 100014),
'EyeIntraocularPressure' : (read_eye_intraocular_pressure_data, 100015),
# Heart
'HeartPWA' : (read_heart_PWA_data, 128),
'HeartSize' : (read_heart_size_data, 133),
# Spirometry
'Spirometry' : (read_spirometry_data, 100020),
# Hearing
'HearingTest' : (read_hearing_test_data, 100049),
# HandGripStrength :
'HandGripStrength' : (read_hand_grip_strength_data, 100019),
# Bone BoneDensitometryOfHeel :
'BoneDensitometryOfHeel' : (read_bone_densitometry_data, 100018),
# Cognitive
'CognitiveReactionTime' : (read_reaction_time_data, 100032),
'CognitiveMatrixPatternCompletion' : (read_matrix_pattern_completion_data, 501),
'CognitiveTowerRearranging' : (read_tower_rearranging_data, 503),
'CognitiveSymbolDigitSubstitution' : (read_symbol_digit_substitution_data, 502),
'CognitivePairedAssociativeLearning' : (read_paired_associative_learning_data, 506),
'CognitiveProspectiveMemory' : (read_prospective_memory_data, 100031),
'CognitiveNumericMemory' : (read_numeric_memory_data, 100029),
'CognitiveFluidIntelligence' : (read_fluid_intelligence_data, 100027),
'CognitiveTrailMaking' : (read_trail_making_data, 505),
'CognitivePairsMatching' : (read_pairs_matching_data, 100030),
'PhysicalActivity' : (read_physical_activity_data, 'Sasha')
}
## Medical
medical_diagnoses_dict = dict(zip(['medical_diagnoses_%s' % letter for letter in ascii_uppercase], [(partial(read_medical_diagnoses_data, letter = letter), 41270) for letter in ascii_uppercase]))
map_envdataset_to_dataloader_and_field = {**map_envdataset_to_dataloader_and_field, **medical_diagnoses_dict}
def create_data(env_dataset, **kwargs):
    ## env_dataset is just a dataset name, not a path, at this point.
    if env_dataset not in map_envdataset_to_dataloader_and_field:
        raise ValueError('Unknown dataset name: %s' % env_dataset)
else :
dataloader, field = map_envdataset_to_dataloader_and_field[env_dataset]
df = dataloader(**kwargs)
df.to_csv(path_inputs_env + env_dataset + '.csv')
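# Hedged usage sketch (not part of the original module): 'Alcohol' and
# 'medical_diagnoses_A' are keys defined in the mappings above, and running
# this requires the UK Biobank source files and path_inputs_env to exist.
#
#   create_data('Alcohol')              # writes path_inputs_env + 'Alcohol.csv'
#   create_data('medical_diagnoses_A')  # ICD-10 chapter A diagnoses
#
# Any name missing from map_envdataset_to_dataloader_and_field raises ValueError.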
def load_sex_age_ethnicity_data(**kwargs):
df_sex_age_ethnicity_eid = pd.read_csv('/n/groups/patel/samuel/sex_age_eid_ethnicity.csv').set_index('id')
return df_sex_age_ethnicity_eid
def load_data_env(env_dataset, **kwargs):
if 'medical_diagnoses' in env_dataset :
path_env = path_inputs_env + env_dataset + '.csv'
df = pd.read_csv(path_env)
df_sex_age_ethnicity = pd.read_csv('/n/groups/patel/Alan/Aging/Medical_Images/data/data-features_instances.csv').drop(columns = ['Abdominal_images_quality', 'instance', 'outer_fold'])
df_sex_age_ethnicity = df_sex_age_ethnicity.rename(columns = {'Age' : 'Age when attended assessment centre'})
df = df_sex_age_ethnicity.merge(df, on = 'eid').set_index('id')
return df
else :
env_dataset = env_dataset.replace('\\', '')
use_inputed = True
## Find columns to read
path_env = path_inputs_env + env_dataset + '.csv'
cols_to_read = pd.read_csv(path_env, nrows = 2).set_index('id').columns
## Features + eid + id / without ethnicity + age + sex
cols_to_read = [elem for elem in cols_to_read if elem not in ['Ethnicity', 'Age when attended assessment centre', 'Sex', 'eid', 'Unnamed: 0'] + ETHNICITY_COLS + ['Ethnicity.' + elem for elem in ETHNICITY_COLS]] + ['id']
        if use_inputed:
df = pd.read_csv(path_input_env_inputed, usecols = cols_to_read).set_index('id')
else :
df = pd.read_csv(path_input_env, usecols = cols_to_read).set_index('id')
df_sex_age_ethnicity = pd.read_csv('/n/groups/patel/Alan/Aging/Medical_Images/data/data-features_instances.csv').set_index('id').drop(columns = ['Abdominal_images_quality', 'instance', 'outer_fold'])
df_sex_age_ethnicity = df_sex_age_ethnicity.rename(columns = {'Age' : 'Age when attended assessment centre'})
#df_age_new =
df = df.join(df_sex_age_ethnicity)
return df
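## Hedged usage sketch (assumes the group-level CSV paths referenced above are
## accessible): load a prepared environmental dataset joined with the
## sex/age/ethnicity covariates.
#
#   df_alcohol = load_data_env('Alcohol')
#   df_diag_a = load_data_env('medical_diagnoses_A')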
def load_cluster(env_dataset, target_dataset, **kwargs):
    df = pd.read_csv('/n/groups/patel/samuel/EWAS/AutomaticClusters/%s_%s.csv' % (env_dataset, target_dataset))  # api: pandas.read_csv
import pytest
import os
from mapping import util
from pandas.util.testing import assert_frame_equal, assert_series_equal
import pandas as pd
from pandas import Timestamp as TS
import numpy as np
@pytest.fixture
def price_files():
cdir = os.path.dirname(__file__)
path = os.path.join(cdir, 'data/')
files = ["CME-FVU2014.csv", "CME-FVZ2014.csv"]
return [os.path.join(path, f) for f in files]
def assert_dict_of_frames(dict1, dict2):
assert dict1.keys() == dict2.keys()
for key in dict1:
assert_frame_equal(dict1[key], dict2[key])
def test_read_price_data(price_files):
# using default name_func in read_price_data()
df = util.read_price_data(price_files)
dt1 = TS("2014-09-30")
dt2 = TS("2014-10-01")
idx = pd.MultiIndex.from_tuples([(dt1, "CME-FVU2014"),
(dt1, "CME-FVZ2014"),
(dt2, "CME-FVZ2014")],
names=["date", "contract"])
df_exp = pd.DataFrame([119.27344, 118.35938, 118.35938],
index=idx, columns=["Open"])
assert_frame_equal(df, df_exp)
def name_func(fstr):
file_name = os.path.split(fstr)[-1]
name = file_name.split('-')[1].split('.')[0]
return name[-4:] + name[:3]
df = util.read_price_data(price_files, name_func)
dt1 = TS("2014-09-30")
dt2 = TS("2014-10-01")
idx = pd.MultiIndex.from_tuples([(dt1, "2014FVU"), (dt1, "2014FVZ"),
(dt2, "2014FVZ")],
names=["date", "contract"])
df_exp = pd.DataFrame([119.27344, 118.35938, 118.35938],
index=idx, columns=["Open"])
assert_frame_equal(df, df_exp)
def test_calc_rets_one_generic():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')])
rets = pd.Series([0.1, 0.05, 0.1, 0.8], index=idx)
vals = [1, 0.5, 0.5, 1]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([0.1, 0.075, 0.8],
index=weights.index.levels[0],
columns=['CL1'])
assert_frame_equal(wrets, wrets_exp)
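    # Worked arithmetic for the expected frame above (comment only): on
    # 2015-01-04 the CL1 generic holds CLF5 and CLG5 at weight 0.5 each, so its
    # return is 0.5 * 0.05 + 0.5 * 0.1 = 0.075; the single-contract dates
    # (2015-01-03 and 2015-01-05) pass their returns through unchanged.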
def test_calc_rets_two_generics():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets = pd.Series([0.1, 0.15, 0.05, 0.1, 0.8, -0.5, 0.2], index=idx)
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1', 'CL2'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([[0.1, 0.15], [0.075, 0.45], [-0.5, 0.2]],
index=weights.index.levels[0],
columns=['CL1', 'CL2'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_two_generics_nans_in_second_generic():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets = pd.Series([0.1, np.NaN, 0.05, 0.1, np.NaN, -0.5, 0.2],
index=idx)
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1', 'CL2'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([[0.1, np.NaN], [0.075, np.NaN], [-0.5, 0.2]],
index=weights.index.levels[0],
columns=['CL1', 'CL2'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_two_generics_non_unique_columns():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets = pd.Series([0.1, 0.15, 0.05, 0.1, 0.8, -0.5, 0.2], index=idx)
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1', 'CL1'])
with pytest.raises(ValueError):
util.calc_rets(rets, weights)
def test_calc_rets_two_generics_two_asts():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets1 = pd.Series([0.1, 0.15, 0.05, 0.1, 0.8, -0.5, 0.2], index=idx)
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'COF5'),
(TS('2015-01-03'), 'COG5'),
(TS('2015-01-04'), 'COF5'),
(TS('2015-01-04'), 'COG5'),
(TS('2015-01-04'), 'COH5')])
rets2 = pd.Series([0.1, 0.15, 0.05, 0.1, 0.4], index=idx)
rets = {"CL": rets1, "CO": rets2}
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights1 = pd.DataFrame(vals, index=widx, columns=["CL0", "CL1"])
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'COF5'),
(TS('2015-01-03'), 'COG5'),
(TS('2015-01-04'), 'COF5'),
(TS('2015-01-04'), 'COG5'),
(TS('2015-01-04'), 'COH5')
])
weights2 = pd.DataFrame(vals, index=widx, columns=["CO0", "CO1"])
weights = {"CL": weights1, "CO": weights2}
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([[0.1, 0.15, 0.1, 0.15],
[0.075, 0.45, 0.075, 0.25],
[-0.5, 0.2, pd.np.NaN, pd.np.NaN]],
index=weights["CL"].index.levels[0],
columns=['CL0', 'CL1', 'CO0', 'CO1'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_missing_instr_rets_key_error():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5')])
irets = pd.Series([0.02, 0.01, 0.012], index=idx)
vals = [1, 1/2, 1/2, 1]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')])
weights = pd.DataFrame(vals, index=widx, columns=["CL1"])
with pytest.raises(KeyError):
util.calc_rets(irets, weights)
def test_calc_rets_nan_instr_rets():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')])
rets = pd.Series([pd.np.NaN, pd.np.NaN, 0.1, 0.8], index=idx)
vals = [1, 0.5, 0.5, 1]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([pd.np.NaN, pd.np.NaN, 0.8],
index=weights.index.levels[0],
columns=['CL1'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_missing_weight():
# see https://github.com/matthewgilbert/mapping/issues/8
# missing weight for return
idx = pd.MultiIndex.from_tuples([
(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5')
])
rets = pd.Series([0.02, -0.03, 0.06], index=idx)
vals = [1, 1]
widx = pd.MultiIndex.from_tuples([
(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-04'), 'CLF5')
])
weights = pd.DataFrame(vals, index=widx, columns=["CL1"])
with pytest.raises(ValueError):
util.calc_rets(rets, weights)
# extra instrument
idx = pd.MultiIndex.from_tuples([(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-04'), 'CLF5')])
weights1 = pd.DataFrame(1, index=idx, columns=["CL1"])
idx = pd.MultiIndex.from_tuples([
(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-02'), 'CLH5'),
(TS('2015-01-03'), 'CLH5'), # extra day for no weight instrument
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLH5')
])
rets = pd.Series([0.02, -0.03, 0.06, 0.05, 0.01], index=idx)
with pytest.raises(ValueError):
util.calc_rets(rets, weights1)
# leading / trailing returns
idx = pd.MultiIndex.from_tuples([(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-04'), 'CLF5')])
weights2 = pd.DataFrame(1, index=idx, columns=["CL1"])
idx = pd.MultiIndex.from_tuples([(TS('2015-01-01'), 'CLF5'),
(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-05'), 'CLF5')])
rets = pd.Series([0.02, -0.03, 0.06, 0.05], index=idx)
with pytest.raises(ValueError):
util.calc_rets(rets, weights2)
def test_to_notional_empty():
instrs = pd.Series()
prices = pd.Series()
multipliers = pd.Series()
res_exp = pd.Series()
res = util.to_notional(instrs, prices, multipliers)
assert_series_equal(res, res_exp)
def test_to_notional_same_fx():
instrs = pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
prices = pd.Series([30.20, 30.5, 10.2], index=['CLZ6', 'COZ6', 'GCZ6'])
multipliers = pd.Series([1, 1, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
res_exp = pd.Series([-30.20, 2 * 30.5, 10.2],
index=['CLZ6', 'COZ6', 'GCZ6'])
res = util.to_notional(instrs, prices, multipliers)
assert_series_equal(res, res_exp)
def test_to_notional_extra_prices():
instrs = pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
multipliers = pd.Series([1, 1, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
prices = pd.Series([30.20, 30.5, 10.2, 13.1], index=['CLZ6', 'COZ6',
'GCZ6', 'extra'])
res_exp = pd.Series([-30.20, 2 * 30.5, 10.2],
index=['CLZ6', 'COZ6', 'GCZ6'])
res = util.to_notional(instrs, prices, multipliers)
assert_series_equal(res, res_exp)
def test_to_notional_missing_prices():
instrs = pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
multipliers = pd.Series([1, 1, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
prices = pd.Series([30.20, 30.5], index=['CLZ6', 'COZ6'])
res_exp = pd.Series([-30.20, 2 * 30.5, pd.np.NaN],
index=['CLZ6', 'COZ6', 'GCZ6'])
res = util.to_notional(instrs, prices, multipliers)
assert_series_equal(res, res_exp)
def test_to_notional_different_fx():
instrs = pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
multipliers = pd.Series([1, 1, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
prices = pd.Series([30.20, 30.5, 10.2], index=['CLZ6', 'COZ6', 'GCZ6'])
instr_fx = pd.Series(['USD', 'CAD', 'AUD'],
index=['CLZ6', 'COZ6', 'GCZ6'])
fx_rates = pd.Series([1.32, 0.8], index=['USDCAD', 'AUDUSD'])
res_exp = pd.Series([-30.20, 2 * 30.5 / 1.32, 10.2 * 0.8],
index=['CLZ6', 'COZ6', 'GCZ6'])
res = util.to_notional(instrs, prices, multipliers, desired_ccy='USD',
instr_fx=instr_fx, fx_rates=fx_rates)
assert_series_equal(res, res_exp)
def test_to_notional_duplicates():
instrs = pd.Series([1, 1], index=['A', 'A'])
prices = pd.Series([200.37], index=['A'])
mults = pd.Series([100], index=['A'])
with pytest.raises(ValueError):
util.to_notional(instrs, prices, mults)
instrs = pd.Series([1], index=['A'])
prices = pd.Series([200.37, 200.37], index=['A', 'A'])
mults = pd.Series([100], index=['A'])
with pytest.raises(ValueError):
util.to_notional(instrs, prices, mults)
instrs = pd.Series([1], index=['A'])
prices = pd.Series([200.37], index=['A'])
mults = pd.Series([100, 100], index=['A', 'A'])
with pytest.raises(ValueError):
util.to_notional(instrs, prices, mults)
instrs = pd.Series([1], index=['A'])
prices = pd.Series([200.37], index=['A'])
mults = pd.Series([100], index=['A'])
desired_ccy = "CAD"
instr_fx = pd.Series(['USD', 'USD'], index=['A', 'A'])
fx_rate = pd.Series([1.32], index=['USDCAD'])
with pytest.raises(ValueError):
util.to_notional(instrs, prices, mults, desired_ccy,
instr_fx, fx_rate)
instrs = pd.Series([1], index=['A'])
prices = pd.Series([200.37], index=['A'])
mults = pd.Series([100], index=['A'])
desired_ccy = "CAD"
instr_fx = pd.Series(['USD'], index=['A'])
fx_rate = pd.Series([1.32, 1.32], index=['USDCAD', 'USDCAD'])
with pytest.raises(ValueError):
util.to_notional(instrs, prices, mults, desired_ccy,
instr_fx, fx_rate)
def test_to_notional_bad_fx():
instrs = pd.Series([1], index=['A'])
prices = pd.Series([200.37], index=['A'])
mults = pd.Series([100], index=['A'])
instr_fx = pd.Series(['JPY'], index=['A'])
fx_rates = pd.Series([1.32], index=['GBPCAD'])
with pytest.raises(ValueError):
util.to_notional(instrs, prices, mults, desired_ccy='USD',
instr_fx=instr_fx, fx_rates=fx_rates)
def test_to_contracts_rounder():
prices = pd.Series([30.20, 30.5], index=['CLZ6', 'COZ6'])
multipliers = pd.Series([1, 1], index=['CLZ6', 'COZ6'])
# 30.19 / 30.20 is slightly less than 1 so will round to 0
notional = pd.Series([30.19, 2 * 30.5], index=['CLZ6', 'COZ6'])
res = util.to_contracts(notional, prices, multipliers,
rounder=pd.np.floor)
res_exp = pd.Series([0, 2], index=['CLZ6', 'COZ6'])
assert_series_equal(res, res_exp)
def test_to_contract_different_fx_with_multiplier():
notionals = pd.Series([-30.20, 2 * 30.5 / 1.32 * 10, 10.2 * 0.8 * 100],
index=['CLZ6', 'COZ6', 'GCZ6'])
prices = pd.Series([30.20, 30.5, 10.2], index=['CLZ6', 'COZ6', 'GCZ6'])
instr_fx = pd.Series(['USD', 'CAD', 'AUD'],
index=['CLZ6', 'COZ6', 'GCZ6'])
fx_rates = pd.Series([1.32, 0.8], index=['USDCAD', 'AUDUSD'])
multipliers = pd.Series([1, 10, 100], index=['CLZ6', 'COZ6', 'GCZ6'])
res_exp = pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
res = util.to_contracts(notionals, prices, desired_ccy='USD',
instr_fx=instr_fx, fx_rates=fx_rates,
multipliers=multipliers)
assert_series_equal(res, res_exp)
def test_to_contract_different_fx_with_multiplier_rounding():
# won't work out to integer number of contracts so this tests rounding
notionals = pd.Series([-30.21, 2 * 30.5 / 1.32 * 10, 10.2 * 0.8 * 100],
index=['CLZ6', 'COZ6', 'GCZ6'])
prices = pd.Series([30.20, 30.5, 10.2], index=['CLZ6', 'COZ6', 'GCZ6'])
instr_fx = pd.Series(['USD', 'CAD', 'AUD'],
index=['CLZ6', 'COZ6', 'GCZ6'])
fx_rates = pd.Series([1.32, 0.8], index=['USDCAD', 'AUDUSD'])
multipliers = pd.Series([1, 10, 100], index=['CLZ6', 'COZ6', 'GCZ6'])
res_exp = pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
res = util.to_contracts(notionals, prices, desired_ccy='USD',
instr_fx=instr_fx, fx_rates=fx_rates,
multipliers=multipliers)
assert_series_equal(res, res_exp)
def test_trade_with_zero_amount():
wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
desired_holdings = pd.Series([200000, 0], index=[0, 1])
current_contracts = pd.Series([0, 1, 0],
index=['CLX16', 'CLZ16', 'CLF17'])
prices = pd.Series([50.32, 50.41, 50.48],
index=['CLX16', 'CLZ16', 'CLF17'])
multiplier = pd.Series([100, 100, 100],
index=['CLX16', 'CLZ16', 'CLF17'])
trades = util.calc_trades(current_contracts, desired_holdings, wts,
prices, multipliers=multiplier)
# 200000 * 0.5 / (50.32*100) - 0,
# 200000 * 0.5 / (50.41*100) + 0 * 0.5 / (50.41*100) - 1,
# 0 * 0.5 / (50.48*100) - 0,
exp_trades = pd.Series([20, 19], index=['CLX16', 'CLZ16'])
assert_series_equal(trades, exp_trades)
def test_trade_all_zero_amount_return_empty():
wts = pd.DataFrame([1], index=["CLX16"], columns=[0])
desired_holdings = pd.Series([13], index=[0])
current_contracts = 0
prices = pd.Series([50.32], index=['CLX16'])
multiplier = pd.Series([100], index=['CLX16'])
trades = util.calc_trades(current_contracts, desired_holdings, wts,
prices, multipliers=multiplier)
exp_trades = pd.Series(dtype="int64")
assert_series_equal(trades, exp_trades)
def test_trade_one_asset():
wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
desired_holdings = pd.Series([200000, -50000], index=[0, 1])
current_contracts = pd.Series([0, 1, 0],
index=['CLX16', 'CLZ16', 'CLF17'])
prices = pd.Series([50.32, 50.41, 50.48],
index=['CLX16', 'CLZ16', 'CLF17'])
multiplier = pd.Series([100, 100, 100],
index=['CLX16', 'CLZ16', 'CLF17'])
trades = util.calc_trades(current_contracts, desired_holdings, wts,
prices, multipliers=multiplier)
# 200000 * 0.5 / (50.32*100) - 0,
# 200000 * 0.5 / (50.41*100) - 50000 * 0.5 / (50.41*100) - 1,
# -50000 * 0.5 / (50.48*100) - 0,
exp_trades = pd.Series([20, 14, -5], index=['CLX16', 'CLZ16', 'CLF17'])
exp_trades = exp_trades.sort_index()
assert_series_equal(trades, exp_trades)
def test_trade_multi_asset():
wts1 = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=["CL0", "CL1"])
wts2 = pd.DataFrame([1], index=["COX16"], columns=["CO0"])
wts = {"CL": wts1, "CO": wts2}
desired_holdings = pd.Series([200000, -50000, 100000],
index=["CL0", "CL1", "CO0"])
current_contracts = pd.Series([0, 1, 0, 5],
index=['CLX16', 'CLZ16', 'CLF17',
'COX16'])
prices = pd.Series([50.32, 50.41, 50.48, 49.50],
index=['CLX16', 'CLZ16', 'CLF17', 'COX16'])
multiplier = pd.Series([100, 100, 100, 100],
index=['CLX16', 'CLZ16', 'CLF17', 'COX16'])
trades = util.calc_trades(current_contracts, desired_holdings, wts,
prices, multipliers=multiplier)
# 200000 * 0.5 / (50.32*100) - 0,
# 200000 * 0.5 / (50.41*100) - 50000 * 0.5 / (50.41*100) - 1,
# -50000 * 0.5 / (50.48*100) - 0,
# 100000 * 1 / (49.50*100) - 5,
exp_trades = pd.Series([20, 14, -5, 15], index=['CLX16', 'CLZ16',
'CLF17', 'COX16'])
exp_trades = exp_trades.sort_index()
assert_series_equal(trades, exp_trades)
def test_trade_extra_desired_holdings_without_weights():
wts = pd.DataFrame([0], index=["CLX16"], columns=["CL0"])
desired_holdings = pd.Series([200000, 10000], index=["CL0", "CL1"])
current_contracts = pd.Series([0], index=['CLX16'])
prices = pd.Series([50.32], index=['CLX16'])
multipliers = pd.Series([1], index=['CLX16'])
with pytest.raises(ValueError):
util.calc_trades(current_contracts, desired_holdings, wts, prices,
multipliers)
def test_trade_extra_desired_holdings_without_current_contracts():
# this should treat the missing holdings as 0, since this would often
# happen when adding new positions without any current holdings
wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
desired_holdings = pd.Series([200000, -50000], index=[0, 1])
current_contracts = pd.Series([0, 1],
index=['CLX16', 'CLZ16'])
prices = pd.Series([50.32, 50.41, 50.48],
index=['CLX16', 'CLZ16', 'CLF17'])
multiplier = pd.Series([100, 100, 100],
index=['CLX16', 'CLZ16', 'CLF17'])
trades = util.calc_trades(current_contracts, desired_holdings, wts,
prices, multipliers=multiplier)
# 200000 * 0.5 / (50.32*100) - 0,
# 200000 * 0.5 / (50.41*100) - 50000 * 0.5 / (50.41*100) - 1,
# -50000 * 0.5 / (50.48*100) - 0,
exp_trades = pd.Series([20, 14, -5], index=['CLX16', 'CLZ16', 'CLF17'])
exp_trades = exp_trades.sort_index()
# non existent contract holdings result in fill value being a float,
# which casts to float64
assert_series_equal(trades, exp_trades, check_dtype=False)
def test_trade_extra_weights():
# extra weights should be ignored
wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
desired_holdings = pd.Series([200000], index=[0])
current_contracts = pd.Series([0, 2], index=['CLX16', 'CLZ16'])
prices = pd.Series([50.32, 50.41], index=['CLX16', 'CLZ16'])
multiplier = pd.Series([100, 100], index=['CLX16', 'CLZ16'])
trades = util.calc_trades(current_contracts, desired_holdings, wts,
prices, multipliers=multiplier)
# 200000 * 0.5 / (50.32*100) - 0,
# 200000 * 0.5 / (50.41*100) - 2,
exp_trades = pd.Series([20, 18], index=['CLX16', 'CLZ16'])
assert_series_equal(trades, exp_trades)
def test_get_multiplier_dataframe_weights():
wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
ast_mult = pd.Series([1000], index=["CL"])
imults = util.get_multiplier(wts, ast_mult)
imults_exp = pd.Series([1000, 1000, 1000],
index=["CLF17", "CLX16", "CLZ16"])
assert_series_equal(imults, imults_exp)
def test_get_multiplier_dict_weights():
wts1 = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
wts2 = pd.DataFrame([0.5, 0.5], index=["COX16", "COZ16"], columns=[0])
wts = {"CL": wts1, "CO": wts2}
ast_mult = pd.Series([1000, 1000], index=["CL", "CO"])
imults = util.get_multiplier(wts, ast_mult)
imults_exp = pd.Series([1000, 1000, 1000, 1000, 1000],
index=["CLF17", "CLX16", "CLZ16", "COX16",
"COZ16"])
assert_series_equal(imults, imults_exp)
def test_get_multiplier_dataframe_weights_multiplier_asts_error():
wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
ast_mult = pd.Series([1000, 1000], index=["CL", "CO"])
with pytest.raises(ValueError):
util.get_multiplier(wts, ast_mult)
def test_weighted_expiration_two_generics():
vals = [[1, 0, 1/2, 1/2, 0, 1, 0], [0, 1, 0, 1/2, 1/2, 0, 1]]
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF15'),
(TS('2015-01-03'), 'CLG15'),
(TS('2015-01-04'), 'CLF15'),
(TS('2015-01-04'), 'CLG15'),
(TS('2015-01-04'), 'CLH15'),
(TS('2015-01-05'), 'CLG15'),
(TS('2015-01-05'), 'CLH15')])
weights = pd.DataFrame({"CL1": vals[0], "CL2": vals[1]}, index=idx)
contract_dates = pd.Series([TS('2015-01-20'),
TS('2015-02-21'),
TS('2015-03-20')],
index=['CLF15', 'CLG15', 'CLH15'])
wexp = util.weighted_expiration(weights, contract_dates)
exp_wexp = pd.DataFrame([[17.0, 49.0], [32.0, 61.5], [47.0, 74.0]],
index=[TS('2015-01-03'),
TS('2015-01-04'),
TS('2015-01-05')],
columns=["CL1", "CL2"])
assert_frame_equal(wexp, exp_wexp)
def test_flatten():
vals = [[1, 0], [0, 1], [1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5')])
weights = pd.DataFrame(vals, index=widx, columns=["CL1", "CL2"])
flat_wts = util.flatten(weights)
flat_wts_exp = pd.DataFrame(
{"date": [TS('2015-01-03')] * 4 + [TS('2015-01-04')] * 4,
"contract": ['CLF5'] * 2 + ['CLG5'] * 4 + ['CLH5'] * 2,
"generic": ["CL1", "CL2"] * 4,
"weight": [1, 0, 0, 1, 1, 0, 0, 1]}
).loc[:, ["date", "contract", "generic", "weight"]]
assert_frame_equal(flat_wts, flat_wts_exp)
def test_flatten_dict():
vals = [[1, 0], [0, 1], [1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5')])
weights1 = pd.DataFrame(vals, index=widx, columns=["CL1", "CL2"])
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'COF5')])
weights2 = pd.DataFrame(1, index=widx, columns=["CO1"])
weights = {"CL": weights1, "CO": weights2}
flat_wts = util.flatten(weights)
flat_wts_exp = pd.DataFrame(
{"date": ([TS('2015-01-03')] * 4 + [TS('2015-01-04')] * 4
+ [TS('2015-01-03')]),
"contract": (['CLF5'] * 2 + ['CLG5'] * 4 + ['CLH5'] * 2
+ ["COF5"]),
"generic": ["CL1", "CL2"] * 4 + ["CO1"],
"weight": [1, 0, 0, 1, 1, 0, 0, 1, 1],
"key": ["CL"] * 8 + ["CO"]}
).loc[:, ["date", "contract", "generic", "weight", "key"]]
assert_frame_equal(flat_wts, flat_wts_exp)
def test_flatten_bad_input():
dummy = 0
with pytest.raises(ValueError):
util.flatten(dummy)
def test_unflatten():
flat_wts = pd.DataFrame(
{"date": [TS('2015-01-03')] * 4 + [TS('2015-01-04')] * 4,
"contract": ['CLF5'] * 2 + ['CLG5'] * 4 + ['CLH5'] * 2,
"generic": ["CL1", "CL2"] * 4,
"weight": [1, 0, 0, 1, 1, 0, 0, 1]}
).loc[:, ["date", "contract", "generic", "weight"]]
wts = util.unflatten(flat_wts)
vals = [[1, 0], [0, 1], [1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5')],
names=("date", "contract"))
cols = pd.Index(["CL1", "CL2"], name="generic")
wts_exp = pd.DataFrame(vals, index=widx, columns=cols)
assert_frame_equal(wts, wts_exp)
def test_unflatten_dict():
flat_wts = pd.DataFrame(
{"date": ([TS('2015-01-03')] * 4 + [TS('2015-01-04')] * 4
+ [TS('2015-01-03')]),
"contract": (['CLF5'] * 2 + ['CLG5'] * 4 + ['CLH5'] * 2
+ ["COF5"]),
"generic": ["CL1", "CL2"] * 4 + ["CO1"],
"weight": [1, 0, 0, 1, 1, 0, 0, 1, 1],
"key": ["CL"] * 8 + ["CO"]}
).loc[:, ["date", "contract", "generic", "weight", "key"]]
wts = util.unflatten(flat_wts)
vals = [[1, 0], [0, 1], [1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5')],
names=("date", "contract"))
cols = pd.Index(["CL1", "CL2"], name="generic")
weights1 = pd.DataFrame(vals, index=widx, columns=cols)
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'COF5')],
names=("date", "contract"))
cols = pd.Index(["CO1"], name="generic")
weights2 = pd.DataFrame(1, index=widx, columns=cols)
wts_exp = {"CL": weights1, "CO": weights2}
assert_dict_of_frames(wts, wts_exp)
def test_reindex():
# related to https://github.com/matthewgilbert/mapping/issues/11
# no op
idx = pd.MultiIndex.from_tuples([(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-02'), 'CLH5'),
(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLH5')])
prices = pd.Series([103, 101, 102, 100], index=idx)
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLH5')])
new_prices = util.reindex(prices, widx, limit=0)
exp_prices = prices
assert_series_equal(exp_prices, new_prices)
# missing front prices error
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5')])
prices = pd.Series([100], index=idx)
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5')])
with pytest.raises(ValueError):
util.reindex(prices, widx, 0)
# NaN returns introduced and filled
idx = pd.MultiIndex.from_tuples([(TS('2015-01-01'), 'CLF5'),
(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-02'), 'CLH5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLH5')])
prices = pd.Series([100, 101, 102, 103, 104], index=idx)
widx = pd.MultiIndex.from_tuples([(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-02'), 'CLH5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLF5'),
(TS('2015-01-05'), 'CLH5'),
(TS('2015-01-06'), 'CLF5'),
(TS('2015-01-06'), 'CLH5')])
new_prices = util.reindex(prices, widx, limit=1)
idx = pd.MultiIndex.from_tuples([(TS('2015-01-01'), 'CLF5'),
(TS('2015-01-01'), 'CLH5'),
(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-02'), 'CLH5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLF5'),
(TS('2015-01-05'), 'CLH5'),
(TS('2015-01-06'), 'CLF5'),
(TS('2015-01-06'), 'CLH5')
])
exp_prices = pd.Series([100, np.NaN, 101, 102, 103, 104, 103,
104, np.NaN, np.NaN], index=idx)
assert_series_equal(exp_prices, new_prices)
# standard subset
idx = pd.MultiIndex.from_tuples([(TS('2015-01-01'), 'CLF5'),
(TS('2015-01-01'), 'CHF5'),
(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-02'), 'CLH5'),
(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLH5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLH5')])
prices = pd.Series([100, 101, 102, 103, 104, 105, 106, 107], index=idx)
widx = pd.MultiIndex.from_tuples([(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLH5')])
new_prices = util.reindex(prices, widx, limit=0)
idx = pd.MultiIndex.from_tuples([(TS('2015-01-01'), 'CLF5'),
(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-02'), 'CLH5'),
(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLH5')])
exp_prices = pd.Series([100, 102, 103, 104, 105], index=idx)
assert_series_equal(exp_prices, new_prices)
# check unique index to avoid duplicates from pd.Series.reindex
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5')])
prices = pd.Series([100.10, 101.13], index=idx)
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
                                      (TS('2015-01-03')  # api: pandas.Timestamp
from sacred.observers import MongoObserver, FileStorageObserver
from pandas import Series
from pymongo import MongoClient
from gridfs import GridFS
from tensorflow.python.summary.summary_iterator import summary_iterator
import xview.settings as settings
from xview.datasets import get_dataset
from bson.json_util import dumps
import json
import zipfile
from os import path, listdir
from numpy import array, nan, inf
from copy import deepcopy
def load_data(data_config):
"""
Load the data specified in the data_config dict.
"""
dataset_params = {key: val for key, val in data_config.items()
if key not in ['dataset', 'use_trainset']}
return get_dataset(data_config['dataset'], dataset_params)
def get_observer():
if hasattr(settings, 'EXPERIMENT_DB_HOST') and settings.EXPERIMENT_DB_HOST:
return MongoObserver.create(url='mongodb://{user}:{pwd}@{host}/{db}'.format(
host=settings.EXPERIMENT_DB_HOST,
user=settings.EXPERIMENT_DB_USER,
pwd=settings.EXPERIMENT_DB_PWD,
db=settings.EXPERIMENT_DB_NAME),
db_name=settings.EXPERIMENT_DB_NAME)
elif hasattr(settings, 'EXPERIMENT_STORAGE_FOLDER') \
and settings.EXPERIMENT_STORAGE_FOLDER:
return FileStorageObserver.create(settings.EXPERIMENT_STORAGE_FOLDER)
else:
raise UserWarning("No observer settings found.")
def reverse_convert_datatypes(data):
if isinstance(data, dict):
if 'values' in data and len(data) == 1:
return reverse_convert_datatypes(data['values'])
if 'py/tuple' in data and len(data) == 1:
return reverse_convert_datatypes(data['py/tuple'])
if 'py/object' in data and data['py/object'] == 'numpy.ndarray':
if 'dtype' in data:
return array(data['values'], dtype=data['dtype'])
else:
return array(data['values'])
for key in data:
data[key] = reverse_convert_datatypes(data[key])
return data
elif isinstance(data, list):
return [reverse_convert_datatypes(item) for item in data]
elif isinstance(data, str) and data[0] == '[':
return eval(data)
return data
class ExperimentData:
"""Loads experimental data from experiments database."""
def __init__(self, exp_id):
"""Load data for experiment with id 'exp_id'.
        Following the settings, data is either loaded from the MongoDB connection or,
as a fallback, from the specified directory.
"""
if hasattr(settings, 'EXPERIMENT_DB_HOST') and settings.EXPERIMENT_DB_HOST:
client = MongoClient('mongodb://{user}:{pwd}@{host}/{db}'.format(
host=settings.EXPERIMENT_DB_HOST, user=settings.EXPERIMENT_DB_USER,
pwd=settings.EXPERIMENT_DB_PWD, db=settings.EXPERIMENT_DB_NAME))
self.db = client[settings.EXPERIMENT_DB_NAME]
self.fs = GridFS(self.db)
self.record = self.db.runs.find_one({'_id': exp_id})
self.artifacts = [artifact['name']
for artifact in self.record['artifacts']]
elif hasattr(settings, 'EXPERIMENT_STORAGE_FOLDER') \
and settings.EXPERIMENT_STORAGE_FOLDER:
if str(exp_id) in listdir(settings.EXPERIMENT_STORAGE_FOLDER):
self.exp_path = path.join(settings.EXPERIMENT_STORAGE_FOLDER, str(exp_id))
with open(path.join(self.exp_path, 'run.json')) as run_json:
record = json.load(run_json)
with open(path.join(self.exp_path, 'info.json')) as info_json:
record['info'] = json.load(info_json)
with open(path.join(self.exp_path, 'config.json')) as config_json:
record['config'] = json.load(config_json)
with open(path.join(self.exp_path, 'cout.txt')) as captured_out:
record['captured_out'] = captured_out.read()
self.artifacts = listdir(self.exp_path)
elif '%s.zip' % exp_id in listdir(settings.EXPERIMENT_STORAGE_FOLDER):
self.zipfile = path.join(settings.EXPERIMENT_STORAGE_FOLDER,
'%s.zip' % exp_id)
archive = zipfile.ZipFile(self.zipfile)
record = json.loads(archive.read('run.json').decode('utf8'))
record['info'] = json.loads(archive.read('info.json').decode('utf8'))
record['config'] = json.loads(archive.read('config.json').decode('utf8'))
record['captured_out'] = archive.read('cout.txt')
archive.close()
self.artifacts = archive.namelist()
else:
raise UserWarning('Specified experiment %s not found.' % exp_id)
self.record = record
def get_record(self):
"""Get sacred record for experiment."""
return reverse_convert_datatypes(deepcopy(self.record))
def get_artifact(self, name):
"""Return the produced outputfile with given name as file-like object."""
if hasattr(self, 'fs'):
if name not in self.artifacts:
raise UserWarning('ERROR: Artifact {} not found'.format(name))
artifact_id = next(artifact['file_id']
for artifact in self.record['artifacts']
if artifact['name'] == name)
return self.fs.get(artifact_id)
elif hasattr(self, 'exp_path'):
if name not in self.artifacts:
raise UserWarning('ERROR: Artifact {} not found'.format(name))
return open(path.join(self.exp_path, name))
else:
if name not in self.artifacts:
raise UserWarning('ERROR: Artifact {} not found'.format(name))
archive = zipfile.ZipFile(self.zipfile)
return archive.open(name)
def get_summary(self, tag):
"""Return pd.Series of scalar summary value with given tag."""
search = [artifact for artifact in self.artifacts if 'events' in artifact]
if not len(search) > 0:
raise UserWarning('ERROR: Could not find summary file')
summary_file = search[0]
tmp_file = '/tmp/summary'
with open(tmp_file, 'wb') as f:
f.write(self.get_artifact(summary_file).read())
iterator = summary_iterator(tmp_file)
# go through all the values and store them
step = []
value = []
for event in iterator:
for measurement in event.summary.value:
if (measurement.tag == tag):
step.append(event.step)
value.append(measurement.simple_value)
        return Series(value, index=step)  # api: pandas.Series
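# Hedged usage sketch (the run id and tag are made up): read back a stored run
# and pull one of its scalar summaries as a pandas Series indexed by step.
#
#   exp = ExperimentData(42)
#   config = exp.get_record()['config']
#   loss = exp.get_summary('loss')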
# -*- coding: utf-8 -*-
"""
@file:maketrain.py
@time:2019/5/6 16:42
@author:Tangj
@software:Pycharm
@Desc
"""
import pandas as pd
import numpy as np
import gc
import time
name = ['log_0_1999', 'log_2000_3999', 'log_4000_5999','log_6000_7999', 'log_8000_9999', 'log_10000_19999',
'log_20000_29999', 'log_30000_39999','log_40000_49999',
'log_50000_59999','log_60000_69999','log_70000_79999','log_80000_89999','log_90000_99999',
'log_100000_109999','log_110000_119999','log_120000_129999','log_130000_139999']
def group_split(list_values):
new_values = []
for values in list_values:
vals = values.split(',')
for i in vals:
if i not in new_values:
new_values.append(i)
new_values.sort()
if 'all' in new_values:
return 'all'
str_val = new_values[0]
flag = 1
for i in new_values:
if flag == 1:
str_val = str(i)
flag = 0
else:
str_val = str_val + ',' + str(i)
return str_val
def putting_time_process(put_time):
bi_val = [0] * 48
for time in put_time:
time = int(time)
bi_time = bin(time)
j = 0
num = len(bi_time) - 1
while num > 1:
bi_val[j] += int(bi_time[num])
num -= 1
j += 1
n = 47
flag = 1
times = '0'
total = 0
while n >= 0:
if bi_val[n] >= 1:
val = 1
total += 1
else:
val = 0
if flag == 1:
flag = 0
times = str(val)
else:
times = times + str(val)
n -= 1
re_times1 = int(times, 2)
return re_times1, times, total
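# Hedged sanity check of putting_time_process (not part of the original script):
# the union of the putting-time bitmasks 0b011 (slots 0-1) and 0b110 (slots 1-2)
# covers slots 0-2, i.e. it decodes back to the integer 7 with 3 active
# half-hour slots.
_re_times, _bits, _total = putting_time_process(['3', '6'])
assert _re_times == 7 and _total == 3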
def disstatus(train, option):
print("status processing")
distime = []
opstatus = option[option['changeField'] == 1]
opstatus.index = opstatus['statime']
opstatus.sort_index()
opstatus.index = range(opstatus.shape[0])
values = opstatus['changeValue']
optime = opstatus['statime'].values
flag = 1
j = 0
for i in values:
if (i == '0') & (flag == 1):
distime.append(optime[j])
flag = 0
if (i == '1') & (flag == 0):
distime.append(optime[j])
flag = 1
j += 1
j = 0
if len(distime) == 0:
return train
elif (len(distime) % 2 == 0):
for i in range(int(len(distime) / 2)):
Train = pd.DataFrame()
t1 = distime[j]
t2 = distime[j + 1]
# print(t1)
# print(t2)
j += 2
train1 = train[train['statime'] < t1]
# print(train1['Reqday'].unique())
Train = pd.concat([Train, train1])
train1 = train[train['statime'] > t2]
# print(train1['Reqday'].unique())
Train = pd.concat([Train, train1])
train = Train
else:
t1 = distime[-1]
train = train[train['statime'] < t1]
Train = pd.DataFrame()
for i in range(int(len(distime) / 2)):
Train = pd.DataFrame()
t1 = distime[j]
t2 = distime[j + 1]
j += 2
train1 = train[train['statime'] < t1]
Train = pd.concat([Train, train1])
train2 = train[train['statime'] > t2]
            Train = pd.concat([Train, train2])  # api: pandas.concat
import random
import itertools as it
import pandas as pd
import pandera as pa
import numpy as np
import plotly.graph_objects as pg
import textwrap as tw
def create_transactions(
date1: str,
date2: str,
freq: str="12H",
income_categories: list[str]=None,
expense_categories: list[str]=None,
) -> pd.DataFrame:
"""
Create a DataFrame of sample transactions between the given dates
(date strings that Pandas can interpret, such as YYYYMMDD) and at
the given Pandas frequency.
The columns will be all those readable by the function :func:`read_transactions`.
    Each positive transaction will be assigned an income category from
    the given list ``income_categories``, and each negative transaction
    will be assigned an expense category from the given list
``expense_categories``.
If these lists are not given, then whimsical default ones will be
created.
"""
# Create date range
rng = pd.date_range(date1, date2, freq=freq, name="date")
n = len(rng)
# Create random amounts
low = -70
high = 100
f = pd.DataFrame(
np.random.randint(low, high, size=(n, 1)), columns=["amount"], index=rng
)
f = f.reset_index()
# Create random descriptions and comments
f["description"] = [hex(random.getrandbits(20)) for i in range(n)]
f["comment"] = [hex(random.getrandbits(40)) for i in range(n)]
# Categorize amounts
if income_categories is None:
income_categories = ["programming", "programming", "investing", "reiki"]
if expense_categories is None:
expense_categories = [
"food",
"shelter",
"shelter",
"transport",
"healthcare",
"soil testing",
]
def categorize(x):
if x > 0:
return random.choice(income_categories)
else:
return random.choice(expense_categories)
f["category"] = f["amount"].map(categorize)
f["category"] = f["category"].astype("category")
return f
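# Hedged usage sketch: two weeks of synthetic transactions at the default
# 12-hour frequency, using the whimsical default categories.
#
#   f = create_transactions("2021-01-01", "2021-01-14")
#   f.head()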
SCHEMA = pa.DataFrameSchema({
"date": pa.Column(pa.String),
"amount": pa.Column(pa.Float, coerce=True),
"description": pa.Column(pa.String, required=False, coerce=True),
"category": pa.Column(pa.String, required=False, coerce=True),
"comment": pa.Column(pa.String, required=False, coerce=True, nullable=True),
})
def validate_transactions(transactions: pd.DataFrame) -> pd.DataFrame:
"""
Raise a Pandera SchemaError if the given DataFrame of transactions does not
agree with the schema :const:SCHEMA.
Otherwise, return the DataFrame as is.
"""
return SCHEMA.validate(transactions)
def read_transactions(path: str, date_format: str=None, **kwargs) -> pd.DataFrame:
"""
Read a CSV file of transactions located at the given path (string
or Path object), parse the date and category, and return the
resulting DataFrame.
The CSV should contain at least the following columns
- ``'date'``: string
- ``'amount'``: float; amount of transaction; positive or negative,
indicating a income or expense, respectively
- ``'description'`` (optional): string; description of transaction,
e.g. 'dandelion and burdock tea'
- ``'category'`` (optional): string; categorization of description,
e.g. 'healthcare'
- ``'comment'`` (optional): string; comment on transaction, e.g.
'a gram of prevention is worth 62.5 grams of cure'
    If the date format string ``date_format`` is given, e.g.
    ``'%Y-%m-%d'``, then parse dates using that format; otherwise
    let Pandas guess the date format.
"""
f = (
pd.read_csv(path, **kwargs)
.rename(lambda x: x.strip().lower(), axis="columns")
.filter(["date", "amount", "description", "category", "comment"])
.pipe(validate_transactions)
)
    # Parse dates and clean up the category column
f["date"] = pd.to_datetime(f["date"], format=date_format)
if "category" in f.columns:
f["category"] = f["category"].str.lower()
f["category"] = f["category"].astype("category")
return f.sort_values(["date", "amount"])
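# Hedged usage sketch (the file path is made up): read a transactions CSV with
# ISO-formatted dates.
#
#   f = read_transactions("transactions.csv", date_format="%Y-%m-%d")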
def insert_repeating(
transactions: pd.DataFrame,
amount: float,
freq: str,
description: str=None,
category: str=None,
comment: str=None,
start_date: str=None,
end_date: str=None,
) -> pd.DataFrame:
"""
Given a DataFrame of transactions, add to it a repeating transaction
at the given frequency for the given amount with the given optional
description, category, and comment.
Restrict the repeating transaction to the given start and end dates
(date objects), inclusive.
If no start date is given, then repeat from the first transaction date.
If no end date is given, then repeat to the last transaction date.
Drop duplicate rows and return the resulting DataFrame.
"""
f = transactions.copy()
if start_date is None:
start_date = f["date"].min()
if end_date is None:
end_date = f["date"].max()
g = pd.DataFrame([])
dates = pd.date_range(start_date, end_date, freq=freq)
g["date"] = dates
g["amount"] = amount
if description is not None:
g["description"] = description
if category is not None:
g["category"] = category
g["category"] = g["category"].astype("category")
if comment is not None:
g["comment"] = comment
h = pd.concat([f, g]).drop_duplicates().sort_values(["date", "amount"])
if "category" in h.columns:
h["category"] = h["category"].astype("category")
return h
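# Hedged usage sketch (amount, description and category are made up): append a
# monthly rent payment on the first of each month across the span of `f`.
#
#   f = create_transactions("2021-01-01", "2021-06-30")
#   f = insert_repeating(f, -1000, freq="MS", description="rent",
#                        category="shelter")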
def summarize(
transactions: pd.DataFrame,
freq: str="MS",
decimals: int=2,
start_date: str=None,
end_date: str=None,
) -> dict:
"""
Given a DataFrame of transactions, slice it from the given start
    date to and including the given end date (strings that Pandas
can interpret, such as YYYYMMDD) if specified, drop unused categories,
and return a dictionary with the keys 'by_none', 'by_period', 'by_category',
'by_category_and_period' whose corresponding values are DataFrames with the
following columns.
- key "by_none"
* ``"start_date"``: first transaction date
* ``"end_date"``: last transaction date
* ``"income"``: sum of positive transaction amounts for the date range
* ``"expense"``: absolute value of sum of negative transaction amounts for the
date range
* ``"balance"``: income - expense
* ``"savings_pc"``: 100 * balance / income
- key "by_period"
* ``"date"``: date of period after date range has been resampled at
frequency ``freq``
* ``"income"``: sum of positive transaction amounts for the period
* ``"expense"``: absolute value of sum of negative transactions for the period
* ``"balance"``: income - expense for the period
* ``"savings_pc``: 100 * balance / income
* ``"cumulative_income"``: income plus the incomes of all previous periods
* ``"cumulative_balance"``: balance plus the balances of all previous periods
* ``"cumulative_savings_pc"``: 100 * cumulative_balance / cumulative_income
- key "by_category"
* ``"category"``: category of transactions
* ``"income"``: sum of positive transaction amounts for the category and
date range
* ``"expense"``: absolute value of sum of negative transaction amounts for the
category and date range
* ``"balance"``: income - expense
* ``"income_to_total_income_pc"``: 100 * income / (total income for date range)
* ``"expense_to_total_income_pc"``: 100 * expense / (total income for date range)
* ``"expense_to_total_expense_pc"``: 100 * expense / (total expense for date range)
* ``"daily_avg_balance"``: sum of all amounts for category divided by the number
of days in the date range
* ``"weekly_avg_balance"``: sum of all amounts for category divided by the number
of weeks in the date range
* ``"monthly_avg_balance"``: sum of all amounts for category divided by the number
of months in the date range
* ``"yearly_avg_balance"``: sum of all amounts for category divided by the number
of years in the date range
- key "by_category_and_period"
* ``"date"``: date of period after date range has been resampled at
frequency ``freq``
* ``"category"``: category of transactions
* ``"income"``: sum of positive transaction amounts for the period and category
* ``"expense"``: absolute value of sum of negative transactions for the period
and category
* ``"balance"``: income - expense for the period and category
* ``"income_to_period_income_pc"``: 100 * income / (total income for period)
* ``"expense_to_period_income_pc"``: 100 * expense / (total income for period)
* ``"expense_to_period_expense_pc"``: 100 * expense / (total expense for period)
Round all values to the given number of decimals, or set ``decimals=None`` to avoid rounding.
"""
f = transactions.copy()
# Filter to start and end dates
if start_date is None:
start_date = f["date"].min()
else:
start_date = pd.to_datetime(start_date)
if end_date is None:
end_date = f["date"].max()
else:
end_date = pd.to_datetime(end_date)
f = f.loc[lambda x: (x.date >= start_date) & (x.date <= end_date)].copy()
if "category" in f.columns:
has_category = True
# Removed unused categories
f.category = f.category.cat.remove_unused_categories()
else:
has_category = False
# Create income and expense columns
f["income"] = f.amount.map(lambda x: x if x > 0 else 0)
f["expense"] = f.amount.map(lambda x: -x if x < 0 else 0)
f["balance"] = f.income - f.expense
f = f.filter(["date", "category", "income", "expense", "balance"])
# Count some dates
delta = end_date - start_date
num_days = delta.days + 1
num_weeks = num_days / 7
num_months = num_days / (365 / 12)
num_years = num_days / 365
result = {}
# By none
result["by_none"] = (
f
.assign(
start_date=start_date,
end_date=end_date,
)
.groupby(["start_date", "end_date"])
.sum()
.reset_index()
.assign(
savings_pc=lambda x: 100 * x.balance / x.income,
)
)
total = result["by_none"].to_dict("records")[0]
# By period
    period = pd.Grouper(freq=freq, label="left", closed="left")  # api: pandas.Grouper
"""
helper functions for `goenrich`
"""
import pandas as pd
import numpy as np
def generate_background(annotations, df, go_id, entry_id):
""" generate the backgound from pandas datasets
>>> O = ontology(...)
>>> annotations = goenrich.read.gene2go(...)
>>> background = generate_background(annotations, df, 'GO_ID', 'GeneID')
>>> propagate(O, background, ...)
:param annotations: pd.DataFrame containing the annotations
:param df: pd.DataFrame containing the background genes
:param go_id: GO id column name
:param entry_id: Gene id column name
:returns: dictionary with the background annotations
"""
return {k : set(v) for k,v in
            pd.merge(annotations, df[[entry_id]]).groupby(go_id)[entry_id]}  # api: pandas.merge
import os
import pandas as pd
# https://github.com/CSSEGISandData/COVID-19.git
REPOSITORY = "https://raw.githubusercontent.com/CSSEGISandData"
MAIN_FOLDER = "COVID-19/master/csse_covid_19_data/csse_covid_19_time_series"
CONFIRMED_FILE = "time_series_covid19_confirmed_global.csv"
DEATHS_FILE = "time_series_covid19_deaths_global.csv"
RECOVERED_FILE = "time_series_covid19_recovered_global.csv"
CONFIRMED_PATH = os.path.join(REPOSITORY, MAIN_FOLDER, CONFIRMED_FILE)
DEATHS_PATH = os.path.join(REPOSITORY, MAIN_FOLDER, DEATHS_FILE)
RECOVERED_PATH = os.path.join(REPOSITORY, MAIN_FOLDER, RECOVERED_FILE)
def group_data_by_country(df):
df = df.drop(columns=["Lat", "Long"])
df_bycountry = df.groupby("Country/Region").sum()
# summing for all country
df_bycountry.loc["Total"] = df_bycountry.sum(axis=0)
return df_bycountry
def get_data_normalized(df):
# dividing by the sum
maximums = df.iloc[:, -1]
df_normalized = df.div(maximums.to_numpy(), axis=0)
return df_normalized
def get_data_for_sir(df_death, df_recovered, df_confirmed):
df_recovered_or_passed = df_recovered + df_death
df_infected = df_confirmed - df_recovered_or_passed
return df_recovered_or_passed, df_infected
def extract_process_data():
df_confirmed = pd.read_csv(CONFIRMED_PATH)
df_deaths = pd.read_csv(DEATHS_PATH)
df_recovered = pd.read_csv(RECOVERED_PATH)
df_confirmed_by_country = group_data_by_country(df_confirmed)
df_deaths_by_country = group_data_by_country(df_deaths)
df_recovered_by_country = group_data_by_country(df_recovered)
df_recovered_or_passed_by_country, df_infected_by_country = get_data_for_sir(
df_deaths_by_country, df_recovered_by_country, df_confirmed_by_country
)
return (
add_datetime(df_confirmed_by_country),
add_datetime(df_deaths_by_country),
add_datetime(df_recovered_by_country),
add_datetime(df_recovered_or_passed_by_country),
add_datetime(df_infected_by_country),
)
def add_datetime(df):
df.loc["Time"] = pd.period_range(df.columns[0], df.columns[-1], freq="D")
return df
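# --- Editor's illustrative sketch (not part of the original module) ---
# group_data_by_country on a tiny made-up frame shaped like the CSSE CSVs
# (one row per province, one column per date): provinces are summed per
# country and a "Total" row is appended by the function above.
_toy_csse = pd.DataFrame({
    "Country/Region": ["France", "France", "Italy"],
    "Lat": [0.0, 0.0, 0.0],
    "Long": [0.0, 0.0, 0.0],
    "1/22/20": [1, 2, 3],
    "1/23/20": [2, 3, 4],
})
# group_data_by_country(_toy_csse).loc["Total"] -> 1/22/20: 6, 1/23/20: 9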
def data_gouv_vue_ensemble():
"""
Données relatives à l’épidémie de COVID-19 en France : vue d’ensemble
(Data on the COVID-19 epidemic in France: overview)
https://www.data.gouv.fr/fr/datasets/donnees-relatives-a-lepidemie-de-covid-19-en-france-vue-densemble/#_
Columns:
date
total_cas_confirmes
total_deces_hopital
total_deces_ehpad
total_cas_confirmes_ehpad
total_cas_possibles_ehpad
patients_reanimation
patients_hospitalises
total_patients_gueris
nouveaux_patients_hospitalises
nouveaux_patients_reanimation
"""
url_stable = (
"https://www.data.gouv.fr/fr/datasets/r/d3a98a30-893f-47f7-96c5-2f4bcaaa0d71"
)
df = pd.read_csv(url_stable)
df.index = pd.to_datetime(df["date"])
df = df.drop(columns=["date"])
df = df.sort_index()
return df
def data_gouv_taux_incidence():
"""
Indicateurs de l’activité épidémique:
taux d'incidence de l'épidémie de COVID-19 par métropole
(Epidemic activity indicators: COVID-19 incidence rate per metropolis)
https://www.data.gouv.fr/fr/datasets/indicateurs-de-lactivite-epidemique-taux-dincidence-de-lepidemie-de-covid-19-par-metropole/
Columns:
epci2020: EPCI code
semaine_glissante: rolling week
clage_65:
0 for the incidence rate over all age groups
65 for the incidence rate among people older than 65
ti: number of new positive cases per 100,000 inhabitants over a 7-day rolling window
"""
url_stable = (
"https://www.data.gouv.fr/fr/datasets/r/61533034-0f2f-4b16-9a6d-28ffabb33a02"
)
df_main = pd.read_csv(url_stable)
df_main = df_main.rename(columns={"epci2020": "EPCI"})
# Mapping between the EPCI code and the metropolis name
url_epci = "doc/metropole-epci.csv"
df_epci = pd.read_csv(url_epci, sep=";")
df = pd.merge(df_main, df_epci, how="left", on="EPCI")
df_65 = df.loc[df["clage_65"] == 65]
df_65["semaine_glissante"] = [i[11:] for i in df_65["semaine_glissante"]]
df_65.index = pd.to_datetime(df_65["semaine_glissante"])
df_65 = df_65.drop(columns=["semaine_glissante", "clage_65", "EPCI"])
df_65 = df_65.pivot(columns="Metropole", values="ti")
df_0 = df.loc[df["clage_65"] == 0]
df_0["semaine_glissante"] = [i[11:] for i in df_0["semaine_glissante"]]
df_0.index = pd.to_datetime(df_0["semaine_glissante"])
df_0 = df_0.drop(columns=["semaine_glissante", "clage_65", "EPCI"])
df_0 = df_0.pivot(columns="Metropole", values="ti")
return df, df_65, df_0
def data_gouv_hospital():
"""
Data on new intensive-care admissions by region:
number of new patients admitted to intensive care in the last 24 hours.
Columns:
sexe:
0: women + men
1: men
2: women
dep: department
hosp: number of people currently hospitalised
rea: number of people currently in intensive care
rad: cumulative number of people discharged home
dc: cumulative number of people who died in hospital
"""
url_stable = (
"https://www.data.gouv.fr/fr/datasets/r/63352e38-d353-4b54-bfd1-f1b3ee1cabd7"
)
df_main = | pd.read_csv(url_stable, sep=";") | pandas.read_csv |
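# --- Editor's illustrative sketch (not part of the original module; the function above is truncated here) ---
# A typical aggregation of the hospital file loaded above, kept as comments.
# The "jour" date column is an assumption about the data.gouv.fr export; the
# other column names come from the docstring.
# national = (df_main[df_main["sexe"] == 0]
#             .groupby("jour")[["hosp", "rea", "rad", "dc"]]
#             .sum())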
'''This module analyzes the data imported from the Excel sheet analysis.'''
import pandas as pd
import openpyxl as xl
from openpyxl import load_workbook
import numpy as np
import os
from copy import copy
import random
from random import randint,seed
from openpyxl.chart import BarChart, Reference, Series,LineChart
from .collect import columns_quality as qc_molds
#cols=["day","machine","product_name","product_code","product_parts","standard_dry_weight",
#"standard_dry_weight_from","standard_dry_weight_to","average_dry_weight"
#,"standard_rate_hour","c_t_standard_per_second","set","rat_actually","c_t_actually","sum_scrabe_no_parts"
#,"number_scrab_by_item","gross_production_by_set","scrabe_standard"
#,"gross_production","scrap_percent_by_item","customer_id","mold_id","item_id"]
col_rename={"product_name": "اسم المنتج","product_name_by_parts":"اسم المنتج بالاجزاء"
,"product_code":"كود المنتج","product_parts":"اجزاء المنتج","machine_id":'رقم الماكينة',"number_day_use":"عدد ايام التشغيل"
,"standard_dry_weight_from":"مواصفة الوزن الجاف من","standard_dry_weight_to":"مواصفة الوزن الجاف إلي",
"weight_under_validation":"الاوزان الاقل من المواصفة","weight_above_validation":"الاوزان الاعلي من المواصفة",
"c_t_deviation":"الفارق بين المعدل المعيارى والفعلى","rat_standard_deviation":'الانحراف المعياري لمعدل الانتاج',
'rat_validation':'التحقق من مطابقة معدل الانتاج للمواصفة',
"sum_scrabe_no_parts":"عدد التوالف بالقطع","number_scrab_by_item":"عدد التوالف بالطقم",
"gross_production_by_set":"إجمالي الانتاج بالطقم","gross_production":"إجمالي الانتاج بالقطعة","scrap_percent_by_item":"نسبة الاسكراب بالطقم",
"product_groub":"اسم المنتج المجمع","standard_dry_weight":"الوزن الجاف المعياري","average_dry_weight":"متوسط الوزن الجاف الفعلي"
,"standard_rate_hour":"المعدل المعياري بالساعة","rat_actually":"متوسط المعدل الفعلي بالساعة"
,"c_t_standard_per_second":"زمن الدورة المعياري بالثانية","c_t_actually":"متوسط زمن الدورة الفعلي بالثانية"
,"scrap_standard":"معياري الاسكراب","parts_patchsNumbers":"ارقام الباتشات بالجزء","scrap_percent_by_item":"نسبة التوالف بالقطعة"
,"Items_patchsNumbers":"ارقام الباتشات بالاصناف","scrabe_standard":"معياري التوالف","part_id":"رقم الجزء",
"mold_id":"رقم الاسطمبة","item_id":"رقم المنتج" } # for recaull module to change english name to arabic
columns_weight=["day","year","month","machine_id","product_name_by_parts","product_name","product_code","standard_dry_weight",
"standard_dry_weight_from","standard_dry_weight_to",'shift1_dry_weight1','shift1_dry_weight2','shift1_dry_weight3',
'shift1_dry_weight4','shift1_dry_weight5','shift2_dry_weight1','shift2_dry_weight2',
'shift2_dry_weight3','shift2_dry_weight4','shift2_dry_weight5',"average_dry_weight","part_id",'factory','deepth_mm','id_DayPartUnique']
columns_machine=["day","year","month","machine_id","product_name","product_code","scrabe_standard","sum_scrabe_no_parts"
,"number_scrab_by_item","gross_production",'gross_production_by_set','parts_patchsNumbers','scrap_percent_by_item'
,"number_day_use",'Items_patchsNumbers',"item_id","machine_type",'factory','tall_mm','id_DayPartUnique']
columns_cycle_time=["day","year","month","machine_id","mold_name","product_name","product_code","set","standard_rate_hour"
,"c_t_standard_per_second",'shift1_c_t1','shift1_c_t2','shift2_c_t1','shift2_c_t2',"rat_actually","c_t_actually","number_day_use",'rat_validation',"mold_id",'factory','width_mm','id_DayPartUnique']
class Group():
def __init__(self,folder,readfile,readsheet,column1,column2,writefile,writesheet):
#self.folder=folder
self.folder=folder
self.readfile=readfile
self.readsheet=readsheet
self.column1=column1
self.column2=column2
self.writefile=writefile
self.writesheet=writesheet
def monthly(self):
os.chdir(self.folder)
reader_file=self.readfile
daily_analysis3= pd.read_excel(reader_file,self.readsheet)
daily_analysis_bool3=daily_analysis3["year"]==self.column1
daily_analysis2=daily_analysis3[daily_analysis_bool3]
daily_analysis_bool2=daily_analysis2["month"]==self.column2
daily_analysis=daily_analysis2[daily_analysis_bool2]
daily_analysis_bool=daily_analysis["day"]<=self.writefile #for less than the day
daily_analysis=daily_analysis[daily_analysis_bool]
daily_analysis=daily_analysis[qc_molds]
dry_weight3=daily_analysis[columns_weight]
dry_weight_bool=dry_weight3['average_dry_weight'].notnull()
dry_weight2=dry_weight3[dry_weight_bool]
scrap4=daily_analysis[columns_machine]
scrap_bool=scrap4["gross_production"].notnull()
scrap3=scrap4[scrap_bool]
scrap2=scrap3
molds_rate3=daily_analysis[columns_cycle_time]
molds_rate_bool=molds_rate3["c_t_actually"].notnull()
molds_rate2=molds_rate3[molds_rate_bool]
#scrap
scrap=scrap2.groupby(["product_name","product_code","scrabe_standard"])['number_scrab_by_item',
"gross_production_by_set","sum_scrabe_no_parts","gross_production","number_day_use"].sum()
scrap["scrap_percent_by_item"]=scrap['number_scrab_by_item']/scrap['gross_production']
#scrap= pd.DataFrame()
scrap_product_machine=scrap2.groupby(["product_name","product_code","machine_id","scrabe_standard"])['number_scrab_by_item',
"gross_production_by_set","sum_scrabe_no_parts","gross_production","number_day_use"].sum()
scrap_product_machine["scrap_percent_by_item"]=scrap_product_machine['number_scrab_by_item']/scrap_product_machine['gross_production']
scrap_machine_product=scrap2.groupby(["machine_id","scrabe_standard","product_name","product_code"])['number_scrab_by_item',
"gross_production_by_set","sum_scrabe_no_parts","gross_production","number_day_use"].sum()
scrap_machine_product["scrap_percent_by_item"]=scrap_machine_product['number_scrab_by_item']/scrap_machine_product['gross_production']
machines=scrap2.groupby(["machine_id","scrabe_standard","machine_type"])['number_scrab_by_item',"gross_production_by_set",
'sum_scrabe_no_parts',"gross_production","number_day_use"].sum()
machines["scrap_percent_by_item"]=machines['number_scrab_by_item']/machines['gross_production']
scrap_bool2=scrap2["scrap_percent_by_item"]>scrap2["scrabe_standard"]
scrap_ncr=scrap2[scrap_bool2]
print("please weight")
#production rate report
molds_rate=molds_rate2.groupby(["product_name","product_code","set","standard_rate_hour"
,"c_t_standard_per_second"])["rat_actually","c_t_actually"].mean()
#molds_rate_bools_ncr=molds_rate["rat_actually"]>molds_rate["standard_rate_hour"]
#molds_rate_ncr=molds_rate[molds_rate_bools_ncr]
dry_weight=dry_weight2.groupby(["product_name_by_parts","product_code","standard_dry_weight",
"standard_dry_weight_from","standard_dry_weight_to"])["average_dry_weight"].mean()
#dry_weight_bool_low=dry_weight["average_dry_weight"] < dry_weight["standard_dry_weight_from"]
#dry_weight_bool_high=dry_weight["average_dry_weight"] > dry_weight["standard_dry_weight_to"]
#dry_weight_ncr_low=dry_weight[dry_weight_bool_low]
#dry_weight_ncr_high=dry_weigh[dry_weight_bool_high]
# export
#validation data
#scap validation
product_parts_input=daily_analysis["gross_production"].sum
scrab_set_input=daily_analysis["number_scrab_by_item"].sum
#scrab_input=product_parts_input + product_set_input + scrab_parts_input + scrab_set_input
product_parts_outbut=machines["gross_production"].sum
scrab_set_outbut=machines["number_scrab_by_item"].sum
#scrab_output=product_parts_outbut + product_set_outbut + scrab_parts_outbut + scrab_set_outbut
#if scrab_input!=scrab_output:#not equal validation
# exit
# print("the scrap data or production data is worng kinldy review ")
#export analysis to excel sheets is named output in the same folder
# export monthly
# analysis
writer = pd.ExcelWriter("QC_molds_monthly_until.xlsx")
daily_analysis.rename(columns={c:c.lower() for c in col_rename})
daily_analysis.to_excel(writer,'input', index=False)
scrap.rename(columns={c:c.lower() for c in col_rename})
scrap.to_excel(writer,'scrap_product', index=True)
scrap_product_machine.rename(columns={c:c.lower() for c in col_rename})
scrap_product_machine.to_excel(writer,'scrap_product_machines', index=True)
scrap_machine_product.rename(columns={c:c.lower() for c in col_rename})
scrap_machine_product.to_excel(writer,'scrap_machines_product', index=True)
machines.rename(columns={c:c.lower() for c in col_rename})
machines.to_excel(writer,"scrap_machines", index=True)
dry_weight.rename(columns={c:c.lower() for c in col_rename})
dry_weight.to_excel(writer,'weights', index=True)
molds_rate.rename(columns={c:c.lower() for c in col_rename})
molds_rate.to_excel(writer,'c_t', index=True)
#weight_ncr_low=weight_ncr_low.rename(index=str, columns=col_rename)
#weight_ncr_low.to_excel(writer,'weight_ncr_low')
#weight_ncr_high=weight_ncr_high.rename(index=str, columns=col_rename)
print ("analysis for ")
print(self.column1)
print(self.column2)
print("for the days")
print(daily_analysis["day"].unique())
writer.save()
def daily_molds(self,yearDb,monthDb,dayDb):
os.chdir(self.folder)
mold_analysis4= pd.read_excel(self.readfile,self.readsheet)
print("the excel sheet was read")
'''The Excel sheet (input) must not contain null values, otherwise the averaged results will be wrong.'''
last_year=int(mold_analysis4["year"].max())
print("_________daily report___________for yearr________",last_year,type(last_year))
mold_analysis_bool4=mold_analysis4["year"]==last_year
mold_analysis3=mold_analysis4[mold_analysis_bool4]
last_month=int(mold_analysis3["month"].max())
mold_analysis_bool3=mold_analysis3["month"]==last_month
mold_analysis2=mold_analysis3[mold_analysis_bool3]
#dateDay3=mold_analysis2['date_day'].tail(1)
print("_________daily report___________for month________",last_month,type(last_month))
#convert selecting to value
#dateDay2=dateDay3.values
#convert numpy ndarray to list
#dateDay1 = dateDay2.tolist()
#convert list to string
#dateDay = ' '.join([str(elem) for elem in dateDay1])
#day=int(dateDay.split("/",2)[1])
mold_days=mold_analysis2["day"].count()
day=int(dayDb)
daily_analysis1_bool=mold_analysis2["day"]==day
daily_analysis1=mold_analysis2[daily_analysis1_bool]
#validate input
#fix NaN errors in c_t
daily_analysis = daily_analysis1.dropna(subset=['c_t_actually'])#drop all rows with NaN in c_t_actually
daily_analysis = daily_analysis.dropna(subset=['c_t_actually'])#repeated dropna is redundant but harmless
daily_analysis["c_t_actually"]=daily_analysis["c_t_actually"].astype(int)#finally convert the values to ints
report_forCt=daily_analysis.groupby(["machine_id","mold_name"])["standard_dry_weight_from","standard_dry_weight_to","c_t_standard_per_second","standard_rate_hour","average_dry_weight","rat_actually","c_t_actually","rat_validation"].mean()
report=daily_analysis1.groupby(["machine_id","mold_name"])["standard_dry_weight_from","standard_dry_weight_to","average_dry_weight","dryweight_deviation_validation"].mean()
print("___________report data",report)
#report=pd.DataFrame()
mold_count=report_forCt["rat_actually"].count()
#cycle timpe report
c_t=report_forCt[["c_t_standard_per_second","standard_rate_hour","rat_actually","rat_validation","c_t_actually"]]
c_t_nonconfomity=c_t[c_t['rat_actually']*1.05<c_t['standard_rate_hour']]
c_t_nonconfomity_count=c_t_nonconfomity["rat_actually"].count()
if c_t_nonconfomity_count==0: # guard against an empty index
c_t_nonconfomity=pd.DataFrame(index=[0])
c_t_nonconfomity["mold_name"]=0
c_t_nonconfomity["c_t_standard_per_second"]=0
c_t_nonconfomity["standard_rate_hour"]=0
c_t_nonconfomity["rat_actually"]=0
c_t_nonconfomity["rat_validation"]=0
c_t_nonconfomity["rat_validation"]=0
ct_ok=mold_count-c_t_nonconfomity_count
c_t_nonconfomity["c_t_nonconfomity_count"]=c_t_nonconfomity_count
c_t_nonconfomity["ct_ok"]=ct_ok
#weight report
wieght2=report
wieght=wieght2[wieght2["dryweight_deviation_validation"]<1]#filter nonconformity data
print("___________weight data",wieght)
#filter low weithrs
weight_nonconfomity_low=wieght[wieght['average_dry_weight']<wieght["standard_dry_weight_from"]] #add column tocount number of non conformity product
#add rows for filter high weight
#fix error for zero ncr
#if daily_analysis1["dryweight_deviation_validation"]==0
weight_nonconfomity_high=wieght[wieght['average_dry_weight']>wieght["standard_dry_weight_to"]]
#weight_nonconfomity=pd.DataFrame(index=[["machine_id","mold_name"],0])
#weight_nonconfomity=pd.DataFrame(index=[wieght["machine_id","mold_name"]])
weight_nonconfomity=weight_nonconfomity_high
weight_nonconfomity_count=weight_nonconfomity["average_dry_weight"].count()
if weight_nonconfomity_count==0: # guard against an empty index
weight_nonconfomity=pd.DataFrame(index=[0])
if weight_nonconfomity_count>1: # rebuild the frame before appending the low/high weights
weight_nonconfomity=pd.DataFrame()##error: we must select the index of the groupby
weight_nonconfomity=weight_nonconfomity.append(weight_nonconfomity_low) #for append the light weights
weight_nonconfomity=weight_nonconfomity.append(weight_nonconfomity_high) #for append the heavy weights
weight_nonconfomity['weight_nonconfomity_count']=weight_nonconfomity_count
weight_ok=mold_count-weight_nonconfomity_count
weight_nonconfomity['weight_ok']=weight_ok
##screap report by parts##_________
#scap by parts
#report=pd.DataFrame()
scrap5=daily_analysis1[["machine_type","machine_id",'number_scrab_by_item','gross_production','scrap_percent_by_item',"number_day_use","mold_name","scrap_weight_kg","production_weight_kg","product_name","scrabe_standard"]]
#scrap5=report.append(scrap5)#TODO: fix the wrong scrap-percent calculation
scrap_newmachines=scrap5[scrap5["machine_type"]=="new_machine"]
scrap_oldmachines=scrap5[scrap5["machine_type"]=="old_machine"]
scrap_part_new=scrap_newmachines["number_scrab_by_item"].sum()
production_part_new=scrap_newmachines["gross_production"].sum()
scrap_percent_new=scrap_part_new/production_part_new * 100
scrap_part=scrap5["number_scrab_by_item"].sum()
production_set=scrap5["gross_production"].sum()
scrap_percent=scrap_part/production_set * 100
scrap4=scrap5[scrap5["number_day_use"]==1] #for filter the using machines only
scrap3=scrap4[scrap4["scrap_percent_by_item"]>scrap4["scrabe_standard"]]
scrap2=scrap3[scrap3["gross_production"]>scrap3["number_scrab_by_item"]]# for filter any machines didn't create production
scrap=scrap2
print("_______________scrap____________________",scrap)
scrap["scrab_parts_new_machine"]=scrap_part_new
scrap["production_parts_new_machine"]=production_part_new
scrap["production_parts_all"]=production_set
scrap["scrap_percent_new_machine"]=scrap_percent_new
scrap["scrap_percent_all"]=scrap_percent
scrap["scrab_parts_all"]=scrap_part
scrap_nonconfomity_count=scrap["number_scrab_by_item"].count()
##scrap for molds___________________
'''must be in excel sheet (input) not showing null value for not mistacks in average result'''
scrap_molds=scrap5.groupby(["machine_type","machine_id","mold_name","scrabe_standard"])['number_scrab_by_item'].sum()
scrap_molds["gross_production"]=scrap5.groupby(["machine_type","machine_id","mold_name","scrabe_standard"])['gross_production'].sum()
scrap_molds["scrap_weight_kg"]=scrap5.groupby(["machine_type","machine_id","mold_name","scrabe_standard"])['scrap_weight_kg'].sum()
scrap_molds["production_weight_kg"]=scrap5.groupby(["machine_type","machine_id","mold_name","scrabe_standard"])['production_weight_kg'].sum()
scrap_molds["number_scrab_by_item"]=scrap5.groupby(["machine_type","machine_id","mold_name","scrabe_standard"])['number_scrab_by_item'].sum()
production_set_molds=scrap_molds["gross_production"].sum()
scrap_set_molds=scrap_molds["number_scrab_by_item"].sum()
scrap_percent_molds=scrap_set_molds/production_set_molds*100
scrap_molds["percent"]=(scrap_molds["number_scrab_by_item"]/scrap_molds["gross_production"])*100
##scrap for items(as item master)___
scrap_items=scrap5.groupby(["machine_type","machine_id","product_name"])['number_scrab_by_item'].sum()
scrap_items["gross_production"]=scrap5.groupby(["machine_type","machine_id","product_name"])['gross_production'].sum()
scrap_items["number_day_use"]=scrap5.groupby(["machine_type","machine_id","product_name"])['number_day_use'].mean()
scrap_items["scrap_weight_kg"]=scrap5.groupby(["machine_type","machine_id","product_name"])['scrap_weight_kg'].sum()
scrap_items["production_weight_kg"]=scrap5.groupby(["machine_type","machine_id","product_name"])['production_weight_kg'].sum()
scrap_items["number_scrab_by_item"]=scrap5.groupby(["machine_type","machine_id","product_name"])['number_scrab_by_item'].sum()
scrap_items["percent"]=((scrap_items["number_scrab_by_item"])/(scrap_items["gross_production"]))*100
# molds_newmachines=scrap_molds.filter(lambda x: x['machine_type']=="new_machine")
scrap_percent_new=scrap_part_new/production_part_new * 100
##scrap non conformity_______________________
if scrap_nonconfomity_count>0: # guard against an empty index
# scrap=pd.DataFrame(index=[0])
scrap_nonconfomity=scrap2.groupby(["machine_id","mold_name"])['number_scrab_by_item'].sum()
#avoid an error when scrap is 0
else:
#if scrap_nonconfomity.datatypt==0: # guard against an empty index
scrap_nonconfomity=pd.DataFrame(index=[0])
scrap_nonconfomity["mold_name"]=0
print("_____________test__________scrap nonconformity____",scrap_nonconfomity)
#extract excel report
writer_report = pd.ExcelWriter("QC_molds_daily_archive.xlsx")
mold_analysis2.to_excel(writer_report,"input", index=False)
writer_report.save()
writer = pd.ExcelWriter("day_analysis2.xlsx")
#weight_nonconfomity.to_excel(writer,"weight_ncr")
c_t_nonconfomity.to_excel(writer,"c.t")
scrap.to_excel(writer,"scrap")
scrap_molds.to_excel(writer,"scrap_molds",merge_cells=False)
scrap_nonconfomity.to_excel(writer,"scrap_ncr")
scrap_items.to_excel(writer,"scrap_items",merge_cells=False)
report.to_excel(writer,merge_cells=False)
daily_analysis1.to_excel(writer,"input_molds", index=False)
writer.save()
#extract daily excel report by formating
wb = load_workbook("QC_molds_daily_summary.xlsx")
ws2= wb.get_sheet_by_name('daily')
ws=wb.copy_worksheet(ws2) #copy new sheet
#splite day
#lastDay = [daily_analysis1["day_date"].split('-')[2] for d in daily_analysis1.Date]
#lastDay=pd.to_DateFrame(daily_analysis1,columns=["day_date"])
#daily_analysis1.lastDay=pd.to_datetime(lastDay.Datetime,format='%d')
ws.title=str(day) #rename new sheet by the name
# Data can be assigned directly to cells
ws['b1'] = day #day for molds report
ws['c1'] = last_month #month for molds report
ws['d1'] = last_year #year for molds report
ws['A4'] = scrap_percent #scrap on all machie
ws['B4'] = scrap_percent_new #scrap on new macine
ws['a14'] =c_t_nonconfomity.iloc[0][6] #ct pass
ws['b14'] =c_t_nonconfomity.iloc[0][5] #ct not acceptable
ws['a25'] = weight_nonconfomity.iloc[0][5] #weight pass
ws['b25'] =weight_nonconfomity.iloc[0][4]
#if weight_nonconfomity_high>=1: # for ignor impty index error
# ws['b25'] =weight_nonconfomity_low.iloc[0][4] #weights low not acceptable
#else:
# ws['b25'] =0
#ws['a35'] = weight_nonconfomity.iloc[0][5] #weight pass
#if weight_nonconfomity_high>=1: # for ignor impty index error
# ws['b35'] =weight_nonconfomity_high.iloc[0][4] #weights hig not acceptable
#else:
# ws['b35'] =0 #weights hig not acceptable
#_______
ws['a35'] = weight_nonconfomity.iloc[0][5] #weight pass
ws['b25'] =weight_nonconfomity.iloc[0][4]
#to select index of columns for scraps
if scrap_nonconfomity_count>=1:
rows = scrap_nonconfomity.index
r = 4 # start at 10th row
c = 3 # column 'c'
for row in rows:
for item in row:
ws.cell(row=r, column=c).value = item
c += 1 # Column 'd'
c = 3
r += 1
#to select index of columns to weights
if c_t_nonconfomity_count>=1: # guard against an empty index
rows = c_t_nonconfomity.index
#rows = weight_nonconfomity.index
r = 14 # start at 10th row
c = 3 # column 'c'
for row in rows:
for item in row:
ws.cell(row=r, column=c).value = item
c += 1 # Column 'd'
c = 3
r += 1
#to select index of columns to light weights
if weight_nonconfomity_count>=1: # guard against an empty index
rows = weight_nonconfomity_low.index
#rows = weight_nonconfomity.index
r = 25 # start at 10th row
c = 3 # column 'c'
for row in rows:
for item in row:
ws.cell(row=r, column=c).value = item
c += 1 # Column 'd'
c = 3
r += 1
#to select index of columns to hight weights
if weight_nonconfomity_count>=1: # guard against an empty index
rows = weight_nonconfomity_high.index
#rows = weight_nonconfomity.index
r = 35 # start at 10th row
c = 3 # column 'c'
for row in rows:
for item in row:
ws.cell(row=r, column=c).value = item
c += 1 # Column 'd'
c = 3
r += 1
wb.save("QC_molds_daily_summary.xlsx")
#__________________archinve work boook monthly and daily report________________________________________
#__________________archinve work boook monthly and daily report general________________________________________
#input report
wb_formats=load_workbook("format_QC_reports_v2.xlsx")
ws_input= wb_formats.get_sheet_by_name('input')
#ws_input=wb_formats.copy_worksheet(ws_input2) #copy new sheet
ws_input.sheet_view.zoomScale = 60 #zoom set
column_size_input=pd.DataFrame(mold_analysis2,columns=["month"]).shape[0]
#print("______test___")
#print(mold_analysis2)
r = 4 # start at 4th row
c = 1 # column 'a'
for row in range(0,column_size_input): #you must start by 0 to catch all data , if you start by 1 you ignore first row in data source
rows = mold_analysis2.iloc[row]
for item in rows:
ws_input.cell(row=r, column=c).value = item
c += 1 # Column 'd'
c = 1
r += 1
#__________________archinve work boook monthly and daily report for specific mold
wb_formats.save("QC_molds_daily_archive.xlsx")
def copy_between_workbooks(self):
print("________starting copy last sheet____")
import win32com
from win32com.client import DispatchEx
os.chdir(self.folder)
print("______________test_________")
print(self.folder)
#input last day sheets
excel = DispatchEx('Excel.Application')
wbP=excel.Workbooks.Open('QC_molds_daily_archive')
wbG=excel.Workbooks.Open(self.readfile)
#wbP=excel.Workbooks.Open(r'C:\Temp\Junk\Temp.xlsx')
#wbG=excel.Workbooks.Open(r'C:\Temp\Junk\Temp2.xlsx')
wbG.Worksheets("deleted").Copy(Before=wbP.Worksheets(self.readsheet))
wbP.SaveAs(r'C:\Temp\Junk\Temp.xlsx')
excel.Quit()
del excel # ensure Excel process ends
def copy_between_workbooks2(self,worksheet,index=None):
print("get last sheet in day")
os.chdir(self.folder)
wb=load_workbook(self.readfile)
# def add_sheet(self, worksheet, index=None):
"""Add an existing worksheet (at an optional index)."""
# if not isinstance(worksheet, self._worksheet_class):
# raise TypeError("The parameter you have given is not of the type '%s'" % self._worksheet_class.__name__)
#if index is None:
# self.worksheets.append(worksheet)
#else:
#self.worksheets.insert(index, worksheet)
ws_input= wb.get_sheet_by_name(worksheet)
wb_formats=load_workbook("QC_molds_daily_archive.xlsx")
#if not isinstance(worksheet, self._worksheet_class):
# raise TypeError("The parameter you have given is not of the type '%s'" % self._worksheet_class.__name__)
if index is None:
wb_formats.worksheets.append(ws_input)
else:
wb_formats.worksheets.insert(index, worksheet)
wb_formats.worksheets.insert(1, ws_input)
wb_formats.save("QC_molds_daily_archive.xlsx")
def yearly(self):#for statiscs to qc yearly molds report
os.chdir(self.folder)
reader=pd.read_excel(self.readfile,self.readsheet)
#for more filtraation about id_molds
input_report3=reader[qc_molds]
#input_report2=input_report3[input_report3["customer_name"]==customer]
input_report2=input_report3
input_report=input_report2[input_report2['year']==self.column1]
#verification
#1 is the c_t right? (previous mistakes came from bad input)
#  calculate the expected actual rate by recalculating it from the actual cycle time
#????????
#2 are the names right? (previous mistakes came from bad input)
#  calculate the similarity between names
#????????
#ct report
clos_cycle2=input_report[columns_cycle_time]
ct_bool=clos_cycle2["c_t_actually"].notnull()
clos_cycle=clos_cycle2[ct_bool]
df = pd.DataFrame(clos_cycle, columns = qc_molds )
rat=pd.crosstab([df.mold_name,df.standard_rate_hour],[df.year,df.month],df.rat_actually,aggfunc='mean',margins=False)
ct=pd.crosstab([df.mold_name,df.c_t_standard_per_second],[df.year,df.month],df.c_t_actually,aggfunc='mean',margins=False)
#weight report
clos_weight2=input_report[columns_weight]
weight_bool=clos_weight2["average_dry_weight"].notnull()
clos_weight=clos_weight2[weight_bool]
df2 = pd.DataFrame(clos_weight, columns = qc_molds )
weight=pd.crosstab([df2.product_name_by_parts,df2.standard_dry_weight,
df2.standard_dry_weight_from,df2.standard_dry_weight_to],[df2.year,df2.month],df2.average_dry_weight
,aggfunc='mean',margins=False)
print("pleas wait")
#scrap report
scrap2=input_report[columns_machine]
scrap_bool=scrap2['gross_production'].notnull()
scrap=scrap2[scrap_bool]
df3 = pd.DataFrame(scrap, columns = qc_molds )
scrap_set=pd.crosstab([df3.product_name,df3.product_code,df3.scrabe_standard],[df3.year,df3.month],df3.number_scrab_by_item
,aggfunc='sum',margins=False)
scrap_parts=pd.crosstab([df3.product_name,df3.product_code,df3.scrabe_standard],[df3.year,df3.month],df3.sum_scrabe_no_parts
,aggfunc='sum',margins=False)
production_set=pd.crosstab([df3.product_name,df3.product_code,df3.scrabe_standard],[df3.year,df3.month],df3.gross_production
,aggfunc='sum',margins=False)
production_parts=pd.crosstab([df3.product_name,df3.product_code,df3.scrabe_standard],[df3.year,df3.month],df3.gross_production
,aggfunc='sum',margins=False)
#final report
#day_use=reader["number_day_use"]
#pd.to_numeric(day_use, errors='ignore')
#report=clos_cycle.groupby(["mold_name"])["number_day_use"].sum()
#report["product_name"]=weight["product_name"]
#report["number_day_use"]=day_use
#ouut put by rename columns
writer = pd.ExcelWriter(self.writefile)
input_report=input_report.rename(index=str, columns=col_rename)
input_report.to_excel(writer,"input",index=False)
rat=rat.rename(index=str, columns=col_rename)
rat.to_excel(writer,"rat")
ct=ct.rename(index=str, columns=col_rename)
ct.to_excel(writer,"ct")
weight=weight.rename(index=str, columns=col_rename)
weight.to_excel(writer,"weight")
print("gust wait")
scrap_set=scrap_set.rename(index=str, columns=col_rename)
scrap_set.to_excel(writer,'scrap_set')
scrap_parts=scrap_parts.rename(index=str, columns=col_rename)
scrap_parts.to_excel(writer,'scrap_parts')
production_set=production_set.rename(index=str, columns=col_rename)
production_set.to_excel(writer,'production_set')
production_parts=production_parts.rename(index=str, columns=col_rename)
production_parts.to_excel(writer,'production_parts')
#report=report.rename(index=str, columns=col_rename)
#report.to_excel(writer,'report')
clos_weight.to_excel(writer,'input_weight')
clos_cycle.to_excel(writer,'input_ct')
scrap.to_excel(writer,'input_scrap')
writer.save()
def yearly_ct(self):#for statiscs to qc molds report
os.chdir(self.folder)
reader=pd.read_excel(self.readfile,self.readsheet)
#input_report=reader[qc_molds]
#verification
#1 is the c_t right? (previous mistakes came from bad input)
#  calculate the expected actual rate by recalculating it from the actual cycle time
#????????
#2 are the names right? (previous mistakes came from bad input)
#  calculate the similarity between names
#????????
#ct report
clos_cycle2=reader[columns_cycle_time]
ct_bool=clos_cycle2["c_t_actually"].notnull()
clos_cycle=clos_cycle2[ct_bool]
df = pd.DataFrame(clos_cycle, columns = columns_cycle_time )
rat=pd.crosstab([df.mold_name,df.standard_rate_hour],[df.year,df.month],df.rat_actually,aggfunc='mean',margins=False)
ct=pd.crosstab([df.mold_name,df.c_t_standard_per_second],[df.year,df.month],df.c_t_actually,aggfunc='mean',margins=False)
#final report
#ouut put by rename columns
writer = pd.ExcelWriter(self.writefile)
rat=rat.rename(index=str, columns=col_rename)
rat.to_excel(writer,"rat")
ct=ct.rename(index=str, columns=col_rename)
ct.to_excel(writer,"ct")
clos_cycle.to_excel(writer,'input_ct')
writer.save()
def generate_false_data(self,column1_number,column2_number,row_numbers,cell_form,cell_to,column_numbers):
os.chdir(self.folder)
reader=pd.read_excel(self.readfile,self.readsheet)
wb = xl.load_workbook(self.readfile)
ws= wb.get_sheet_by_name(self.readsheet)
#column_size=reader["month"].shape
column_size=pd.DataFrame(reader,columns=["month"]).shape[0]
int(column_size)
#reader=reader2.iloc[1:column_size+3]#select from 2nd row to last of rows
#ws2=list(ws.rows)[2]
for i in range(2,column_size):
LSL=reader.iloc[i][column1_number]#lower limit (row i and column G)
int(round(LSL,0))
USL=reader.iloc[i][column2_number]#upper limit (row i and column H)
int(round(USL,0))
weight_average=(LSL+ USL)/2 #average
difference=(weight_average-LSL)/3 #to decrease the deviation
LCL=weight_average-difference #to raise the CPK
UCL=weight_average+difference #to raise the CPK
c = 9 # column 'H'
for n in range(row_numbers):
#random.seed(10)
random_value = random.uniform(LCL,UCL)
#random_value = randint(LSL,USL)
#str(round(random_value,0))
#int(random_value)
#values.append(random_value)
# for item in values:
ws.cell(row=i, column=c).value = random_value
c += 1
#n+=1
wb.save(self.writefile)
def spc_molds(self,year,month,*mold,create_workbook=True):
'''This function selects the data for the SPC (statistical process control) analysis.'''
print("________starting statistical process control charts____")
#genecal data
os.chdir(self.folder)
#prepare sheets
#ws_index=wb_formats.copy_worksheet(ws_index)
save_name=str(year)+"-"+str(month)+" QC_SPC.xlsx"
#prepare data
if create_workbook:
wb_formats=load_workbook("format_QC_reports_v2.xlsx")
ws_data2=wb_formats.get_sheet_by_name("spc")
ws_data=wb_formats.copy_worksheet(ws_data2)
ws_data.title=str(mold)
else:
wb_formats=load_workbook(save_name)
ws_data2=wb_formats.get_sheet_by_name("spc")
ws_data=wb_formats.copy_worksheet(ws_data2)
ws_data.title=str(mold)
ws_index=wb_formats.get_sheet_by_name("index")
cols_monthly=["machine_id","mold_name","product_name","product_code","standard_dry_weight",
"standard_dry_weight_from","standard_dry_weight_to","average_dry_weight","year","month","day",
"standard_rate_hour","c_t_standard_per_second","c_t_actually","rat_actually","item_id","mold_id","scrabe_standard",'sum_scrabe_shortage_bySet','sum_scrabe_roll_bySet',
'sum_scrabe_broken_bySet','sum_scrabe_curve_bySet','sum_scrabe_shrinkage_bySet','sum_scrabe_dimentions_bySet','sum_scrabe_weight_bySet','sum_scrabe_dirty_bySet',
'sum_scrabe_cloration_bySet','sum_scrabe_no_parts','number_scrab_by_item',
'parts_patchsNumbers','gross_production','scrap_percent_by_item','Items_patchsNumbers','product_by_set_or_no',
'parts_symbole','part_id','id_DayPartUnique','machine_type','factory','tall_mm','width_mm','deepth_mm','customer_name']
daily_input= | pd.read_excel(self.readfile,"input") | pandas.read_excel |
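# --- Editor's illustrative usage sketch (not part of the original module) ---
# Hypothetical call pattern for the Group class above; the folder and file
# names are placeholders, not taken from the original project.
# g = Group(folder=r"C:\qc_reports", readfile="QC_molds.xlsx", readsheet="input",
#           column1=2021, column2=7, writefile=31, writesheet="output")
# g.monthly()                 # writes "QC_molds_monthly_until.xlsx"
# g.daily_molds(2021, 7, 15)  # writes the daily summary and archive workbooks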
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
from scipy.stats import kurtosis
import matplotlib.pyplot as plt
from scipy.stats import skew
def Outliers_StdDev(data: pd.Series, distance_threshold: int) -> list:
"""
Returns the outliers in a pandas series with the specified distance threshold value
by using standard deviation method.
This approach removes outlier points by flagging any point above (Mean + 3*SD)
and any point below (Mean - 3*SD). The recommended value for distance_threshold is 3 and
it is parameterized for user convenience. SD is the standard deviation.
Example: if x is a point in the series, x is considered an outlier
if x < (mean - 3*SD) or x > (mean + 3*SD). (A short usage sketch follows this function.)
:param data : The pandas series used for detecting outliers.
:param distance_threshold : Specifies how many times the standard deviation value is away from the mean.
return : The list of outlier values in the given series.
"""
outliers = []
# Find mean, standard deviation for the given pandas series
data_mean, data_std = np.mean(data), np.std(data)
anomaly_cutoff = data_std * distance_threshold
# Get the lower limit and upper limit values
lower, upper = data_mean - anomaly_cutoff, data_mean + anomaly_cutoff
# Get the values (outliers) which are greater than upper limit and lesser than lower limit
outliers = [x for x in data if x < lower or x > upper]
return outliers
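# --- Editor's illustrative usage sketch ---
# Made-up data: twenty points near 10 and a single extreme value. With
# distance_threshold=3 only the extreme value falls outside mean +/- 3*SD.
_example_series = pd.Series([10] * 20 + [100])
# Outliers_StdDev(_example_series, 3)  ->  [100]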
def Calculate_Statistics(dataframe: pd.Series, step_size: int, length: int) -> pd.DataFrame:
"""
Returns the pandas dataframe with different statistcal values as features like Mean, Standard Deviation,
Kurtosis, Skewness, Subsequence start.
This method will take the pandas dataframe as input along with step size and length. Based on the step size
and length, the given dataframe will be divided into sub arrays like windows sliding. For every subarray, it
will calculate Mean, Standard deviation, Kurtosis, Skewness values. At the end, a new dataframe will be created
with all these calculated statistical values.
For example, consider a dataframe with 12 values, step size as 2 and length as 3 are given as input.
The sub arrays will be slided like {1 to 3, 3 to 6, 5 to 8, 7 to 10, 9 to 12}. For these subsequences,
statistcal methods will be implemented to get the required values.
:param dataframe : The pandas dataframe in which the statistical methods to be applied.
:step_size : It specifies how much values we need to slide through the dataframe.
:length : Pre defined value for choosing subsequence length.
return : A pandas dataframe with calculated mean, standard deviation, kurtosis and skewness values for the
subsequences based on the predefined length and step size.
"""
# Initialize Values
start = 1
end = length
# Initialize dictionary
mean = {}
std_dev = {}
kurto = {}
skewness = {}
# Get the copy of data
arr = dataframe.copy()
# Loop through the data to find mean, standard deviation, kurtosis and skewness
while end != len(arr):
j = 0
subarray = []  # reset the window for each subsequence so values do not accumulate
# Get the subsequence as a sub array
for i in range(start, end):
subarray.append(arr[i])
j = j + 1
# Calculate
val = np.mean(subarray)
st_val = np.std(subarray)
kurt_val = kurtosis(subarray, fisher=True, bias=True)
skewness_val = skew(subarray)
# Assign values to corresponding dictionary value with subsequence start number as key
mean[start] = val
std_dev[start] = st_val
kurto[start] = kurt_val[0]
skewness[start] = skewness_val[0]
# Slide the window based on the step size
start = start + step_size
end = end + step_size
# Construct Dataframe with mean,standard deviation and kurtosis values
newdf = | pd.DataFrame() | pandas.DataFrame |
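# --- Editor's illustrative sketch (not part of the original module; the function above is truncated here) ---
# The per-window statistics used by Calculate_Statistics, computed directly on
# one made-up window of values, independent of the sliding-window bookkeeping:
_window = np.array([1.0, 2.0, 2.5, 3.0, 10.0])
_window_stats = {
    "mean": np.mean(_window),
    "std": np.std(_window),
    "kurtosis": kurtosis(_window, fisher=True, bias=True),
    "skew": skew(_window),
}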
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Created by <NAME>
import unittest
import pandas as pd
import pandas.testing as pdtest
from allfreqs import AlleleFreqs
from allfreqs.classes import Reference, MultiAlignment
from allfreqs.tests.constants import (
REAL_ALG_X_FASTA, REAL_ALG_X_NOREF_FASTA, REAL_RSRS_FASTA,
REAL_ALG_L6_FASTA, REAL_ALG_L6_NOREF_FASTA,
SAMPLE_MULTIALG_FASTA, SAMPLE_MULTIALG_NOREF_FASTA, SAMPLE_REF_FASTA,
SAMPLE_MULTIALG_CSV, SAMPLE_MULTIALG_NOREF_CSV, SAMPLE_REF_CSV,
sample_sequences_df, SAMPLE_SEQUENCES_DICT, sample_sequences_freqs,
sample_sequences_freqs_amb, SAMPLE_FREQUENCIES,
SAMPLE_FREQUENCIES_AMB, REAL_ALG_X_DF, REAL_X_FREQUENCIES, REAL_ALG_L6_DF,
REAL_L6_FREQUENCIES, TEST_CSV
)
class TestBasic(unittest.TestCase):
def setUp(self) -> None:
ref = Reference("AAG-CTNGGGCATTTCAGGGTGAGCCCGGGCAATACAGGG-TAT")
alg = MultiAlignment(SAMPLE_SEQUENCES_DICT)
self.af = AlleleFreqs(multialg=alg, reference=ref)
self.af_amb = AlleleFreqs(multialg=alg, reference=ref, ambiguous=True)
def test_df(self):
# Given/When
exp_df = sample_sequences_df()
# Then
pdtest.assert_frame_equal(self.af.df, exp_df)
def test_frequencies(self):
# Given/When
exp_freqs = sample_sequences_freqs()
# Then
pdtest.assert_frame_equal(self.af.frequencies, exp_freqs)
def test_frequencies_ambiguous(self):
# Given/When
exp_freqs = sample_sequences_freqs_amb()
# Then
pdtest.assert_frame_equal(self.af_amb.frequencies, exp_freqs)
def test__get_frequencies(self):
# Given
test_freq = pd.Series({'A': 0.2, 'C': 0.2, 'G': 0.1, 'T': 0.3,
'-': 0.1, 'N': 0.1})
exp_freq = {'A': 0.2, 'C': 0.2, 'G': 0.1, 'T': 0.3, 'gap': 0.1,
'oth': 0.1}
# When
result = self.af._get_frequencies(test_freq)
# Then
self.assertTrue(self._dict_almost_equal(result, exp_freq))
def test_to_csv(self):
# Given/When
self.af.to_csv(TEST_CSV)
result = pd.read_csv(TEST_CSV)
expected = pd.read_csv(SAMPLE_FREQUENCIES)
# Then
pdtest.assert_frame_equal(result, expected)
def test_to_csv_ambiguous(self):
# Given/When
self.af_amb.to_csv(TEST_CSV)
result = pd.read_csv(TEST_CSV)
expected = pd.read_csv(SAMPLE_FREQUENCIES_AMB)
# Then
pdtest.assert_frame_equal(result, expected)
@staticmethod
def _dict_almost_equal(expected: dict, result: dict, acc=10**-8) -> bool:
"""Compare to dictionaries and ensure that all their values are the
same, accounting for some fluctuation up to the given accuracy value.
Args:
expected: expected dictionary
result: resulting dictionary
acc: accuracy to use [default: 10**-8]
"""
if expected.keys() == result.keys():
for key in expected.keys():
if abs(expected[key] - result[key]) < acc:
continue
return True
return False
# From Fasta
class TestFromFasta(unittest.TestCase):
def setUp(self) -> None:
self.af = AlleleFreqs.from_fasta(sequences=SAMPLE_MULTIALG_FASTA)
def test_df(self):
# Given/When
exp_df = sample_sequences_df()
# Then
pdtest.assert_frame_equal(self.af.df, exp_df)
def test_frequencies(self):
# Given/When
exp_freqs = sample_sequences_freqs()
# Then
pdtest.assert_frame_equal(self.af.frequencies, exp_freqs)
def test_to_csv(self):
# Given/When
self.af.to_csv(TEST_CSV)
result = pd.read_csv(TEST_CSV)
expected = pd.read_csv(SAMPLE_FREQUENCIES)
# Then
pdtest.assert_frame_equal(result, expected)
class TestFromFastaNoRef(unittest.TestCase):
def setUp(self) -> None:
self.af = AlleleFreqs.from_fasta(sequences=SAMPLE_MULTIALG_NOREF_FASTA,
reference=SAMPLE_REF_FASTA)
def test_df(self):
# Given/When
exp_df = sample_sequences_df()
# Then
pdtest.assert_frame_equal(self.af.df, exp_df)
def test_frequencies(self):
# Given/When
exp_freqs = sample_sequences_freqs()
# Then
pdtest.assert_frame_equal(self.af.frequencies, exp_freqs)
def test_to_csv(self):
# Given/When
self.af.to_csv(TEST_CSV)
result = pd.read_csv(TEST_CSV)
expected = pd.read_csv(SAMPLE_FREQUENCIES)
# Then
pdtest.assert_frame_equal(result, expected)
# From Csv
class TestFromCsv(unittest.TestCase):
def setUp(self) -> None:
self.af = AlleleFreqs.from_csv(sequences=SAMPLE_MULTIALG_CSV)
def test_df(self):
# Given/When
exp_df = sample_sequences_df()
# Then
pdtest.assert_frame_equal(self.af.df, exp_df)
def test_frequencies(self):
# Given/When
exp_freqs = sample_sequences_freqs()
# Then
pdtest.assert_frame_equal(self.af.frequencies, exp_freqs)
def test_to_csv(self):
# Given/When
self.af.to_csv(TEST_CSV)
result = pd.read_csv(TEST_CSV)
expected = pd.read_csv(SAMPLE_FREQUENCIES)
# Then
pdtest.assert_frame_equal(result, expected)
class TestFromCsvNoRef(unittest.TestCase):
def setUp(self) -> None:
self.af = AlleleFreqs.from_csv(sequences=SAMPLE_MULTIALG_NOREF_CSV,
reference=SAMPLE_REF_CSV)
def test_df(self):
# Given/When
exp_df = sample_sequences_df()
# Then
pdtest.assert_frame_equal(self.af.df, exp_df)
def test_frequencies(self):
# Given/When
exp_freqs = sample_sequences_freqs()
# Then
pdtest.assert_frame_equal(self.af.frequencies, exp_freqs)
def test_to_csv(self):
# Given/When
self.af.to_csv(TEST_CSV)
result = | pd.read_csv(TEST_CSV) | pandas.read_csv |
import pandas as pd
import numpy as np
def clean_static_df(static_df):
static_df_clean = static_df.copy()
static_df_clean = pd.get_dummies(data=static_df_clean, columns=[
'sex']).drop('sex_MALE', axis=1)
static_df_clean.drop('discharge_destination', axis=1, inplace=True)
static_df_clean.loc[:, 'datetime_min'] = pd.to_datetime(static_df_clean['datetime_min'],
dayfirst=True)
first_lab_time_col = pd.to_datetime(
static_df_clean['datetime_min'], dayfirst=True)
static_df_clean.loc[:, 'icu_in_year'] = first_lab_time_col.dt.year
static_df_clean.loc[:, 'icu_in_month'] = first_lab_time_col.dt.month
static_df_clean.loc[:, 'icu_in_day'] = first_lab_time_col.dt.day
static_df_clean.drop('datetime_min', axis=1, inplace=True)
static_df_clean.drop('datetime_max', axis=1, inplace=True)
# TODO: why is there no hourly info?
static_df_clean = static_df_clean.set_index('patient_id')
return static_df_clean
def clean_dynamic_df(dynamic_df, static_df):
dynamic_df_times = dynamic_df.merge(static_df[['patient_id','datetime_min']],
how='inner', on='patient_id')
dynamic_df_times.loc[:, 'datetime_min'] = pd.to_datetime(dynamic_df_times['datetime_min'],
dayfirst=True)
dynamic_df_times.loc[:, 'datetime'] = pd.to_datetime(dynamic_df_times['datetime'],
dayfirst=True)
dynamic_df_times.loc[:, 'hours_in'] = \
(dynamic_df_times['datetime'] - dynamic_df_times['datetime_min'])
dynamic_df_times = dynamic_df_times.drop(['datetime','datetime_min'], axis=1)
# upsample to hourly
dynamic_df_hourly = dynamic_df_times.set_index('hours_in').groupby('patient_id').resample('H').mean()
dynamic_df_hourly = dynamic_df_hourly.drop('patient_id', axis=1).reset_index()
# dynamic_df_hourly = dynamic_df_hourly.set_index('patient_id')
dynamic_df_clean = dynamic_df_hourly.set_index(['patient_id','hours_in']).dropna(how='all')
return dynamic_df_clean
def clean_treat_df(treat_df, static_df):
treat_df_times = treat_df.merge(static_df[['patient_id','datetime_min']],
how='inner', on='patient_id')
treat_df_times.loc[:, 'datetime_min'] = pd.to_datetime(treat_df_times['datetime_min'],
dayfirst=True)
treat_df_times.loc[:, 'datetime'] = pd.to_datetime(treat_df_times['date'],
dayfirst=True)
treat_df_times.loc[:, 'hours_in'] = \
(treat_df_times['datetime'] - treat_df_times['datetime_min'])
treat_df_times = treat_df_times.drop(['datetime','datetime_min'], axis=1)
treat_df_hourly = treat_df_times.set_index('hours_in').groupby('patient_id').resample('H').mean()
treat_df_hourly = treat_df_hourly.drop('patient_id', axis=1).reset_index()
treat_df_clean = treat_df_hourly.set_index(['patient_id','hours_in']).dropna(how='all')
return treat_df_clean
def clean_outcome_df(static_df):
outcome_df = static_df[['patient_id','discharge_destination','datetime_min','datetime_max']].copy()
outcome_df.loc[:,'datetime_min'] = pd.to_datetime(outcome_df['datetime_min'])
outcome_df.loc[:,'datetime_max'] = pd.to_datetime(outcome_df.loc[:,'datetime_max'])
outcome_df['hours_in'] = outcome_df['datetime_max'] - outcome_df['datetime_min']
outcome_df.loc[:, 'death'] = (outcome_df['discharge_destination'] == 'Fallecimiento') * 1.0
outcome_df = outcome_df.drop(['discharge_destination','datetime_min','datetime_max'], axis=1)
outcome_df = outcome_df.set_index('patient_id')
return outcome_df
def get_CXYT_for_modeling(static_df, dynamic_df, treat_df, outcome_df, output_filepath):
static_df = static_df.reset_index()
dynamic_df = dynamic_df.reset_index()
treat_df = treat_df.reset_index()
outcome_df = outcome_df.reset_index()
# upsample treatemnt to hourly before merging
treat_df.loc[:, 'hours_in'] = | pd.to_timedelta(treat_df['hours_in']) | pandas.to_timedelta |
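# --- Editor's illustrative sketch (not part of the original module; the function above is truncated here) ---
# The per-patient hourly up-sampling pattern used in clean_dynamic_df and
# clean_treat_df, on a tiny made-up frame: measurements are bucketed into
# hourly bins per patient_id and averaged within each bin.
_toy_dynamic = pd.DataFrame({
    "patient_id": [1, 1, 1],
    "hours_in": pd.to_timedelta(["00:30:00", "00:45:00", "02:10:00"]),
    "heart_rate": [80.0, 90.0, 70.0],
})
_toy_hourly = (_toy_dynamic.set_index("hours_in")
               .groupby("patient_id")
               .resample("H")
               .mean())
# bins: [0h, 1h) -> heart_rate 85.0, [1h, 2h) -> NaN, [2h, 3h) -> 70.0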
import pandas as pd
import numpy as np
DATA_PATH = 'rawdata/' #where the raw data files are
ALT_OUTPUT_PATH = 'alt_output/' #there will be many files produced -- the files that are not "the main ones"
# are placed in this directory
feasDF = pd.read_csv(DATA_PATH+"mipdev_feasibility.csv") #read the feasibility file into a data frame
feasible_DF = feasDF[feasDF['Code'] != 'inf'].groupby('Problem Name').count()
infeasible_DF = feasDF[feasDF['Code'] == 'inf'].groupby('Problem Name').count()
feasible_instances = set(feasible_DF.index)
infeasible_instances = set(infeasible_DF.index)
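# --- Editor's illustrative sketch (not part of the original script) ---
# The feasibility split above, on a tiny made-up frame: an instance lands in
# the feasible set if any seed has a Code other than 'inf', and in the
# infeasible set if any seed has Code == 'inf'.
# _toy_feas = pd.DataFrame({"Problem Name": ["a", "a", "b"], "Code": ["ok", "ok", "inf"]})
# feasible   -> {"a"}
# infeasible -> {"b"}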
#some of the instances do not have a complete set of labels
# (all are missing data from either emphasis_optimality or seperate_aggressive or both)
# and when a seed is missing any time data at all, the PDInt is also missing
instances_with_incomplete_data = set([ 'buildingenergy',
'neos-565672',
'neos-848589',
'neos-872648',
'neos-873061',
'netdiversion',
'npmv07',
'ns1758913',
'ofi',
'opm2-z11-s8',
'sing245',
'wnq-n100-mw99-14'])
feasible_instances = feasible_instances.difference(instances_with_incomplete_data)
infeasible_instances = infeasible_instances.difference(instances_with_incomplete_data)
STATUS_COLS = range(1,15) #which columns contain the completion status of an algorithm
DATA_COLS = range(15,29) #which columns contain the PD Ints
COL_OFFSET = 14 #the number of columns offsetting the completion status from PD Ints
PD = pd.read_csv(DATA_PATH+"mipdev_integral_data.csv") #read the PD Int file into a data frame
PD = PD.drop([0,1]) #remove the extraneous header rows
PD = PD.drop(PD.columns[1], axis=1) #drop seed #
Times = pd.read_csv(DATA_PATH+"mipdev_time_data.csv") #read the time data file into a data frame
Times = Times.drop([0,1]) #remove the extraneous header rows
Times = Times.drop(Times.columns[1], axis=1) #drop seed #
#in the time data files, the status and data columns are opposite that of the PDInt file for some reason
# the next line switches them back so that the status cols are in front of the data cols
Times = Times[[Times.columns[0]]+Times.columns[DATA_COLS].tolist()+Times.columns[STATUS_COLS].tolist()]
Regions = pd.read_csv(DATA_PATH+"regions_time_data.csv") #read the time data file into a data frame
Regions = Regions.drop([0,1]) #remove the extraneous header rows
Regions = Regions.drop([Regions.columns[1],'emphasis_cpsolver','emphasis_cpsolver.1'], axis=1)
#in the time data files, the status and data columns are opposite that of the PDInt file for some reason
# the next line switches them back so that the status cols are in front of the data cols
Regions = Regions[[Regions.columns[0]]+Regions.columns[DATA_COLS].tolist()+Regions.columns[STATUS_COLS].tolist()] #drop seed #, also drop the cpsolver data since it is all 600
RegionsPD = pd.read_csv(DATA_PATH+"regions_integral_data.csv")
RegionsPD = RegionsPD.drop([0,1])
RegionsPD = RegionsPD.drop([RegionsPD.columns[1],'emphasis_cpsolver','emphasis_cpsolver.1'], axis=1)
######the following data_frames contain data provided to us separately by Robert's colleague######
REDCOST_STATUS_COLS = range(1,3) #which columns contain the completion status of an algorithm
REDCOST_DATA_COLS = range(3,5) #which columns contain the PD Ints
REDCOST_COL_OFFSET = 2 #the number of columns offsetting the completion status from PD Ints
RedCost = pd.read_csv(DATA_PATH+"RedCost_time_data.csv") #read the time data file into a data frame
RedCost = RedCost.drop([0,1]) #remove the extraneous header rows
RedCost = RedCost.drop(RedCost.columns[1], axis=1) #drop seed #
#in the time data files, the status and data columns are opposite that of the PDInt file for some reason
# the next line switches them back so that the status cols are in front of the data cols
RedCost = RedCost[[RedCost.columns[0]]+RedCost.columns[REDCOST_DATA_COLS].tolist()+RedCost.columns[REDCOST_STATUS_COLS].tolist()]
RedCostPD = | pd.read_csv(DATA_PATH+"RedCost_integral_data.csv") | pandas.read_csv |
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import re
from math import ceil
import pandas as pd
from sklearn.metrics import classification_report
from scipy.stats import shapiro, boxcox, yeojohnson
from scipy.stats import probplot
from sklearn.preprocessing import LabelEncoder, PowerTransformer
from category_encoders.target_encoder import TargetEncoder
from sklearn.impute import SimpleImputer, MissingIndicator
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.compose import ColumnTransformer, make_column_transformer
from sklearn.linear_model import LinearRegression, LogisticRegression
# from .charts.classification_visualization import classification_visualization
# from .charts.charts import Plot, ScatterChart
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.utils.multiclass import unique_labels
from sklearn.manifold import TSNE
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
import json
from pyod.models.hbos import HBOS
from statsmodels.api import ProbPlot
# from .charts.charts_extras import (
# feature_importances_plot,
# regression_viz,
# classification_viz,
# )
from sklearn.ensemble import (
RandomForestClassifier,
GradientBoostingClassifier,
RandomForestRegressor,
GradientBoostingRegressor,
)
from sklearn.svm import LinearSVC
import warnings
warnings.filterwarnings("ignore")
sns.set_palette("colorblind")
class CrawtoDS:
def __init__(
self,
data,
target,
test_data=None,
time_dependent=False,
features="infer",
problem="infer",
):
self.input_data = data
self.target = target
self.features = features
self.problem = problem
self.test_data = test_data
self.timedependent = time_dependent
if self.problem == "binary classification":
self.train_data, self.valid_data = train_test_split(
self.input_data, shuffle=True, stratify=self.input_data[self.target],
)
elif self.problem == "regression":
self.train_data, self.valid_data = train_test_split(
self.input_data, shuffle=True,
)
def nan_features(self):
"""Return the features whose share of NaN values exceeds the 25% threshold:
map computes the NaN ratio per feature; filter drops the False entries."""
f = self.input_data.columns.values
len_df = len(self.input_data)
nan_features = list(
filter(
lambda x: x is not False,
map(
lambda x: x
if self.input_data[x].isna().sum() / len_df > 0.25
else False,
f,
),
)
)
return nan_features
def problematic_features(self):
f = self.input_data.columns.values
problematic_features = []
for i in f:
if "Id" in i:
problematic_features.append(i)
elif "ID" in i:
problematic_features.append(i)
return problematic_features
def undefined_features(self):
if self.features == "infer":
undefined_features = list(self.input_data.columns)
undefined_features.remove(self.target)
for i in self.nan_features:
undefined_features.remove(i)
for i in self.problematic_features:
undefined_features.remove(i)
return undefined_features
def numeric_features(self):
numeric_features = []
l = self.undefined_features
for i in l:
if self.input_data[i].dtype in ["float64", "float", "int", "int64"]:
if len(self.input_data[i].value_counts()) / len(self.input_data) < 0.1:
pass
else:
numeric_features.append(i)
return numeric_features
def categorical_features(self, threshold=10):
self.undefined_features
categorical_features = []
to_remove = []
l = self.undefined_features
for i in l:
if len(self.input_data[i].value_counts()) / len(self.input_data[i]) < 0.10:
categorical_features.append(i)
return categorical_features
def indicator(self):
indicator = MissingIndicator(features="all")
indicator.fit(self.train_data[self.undefined_features])
return indicator
def train_missing_indicator_df(self):
x = self.indicator.transform(self.train_data[self.undefined_features])
x_labels = ["missing_" + i for i in self.undefined_features]
missing_indicator_df = pd.DataFrame(x, columns=x_labels)
columns = [
i
for i in list(missing_indicator_df.columns.values)
if missing_indicator_df[i].max() == True
]
return missing_indicator_df[columns].replace({True: 1, False: 0})
def valid_missing_indicator_df(self):
x = self.indicator.transform(self.valid_data[self.undefined_features])
x_labels = ["missing_" + i for i in self.undefined_features]
missing_indicator_df = pd.DataFrame(x, columns=x_labels)
columns = list(self.train_missing_indicator_df)
return missing_indicator_df[columns].replace({True: 1, False: 0})
def numeric_imputer(self):
numeric_imputer = SimpleImputer(strategy="median", copy=True)
numeric_imputer.fit(self.train_data[self.numeric_features])
return numeric_imputer
def categorical_imputer(self):
categorical_imputer = SimpleImputer(strategy="most_frequent", copy=True)
categorical_imputer.fit(self.train_data[self.categorical_features])
return categorical_imputer
def train_imputed_numeric_df(self):
x = self.numeric_imputer.transform(self.train_data[self.numeric_features])
x_labels = [i + "_imputed" for i in self.numeric_features]
imputed_numeric_df = pd.DataFrame(x, columns=x_labels)
return imputed_numeric_df
def valid_imputed_numeric_df(self):
x = self.numeric_imputer.transform(self.valid_data[self.numeric_features])
x_labels = [i + "_imputed" for i in self.numeric_features]
imputed_numeric_df = pd.DataFrame(x, columns=x_labels)
return imputed_numeric_df
def yeo_johnson_transformer(self):
yeo_johnson_transformer = PowerTransformer(method="yeo-johnson", copy=True)
yeo_johnson_transformer.fit(self.train_imputed_numeric_df)
return yeo_johnson_transformer
def yeo_johnson_target_transformer(self):
yeo_johnson_target_transformer = PowerTransformer(method="yeo-johnson", copy=True)
yeo_johnson_target_transformer.fit(
np.array(self.train_data[self.target]).reshape(-1, 1)
)
return yeo_johnson_target_transformer
def train_yeojohnson_df(self):
yj = self.yeo_johnson_transformer.transform(self.train_imputed_numeric_df)
columns = self.train_imputed_numeric_df.columns.values
columns = [i + "_yj" for i in columns]
yj = pd.DataFrame(yj, columns=columns)
return yj
def valid_yeojohnson_df(self):
yj = self.yeo_johnson_transformer.transform(self.valid_imputed_numeric_df)
columns = self.valid_imputed_numeric_df.columns.values
columns = [i + "_yj" for i in columns]
yj = pd.DataFrame(yj, columns=columns)
return yj
def train_transformed_target(self):
if self.problem == "binary classification":
return self.train_data[self.target]
elif self.problem == "regression":
s = self.yeo_johnson_target_transformer.transform(
np.array(self.train_data[self.target]).reshape(-1, 1)
)
s = pd.DataFrame(s, columns=[self.target])
return s
def valid_transformed_target(self):
if self.problem == "binary classification":
return self.valid_data[self.target]
elif self.problem == "regression":
s = self.yeo_johnson_target_transformer.transform(
np.array(self.valid_data[self.target]).reshape(-1, 1)
)
s = pd.DataFrame(s, columns=[self.target])
return s
def train_imputed_categorical_df(self):
x = self.categorical_imputer.transform(self.train_data[self.categorical_features])
x_labels = [i + "_imputed" for i in self.categorical_features]
imputed_categorical_df = pd.DataFrame(x, columns=x_labels)
return imputed_categorical_df
def valid_imputed_categorical_df(self):
x = self.categorical_imputer.transform(self.valid_data[self.categorical_features])
x_labels = [i + "_imputed" for i in self.categorical_features]
imputed_categorical_df = pd.DataFrame(x, columns=x_labels)
return imputed_categorical_df
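    # HBOS = Histogram-Based Outlier Score (the HBOS class is presumably pyod.models.hbos.HBOS);
    # it is fit once on the fully transformed training matrix and reused to score train,
    # validation and test rows for outlyingness.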
def hbos_transformer(self):
hbos = HBOS()
hbos.fit(self.train_transformed_data)
return hbos
def train_hbos_column(self):
hbos_t = self.hbos_transformer.predict(self.train_transformed_data)
return hbos_t
def valid_hbos_column(self):
hbos_v = self.hbos_transformer.predict(self.valid_transformed_data)
return hbos_v
def test_hbos_column(self):
hbos_test = self.hbos_transformer.predict(self.test_transformed_data)
return hbos_test
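    # TargetEncoder (presumably from the category_encoders package) replaces each category level
    # with a smoothed mean of the transformed target; it is fit on the training split only.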
def target_encoder(self):
te = TargetEncoder(cols=self.train_imputed_categorical_df.columns.values)
te.fit(X=self.train_imputed_categorical_df, y=self.train_transformed_target)
return te
def train_target_encoded_df(self):
te = self.target_encoder.transform(self.train_imputed_categorical_df)
columns = list(
map(
lambda x: re.sub(r"_imputed", "_target_encoded", x),
list(self.train_imputed_categorical_df.columns.values),
)
)
te = pd.DataFrame(data=te)
te.columns = columns
return te
def valid_target_encoded_df(self):
te = self.target_encoder.transform(self.valid_imputed_categorical_df)
columns = list(
map(
lambda x: re.sub(r"_imputed", "_target_encoded", x),
list(self.valid_imputed_categorical_df.columns.values),
)
)
te = pd.DataFrame(data=te)
te.columns = columns
return te
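    # The final design matrix is the column-wise merge of target-encoded categoricals,
    # Yeo-Johnson-transformed numerics and the missing-value indicator flags.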
def train_transformed_data(self):
train_transformed_data = (
self.train_target_encoded_df.merge(
self.train_yeojohnson_df, left_index=True, right_index=True
)
.merge(self.train_missing_indicator_df, left_index=True, right_index=True)
.replace(np.nan, 0)
)
return train_transformed_data
def valid_transformed_data(self):
valid_transformed_data = (
self.valid_target_encoded_df.merge(
self.valid_yeojohnson_df, left_index=True, right_index=True
)
.merge(self.valid_missing_indicator_df, left_index=True, right_index=True)
.replace(np.nan, 0)
)
return valid_transformed_data
def test_missing_indicator_df(self):
if self.test_data is not None:
x = self.indicator.transform(self.test_data[self.undefined_features])
x_labels = ["missing_" + i for i in self.undefined_features]
missing_indicator_df = | pd.DataFrame(x, columns=x_labels) | pandas.DataFrame |
#!/usr/bin/env python3
#################################################
# Title: ADSB Plot
# Project: ADSB
# Version: 0.0.1
# Date: Jan, 2020
# Author: <NAME>, KJ4QLP
# Comment:
# - learning plotly
#################################################
import math
import string
import time
import sys
import os
import datetime
import json
import binascii
import argparse
import pandas as pd
import plotly.graph_objects as go
import plotly.express as px
def main():
""" Main entry point to start the service. """
startup_ts = datetime.datetime.utcnow().strftime("%Y%m%d_%H%M%S")
#--------START Command Line argument parser------------------------------------------------------
parser = argparse.ArgumentParser(description="ADSB Simple plot, plotly",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
cwd = os.getcwd()
fp_default = '/'.join([cwd, 'data'])
cfg = parser.add_argument_group('Configuration File')
cfg.add_argument('--adsb_path',
dest='adsb_path',
type=str,
default=fp_default,
help="ADSB Data File Path",
action="store")
cfg.add_argument('--adsb_file',
dest='adsb_file',
type=str,
default="adsb_valid_20200124_050237.json",
help="ADSB File",
action="store")
args = parser.parse_args()
#--------END Command Line argument parser------------------------------------------------------
os.system('reset')
fp = '/'.join([args.adsb_path,args.adsb_file])
    if not os.path.isfile(fp):
print ('ERROR: Invalid ADSB Data File: {:s}'.format(fp))
sys.exit()
print ('Importing ADSB Data File: {:s}'.format(fp))
with open(fp,'r') as f:
data = json.load(f)
print (data)
df = | pd.DataFrame.from_dict(data) | pandas.DataFrame.from_dict |
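# Illustrative plotting sketch for the ADSB frame loaded above (the column names
# 'lon', 'lat' and 'alt' are hypothetical and must be adapted to the actual JSON fields):
# fig = px.scatter(df, x='lon', y='lat', color='alt', title='ADSB positions')
# fig.show()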
import pytest
import numpy as np
from datetime import date, timedelta, time, datetime
import dateutil
import pandas as pd
import pandas.util.testing as tm
from pandas.compat import lrange
from pandas.compat.numpy import np_datetime64_compat
from pandas import (DatetimeIndex, Index, date_range, DataFrame,
Timestamp, offsets)
from pandas.util.testing import assert_almost_equal
randn = np.random.randn
class TestDatetimeIndexLikeTimestamp(object):
# Tests for DatetimeIndex behaving like a vectorized Timestamp
def test_dti_date_out_of_range(self):
# see gh-1475
pytest.raises(ValueError, DatetimeIndex, ['1400-01-01'])
pytest.raises(ValueError, DatetimeIndex, [datetime(1400, 1, 1)])
def test_timestamp_fields(self):
# extra fields from DatetimeIndex like quarter and week
idx = tm.makeDateIndex(100)
fields = ['dayofweek', 'dayofyear', 'week', 'weekofyear', 'quarter',
'days_in_month', 'is_month_start', 'is_month_end',
'is_quarter_start', 'is_quarter_end', 'is_year_start',
'is_year_end', 'weekday_name']
for f in fields:
expected = getattr(idx, f)[-1]
result = getattr(Timestamp(idx[-1]), f)
assert result == expected
assert idx.freq == Timestamp(idx[-1], idx.freq).freq
assert idx.freqstr == Timestamp(idx[-1], idx.freq).freqstr
class TestDatetimeIndex(object):
def test_get_loc(self):
idx = pd.date_range('2000-01-01', periods=3)
for method in [None, 'pad', 'backfill', 'nearest']:
assert idx.get_loc(idx[1], method) == 1
assert idx.get_loc(idx[1].to_pydatetime(), method) == 1
assert idx.get_loc(str(idx[1]), method) == 1
if method is not None:
assert idx.get_loc(idx[1], method,
tolerance=pd.Timedelta('0 days')) == 1
assert idx.get_loc('2000-01-01', method='nearest') == 0
assert idx.get_loc('2000-01-01T12', method='nearest') == 1
assert idx.get_loc('2000-01-01T12', method='nearest',
tolerance='1 day') == 1
assert idx.get_loc('2000-01-01T12', method='nearest',
tolerance=pd.Timedelta('1D')) == 1
assert idx.get_loc('2000-01-01T12', method='nearest',
tolerance=np.timedelta64(1, 'D')) == 1
assert idx.get_loc('2000-01-01T12', method='nearest',
tolerance=timedelta(1)) == 1
with tm.assert_raises_regex(ValueError,
'unit abbreviation w/o a number'):
idx.get_loc('2000-01-01T12', method='nearest', tolerance='foo')
with pytest.raises(KeyError):
idx.get_loc('2000-01-01T03', method='nearest', tolerance='2 hours')
with pytest.raises(
ValueError,
match='tolerance size must match target index size'):
idx.get_loc('2000-01-01', method='nearest',
tolerance=[pd.Timedelta('1day').to_timedelta64(),
pd.Timedelta('1day').to_timedelta64()])
assert idx.get_loc('2000', method='nearest') == slice(0, 3)
assert idx.get_loc('2000-01', method='nearest') == slice(0, 3)
assert idx.get_loc('1999', method='nearest') == 0
assert idx.get_loc('2001', method='nearest') == 2
with pytest.raises(KeyError):
idx.get_loc('1999', method='pad')
with pytest.raises(KeyError):
idx.get_loc('2001', method='backfill')
with pytest.raises(KeyError):
idx.get_loc('foobar')
with pytest.raises(TypeError):
idx.get_loc(slice(2))
idx = pd.to_datetime(['2000-01-01', '2000-01-04'])
assert idx.get_loc('2000-01-02', method='nearest') == 0
assert idx.get_loc('2000-01-03', method='nearest') == 1
assert idx.get_loc('2000-01', method='nearest') == slice(0, 2)
# time indexing
idx = pd.date_range('2000-01-01', periods=24, freq='H')
tm.assert_numpy_array_equal(idx.get_loc(time(12)),
np.array([12]), check_dtype=False)
tm.assert_numpy_array_equal(idx.get_loc(time(12, 30)),
np.array([]), check_dtype=False)
with pytest.raises(NotImplementedError):
idx.get_loc(time(12, 30), method='pad')
def test_get_indexer(self):
idx = pd.date_range('2000-01-01', periods=3)
exp = np.array([0, 1, 2], dtype=np.intp)
tm.assert_numpy_array_equal(idx.get_indexer(idx), exp)
target = idx[0] + pd.to_timedelta(['-1 hour', '12 hours',
'1 day 1 hour'])
tm.assert_numpy_array_equal(idx.get_indexer(target, 'pad'),
np.array([-1, 0, 1], dtype=np.intp))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'backfill'),
np.array([0, 1, 2], dtype=np.intp))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest'),
np.array([0, 1, 1], dtype=np.intp))
tm.assert_numpy_array_equal(
idx.get_indexer(target, 'nearest',
tolerance=pd.Timedelta('1 hour')),
np.array([0, -1, 1], dtype=np.intp))
tol_raw = [pd.Timedelta('1 hour'),
pd.Timedelta('1 hour'),
pd.Timedelta('1 hour').to_timedelta64(), ]
tm.assert_numpy_array_equal(
idx.get_indexer(target, 'nearest',
tolerance=[np.timedelta64(x) for x in tol_raw]),
np.array([0, -1, 1], dtype=np.intp))
tol_bad = [pd.Timedelta('2 hour').to_timedelta64(),
pd.Timedelta('1 hour').to_timedelta64(),
'foo', ]
with pytest.raises(
ValueError, match='abbreviation w/o a number'):
idx.get_indexer(target, 'nearest', tolerance=tol_bad)
with pytest.raises(ValueError):
idx.get_indexer(idx[[0]], method='nearest', tolerance='foo')
def test_reasonable_keyerror(self):
# GH #1062
index = DatetimeIndex(['1/3/2000'])
try:
index.get_loc('1/1/2000')
except KeyError as e:
assert '2000' in str(e)
def test_roundtrip_pickle_with_tz(self):
# GH 8367
# round-trip of timezone
index = date_range('20130101', periods=3, tz='US/Eastern', name='foo')
unpickled = tm.round_trip_pickle(index)
tm.assert_index_equal(index, unpickled)
def test_reindex_preserves_tz_if_target_is_empty_list_or_array(self):
# GH7774
index = date_range('20130101', periods=3, tz='US/Eastern')
assert str(index.reindex([])[0].tz) == 'US/Eastern'
assert str(index.reindex(np.array([]))[0].tz) == 'US/Eastern'
def test_time_loc(self): # GH8667
from datetime import time
from pandas._libs.index import _SIZE_CUTOFF
ns = _SIZE_CUTOFF + np.array([-100, 100], dtype=np.int64)
key = time(15, 11, 30)
start = key.hour * 3600 + key.minute * 60 + key.second
step = 24 * 3600
for n in ns:
idx = pd.date_range('2014-11-26', periods=n, freq='S')
ts = pd.Series(np.random.randn(n), index=idx)
i = np.arange(start, n, step)
tm.assert_numpy_array_equal(ts.index.get_loc(key), i,
check_dtype=False)
tm.assert_series_equal(ts[key], ts.iloc[i])
left, right = ts.copy(), ts.copy()
left[key] *= -10
right.iloc[i] *= -10
tm.assert_series_equal(left, right)
def test_time_overflow_for_32bit_machines(self):
# GH8943. On some machines NumPy defaults to np.int32 (for example,
# 32-bit Linux machines). In the function _generate_regular_range
# found in tseries/index.py, `periods` gets multiplied by `strides`
# (which has value 1e9) and since the max value for np.int32 is ~2e9,
# and since those machines won't promote np.int32 to np.int64, we get
# overflow.
periods = np.int_(1000)
idx1 = pd.date_range(start='2000', periods=periods, freq='S')
assert len(idx1) == periods
idx2 = pd.date_range(end='2000', periods=periods, freq='S')
assert len(idx2) == periods
def test_nat(self):
assert DatetimeIndex([np.nan])[0] is pd.NaT
def test_week_of_month_frequency(self):
# GH 5348: "ValueError: Could not evaluate WOM-1SUN" shouldn't raise
d1 = date(2002, 9, 1)
d2 = date(2013, 10, 27)
d3 = date(2012, 9, 30)
idx1 = DatetimeIndex([d1, d2])
idx2 = DatetimeIndex([d3])
result_append = idx1.append(idx2)
expected = DatetimeIndex([d1, d2, d3])
tm.assert_index_equal(result_append, expected)
result_union = idx1.union(idx2)
expected = DatetimeIndex([d1, d3, d2])
tm.assert_index_equal(result_union, expected)
# GH 5115
result = date_range("2013-1-1", periods=4, freq='WOM-1SAT')
dates = ['2013-01-05', '2013-02-02', '2013-03-02', '2013-04-06']
expected = DatetimeIndex(dates, freq='WOM-1SAT')
tm.assert_index_equal(result, expected)
def test_hash_error(self):
index = date_range('20010101', periods=10)
with tm.assert_raises_regex(TypeError, "unhashable type: %r" %
type(index).__name__):
hash(index)
def test_stringified_slice_with_tz(self):
# GH2658
import datetime
start = datetime.datetime.now()
idx = DatetimeIndex(start=start, freq="1d", periods=10)
df = DataFrame(lrange(10), index=idx)
df["2013-01-14 23:44:34.437768-05:00":] # no exception here
def test_append_join_nondatetimeindex(self):
rng = date_range('1/1/2000', periods=10)
idx = Index(['a', 'b', 'c', 'd'])
result = rng.append(idx)
assert isinstance(result[0], Timestamp)
# it works
rng.join(idx, how='outer')
def test_comparisons_coverage(self):
rng = date_range('1/1/2000', periods=10)
# raise TypeError for now
pytest.raises(TypeError, rng.__lt__, rng[3].value)
result = rng == list(rng)
exp = rng == rng
tm.assert_numpy_array_equal(result, exp)
def test_comparisons_nat(self):
fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0])
fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0])
didx1 = pd.DatetimeIndex(['2014-01-01', pd.NaT, '2014-03-01', pd.NaT,
'2014-05-01', '2014-07-01'])
didx2 = pd.DatetimeIndex(['2014-02-01', '2014-03-01', pd.NaT, pd.NaT,
'2014-06-01', '2014-07-01'])
darr = np.array([np_datetime64_compat('2014-02-01 00:00Z'),
np_datetime64_compat('2014-03-01 00:00Z'),
np_datetime64_compat('nat'), np.datetime64('nat'),
np_datetime64_compat('2014-06-01 00:00Z'),
np_datetime64_compat('2014-07-01 00:00Z')])
cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)]
        # Check pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, np.nan), (didx1, pd.NaT)]:
result = idx1 < val
expected = np.array([False, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, True, True, True, True])
tm.assert_numpy_array_equal(result, expected)
        # Check pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, 3), (didx1, datetime(2014, 3, 1))]:
result = idx1 < val
expected = np.array([True, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
expected = np.array([False, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
expected = np.array([True, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
expected = np.array([False, False, True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
expected = np.array([False, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, False, True, True, True])
tm.assert_numpy_array_equal(result, expected)
def test_map(self):
rng = date_range('1/1/2000', periods=10)
f = lambda x: x.strftime('%Y%m%d')
result = rng.map(f)
exp = Index([f(x) for x in rng], dtype='<U8')
tm.assert_index_equal(result, exp)
def test_iteration_preserves_tz(self):
# see gh-8890
index = date_range("2012-01-01", periods=3, freq='H', tz='US/Eastern')
for i, ts in enumerate(index):
result = ts
expected = index[i]
assert result == expected
index = date_range("2012-01-01", periods=3, freq='H',
tz=dateutil.tz.tzoffset(None, -28800))
for i, ts in enumerate(index):
result = ts
expected = index[i]
assert result._repr_base == expected._repr_base
assert result == expected
# 9100
index = pd.DatetimeIndex(['2014-12-01 03:32:39.987000-08:00',
'2014-12-01 04:12:34.987000-08:00'])
for i, ts in enumerate(index):
result = ts
expected = index[i]
assert result._repr_base == expected._repr_base
assert result == expected
def test_misc_coverage(self):
rng = date_range('1/1/2000', periods=5)
result = rng.groupby(rng.day)
assert isinstance(list(result.values())[0][0], Timestamp)
idx = DatetimeIndex(['2000-01-03', '2000-01-01', '2000-01-02'])
assert not idx.equals(list(idx))
non_datetime = Index(list('abc'))
assert not idx.equals(list(non_datetime))
def test_string_index_series_name_converted(self):
# #1644
df = DataFrame(np.random.randn(10, 4),
index=date_range('1/1/2000', periods=10))
result = df.loc['1/3/2000']
assert result.name == df.index[2]
result = df.T['1/3/2000']
assert result.name == df.index[2]
def test_get_duplicates(self):
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-02',
'2000-01-03', '2000-01-03', '2000-01-04'])
result = idx.get_duplicates()
ex = DatetimeIndex(['2000-01-02', '2000-01-03'])
tm.assert_index_equal(result, ex)
def test_argmin_argmax(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
assert idx.argmin() == 1
assert idx.argmax() == 0
def test_sort_values(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
ordered = idx.sort_values()
assert ordered.is_monotonic
ordered = idx.sort_values(ascending=False)
assert ordered[::-1].is_monotonic
ordered, dexer = idx.sort_values(return_indexer=True)
assert ordered.is_monotonic
tm.assert_numpy_array_equal(dexer, np.array([1, 2, 0], dtype=np.intp))
ordered, dexer = idx.sort_values(return_indexer=True, ascending=False)
assert ordered[::-1].is_monotonic
tm.assert_numpy_array_equal(dexer, np.array([0, 2, 1], dtype=np.intp))
def test_map_bug_1677(self):
index = DatetimeIndex(['2012-04-25 09:30:00.393000'])
f = index.asof
result = index.map(f)
expected = Index([f(index[0])])
tm.assert_index_equal(result, expected)
def test_groupby_function_tuple_1677(self):
df = DataFrame(np.random.rand(100),
index=date_range("1/1/2000", periods=100))
monthly_group = df.groupby(lambda x: (x.year, x.month))
result = monthly_group.mean()
assert isinstance(result.index[0], tuple)
def test_append_numpy_bug_1681(self):
# another datetime64 bug
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
a = DataFrame()
c = DataFrame({'A': 'foo', 'B': dr}, index=dr)
result = a.append(c)
assert (result['B'] == dr).all()
def test_isin(self):
index = tm.makeDateIndex(4)
result = index.isin(index)
assert result.all()
result = index.isin(list(index))
assert result.all()
assert_almost_equal(index.isin([index[2], 5]),
np.array([False, False, True, False]))
def test_time(self):
rng = pd.date_range('1/1/2000', freq='12min', periods=10)
result = pd.Index(rng).time
expected = [t.time() for t in rng]
assert (result == expected).all()
def test_date(self):
rng = pd.date_range('1/1/2000', freq='12H', periods=10)
result = pd.Index(rng).date
expected = [t.date() for t in rng]
assert (result == expected).all()
def test_does_not_convert_mixed_integer(self):
df = tm.makeCustomDataframe(10, 10,
data_gen_f=lambda *args, **kwargs: randn(),
r_idx_type='i', c_idx_type='dt')
cols = df.columns.join(df.index, how='outer')
joined = cols.join(df.columns)
assert cols.dtype == np.dtype('O')
assert cols.dtype == joined.dtype
tm.assert_numpy_array_equal(cols.values, joined.values)
def test_join_self(self):
index = date_range('1/1/2000', periods=10)
kinds = 'outer', 'inner', 'left', 'right'
for kind in kinds:
joined = index.join(index, how=kind)
assert index is joined
def assert_index_parameters(self, index):
assert index.freq == '40960N'
assert index.inferred_freq == '40960N'
def test_ns_index(self):
nsamples = 400
ns = int(1e9 / 24414)
dtstart = np.datetime64('2012-09-20T00:00:00')
dt = dtstart + np.arange(nsamples) * np.timedelta64(ns, 'ns')
freq = ns * offsets.Nano()
index = pd.DatetimeIndex(dt, freq=freq, name='time')
self.assert_index_parameters(index)
new_index = pd.DatetimeIndex(start=index[0], end=index[-1],
freq=index.freq)
self.assert_index_parameters(new_index)
def test_join_with_period_index(self):
df = tm.makeCustomDataframe(
10, 10, data_gen_f=lambda *args: np.random.randint(2),
c_idx_type='p', r_idx_type='dt')
s = df.iloc[:5, 0]
joins = 'left', 'right', 'inner', 'outer'
for join in joins:
with tm.assert_raises_regex(ValueError,
'can only call with other '
'PeriodIndex-ed objects'):
df.columns.join(s.index, how=join)
def test_factorize(self):
idx1 = DatetimeIndex(['2014-01', '2014-01', '2014-02', '2014-02',
'2014-03', '2014-03'])
exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype=np.intp)
exp_idx = DatetimeIndex(['2014-01', '2014-02', '2014-03'])
arr, idx = idx1.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
arr, idx = idx1.factorize(sort=True)
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
# tz must be preserved
idx1 = idx1.tz_localize('Asia/Tokyo')
exp_idx = exp_idx.tz_localize('Asia/Tokyo')
arr, idx = idx1.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
| tm.assert_index_equal(idx, exp_idx) | pandas.util.testing.assert_index_equal |
import pandas as pd
import numpy as np
from tqdm import tqdm
import os
# import emoji
import gc
from utils.definitions import ROOT_DIR
from collections import OrderedDict
from utils.datareader import Datareader
def check_conditions( df, mean, std, error=(1.5,1.5)):
"""
checks if the dataframe given is near has the duration similar to the one that we want to create
similar == if its mean, std and number of emojis is at +/- error[] from it
:param df: dataframe to check
:param mean: target mean
:param std: target std
:param error:
:return:
"""
target_mean = np.mean(df['num_tracks'])
target_std = np.std(df['num_tracks'])
if mean > (target_mean + error[0]) or mean < (target_mean - error[0]):
print("error m ",mean,target_mean)
return False
if std > (target_std + error[1]) or std < (target_std - error[1]):
print("error s ",std,target_std)
return False
return True
def get_random_df_constrained( source_df, num_of_pl, min_v, max_v, mean, std, errors=(1.5, 1.5)):
"""
iterates until it creates a dataframe that satisfies the conditions.
"""
seed = 0
while True:
df = source_df[((source_df['num_tracks']) >= min_v) & ((source_df['num_tracks']) <= max_v)].sample(
n=num_of_pl, random_state=seed)
if check_conditions(df, mean=mean, std=std, error=errors):
break
seed+=1
return df,seed
def generate_train(playlists):
    # Category spec: (min_tracks, max_tracks, num_playlists, target_mean, target_std)
cates = {'cat1': (10, 50, 1000, 28.6, 11.2), 'cat2_1': (10, 40, 998, 23.8, 8.7),
'cat2_2': (70, 80, 2, 75, 4), 'cat3_1': (10, 50, 314, 29.4, 11.4),
'cat3_2': (51, 75, 425, 62, 7.2), 'cat3_3': (75, 100, 261, 87, 7.1),
'cat4': (40, 100, 1000, 63, 16.5), 'cat5': (40, 100, 1000, 63.5, 17.2),
'cat6': (40, 100, 1000, 63.6, 16.7), 'cat7': (101, 250, 1000, 150, 38.6),
'cat8': (101, 250, 1000, 151.7, 38.6), 'cat9': (150, 250, 1000, 189, 28),
'cat_10': (150, 250, 1000, 187.5, 27)}
cates = OrderedDict(sorted(cates.items(), key=lambda t: t[0]))
cat_pids = {}
seeds = [0] * len(cates)
count = 0
for cat, info in cates.items():
print(cat)
df, seeds[count] = get_random_df_constrained(playlists, min_v=info[0], max_v=info[1],
num_of_pl=info[2],
mean=info[3], std=info[4], errors=(1.5, 1.5))
cat_pids[cat] = list(df.pid)
playlists = playlists.drop(df.index)
count += 1
playlists = playlists.reset_index(drop=True)
return playlists, cat_pids
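# Usage sketch (illustrative): `playlists` is assumed to be the playlists dataframe with at
# least 'pid' and 'num_tracks' columns, and `interactions` the playlist-track table with 'pos'.
# remaining_playlists, cat_pids = generate_train(playlists)
# ...generate_test(cat_pids, playlists, interactions) then builds the test/eval splits below.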
def generate_test(cat_pids, playlists, interactions):
def build_df_none(cat_pids, playlists, cat, num_samples):
df = playlists[playlists['pid'].isin(cat_pids[cat])]
df = df[['pid', 'num_tracks']]
df['num_samples'] = num_samples
df['num_holdouts'] = df['num_tracks'] - df['num_samples']
return df
def build_df_name(cat_pids, playlists, cat, num_samples):
df = playlists[playlists['pid'].isin(cat_pids[cat])]
df = df[['name', 'pid', 'num_tracks']]
df['num_samples'] = num_samples
df['num_holdouts'] = df['num_tracks'] - df['num_samples']
return df
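    # For each category: the first `num_samples` tracks of a test playlist become visible seeds
    # (df_test_itr), the remaining tracks are held out for evaluation (df_eval_itr), and all of
    # that playlist's interactions are dropped from the training interactions to avoid leakage.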
df_test_pl = pd.DataFrame()
df_test_itr = pd.DataFrame()
df_eval_itr = pd.DataFrame()
for cat in list(cat_pids.keys()):
if cat == 'cat1':
num_samples = 0
df = build_df_name(cat_pids, playlists, cat, num_samples)
df_test_pl = pd.concat([df_test_pl, df])
# all interactions used for evaluation
df_itr = interactions[interactions['pid'].isin(cat_pids[cat])]
df_eval_itr = pd.concat([df_eval_itr, df_itr])
# clean interactions for training
interactions = interactions.drop(df_itr.index)
print("cat1 done")
elif cat == 'cat2_1' or cat == 'cat2_2':
num_samples = 1
df = build_df_name(cat_pids, playlists, cat, num_samples)
df_test_pl = pd.concat([df_test_pl, df])
df_itr = interactions[interactions['pid'].isin(cat_pids[cat])]
# clean interactions for training
interactions = interactions.drop(df_itr.index)
df_sample = df_itr[df_itr['pos'] == 0]
df_test_itr = pd.concat([df_test_itr, df_sample])
df_itr = df_itr.drop(df_sample.index)
df_eval_itr = pd.concat([df_eval_itr, df_itr])
print("cat2 done")
elif cat == 'cat3_1' or cat == 'cat3_2' or cat == 'cat3_3':
num_samples = 5
df = build_df_name(cat_pids, playlists, cat, num_samples)
df_test_pl = pd.concat([df_test_pl, df])
df_itr = interactions[interactions['pid'].isin(cat_pids[cat])]
# clean interactions for training
interactions = interactions.drop(df_itr.index)
df_sample = df_itr[(df_itr['pos'] >= 0) & (df_itr['pos'] < num_samples)]
df_test_itr = pd.concat([df_test_itr, df_sample])
df_itr = df_itr.drop(df_sample.index)
df_eval_itr = pd.concat([df_eval_itr, df_itr])
print("cat3 done")
elif cat == 'cat4':
num_samples = 5
df = build_df_none(cat_pids, playlists, cat, num_samples)
df_test_pl = pd.concat([df_test_pl, df])
df_itr = interactions[interactions['pid'].isin(cat_pids[cat])]
# clean interactions for training
interactions = interactions.drop(df_itr.index)
df_sample = df_itr[(df_itr['pos'] >= 0) & (df_itr['pos'] < num_samples)]
df_test_itr = pd.concat([df_test_itr, df_sample])
df_itr = df_itr.drop(df_sample.index)
df_eval_itr = pd.concat([df_eval_itr, df_itr])
print("cat4 done")
elif cat == 'cat5':
num_samples = 10
df = build_df_name(cat_pids, playlists, cat, num_samples)
df_test_pl = pd.concat([df_test_pl, df])
df_itr = interactions[interactions['pid'].isin(cat_pids[cat])]
# clean interactions for training
interactions = interactions.drop(df_itr.index)
df_sample = df_itr[(df_itr['pos'] >= 0) & (df_itr['pos'] < num_samples)]
df_test_itr = pd.concat([df_test_itr, df_sample])
df_itr = df_itr.drop(df_sample.index)
df_eval_itr = pd.concat([df_eval_itr, df_itr])
print("cat5 done")
elif cat == 'cat6':
num_samples = 10
df = build_df_none(cat_pids, playlists, cat, num_samples)
df_test_pl = | pd.concat([df_test_pl, df]) | pandas.concat |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# *****************************************************************************/
# * Authors: <NAME>
# *****************************************************************************/
"""transformCSV.py
This module contains the basic functions for creating the content of a configuration file from CSV.
Args:
--inFile: Path for the configuration file where the time series data values CSV
--outFile: Path for the configuration file where the time series data values INI
--debug: Boolean flag to activate verbose printing for debug use
Example:
Default usage:
$ python transformCSV.py
Specific usage:
$ python transformCSV.py
--inFile C:\raad\src\software\time-series.csv
--outFile C:\raad\src\software\time-series.ini
--debug True
"""
import sys
import datetime
import optparse
import traceback
import pandas
import numpy
import os
import pprint
import csv
if sys.version_info.major > 2:
import configparser as cF
else:
import ConfigParser as cF
class TransformMetaData(object):
debug = False
fileName = None
fileLocation = None
columnsList = None
analysisFrameFormat = None
uniqueLists = None
analysisFrame = None
def __init__(self, inputFileName=None, debug=False, transform=False, sectionName=None, outFolder=None,
outFile='time-series-madness.ini'):
if isinstance(debug, bool):
self.debug = debug
if inputFileName is None:
return
elif os.path.exists(os.path.abspath(inputFileName)):
self.fileName = inputFileName
self.fileLocation = os.path.exists(os.path.abspath(inputFileName))
(analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList) = self.CSVtoFrame(
inputFileName=self.fileName)
self.analysisFrame = analysisFrame
self.columnsList = columnNamesList
self.analysisFrameFormat = analysisFrameFormat
self.uniqueLists = uniqueLists
if transform:
passWrite = self.frameToINI(analysisFrame=analysisFrame, sectionName=sectionName, outFolder=outFolder,
outFile=outFile)
print(f"Pass Status is : {passWrite}")
return
def getColumnList(self):
return self.columnsList
def getAnalysisFrameFormat(self):
return self.analysisFrameFormat
def getuniqueLists(self):
return self.uniqueLists
def getAnalysisFrame(self):
return self.analysisFrame
@staticmethod
def getDateParser(formatString="%Y-%m-%d %H:%M:%S.%f"):
        # pandas.datetime is deprecated/removed in newer pandas; use the stdlib datetime module.
        return (lambda x: datetime.datetime.strptime(x, formatString))  # 2020-06-09 19:14:00.000
def getHeaderFromFile(self, headerFilePath=None, method=1):
if headerFilePath is None:
return (None, None)
if method == 1:
fieldnames = pandas.read_csv(headerFilePath, index_col=0, nrows=0).columns.tolist()
elif method == 2:
with open(headerFilePath, 'r') as infile:
reader = csv.DictReader(infile)
fieldnames = list(reader.fieldnames)
elif method == 3:
fieldnames = list(pandas.read_csv(headerFilePath, nrows=1).columns)
else:
fieldnames = None
fieldDict = {}
for indexName, valueName in enumerate(fieldnames):
fieldDict[valueName] = pandas.StringDtype()
return (fieldnames, fieldDict)
def CSVtoFrame(self, inputFileName=None):
if inputFileName is None:
return (None, None)
# Load File
print("Processing File: {0}...\n".format(inputFileName))
self.fileLocation = inputFileName
# Create data frame
analysisFrame = pandas.DataFrame()
analysisFrameFormat = self._getDataFormat()
inputDataFrame = pandas.read_csv(filepath_or_buffer=inputFileName,
sep='\t',
names=self._getDataFormat(),
# dtype=self._getDataFormat()
# header=None
# float_precision='round_trip'
# engine='c',
# parse_dates=['date_column'],
# date_parser=True,
# na_values=['NULL']
)
if self.debug: # Preview data.
print(inputDataFrame.head(5))
# analysisFrame.astype(dtype=analysisFrameFormat)
# Cleanup data
analysisFrame = inputDataFrame.copy(deep=True)
        # NOTE: apply() returns a new frame and the result is not assigned, so this coercion is a
        # no-op as written; assign it back (analysisFrame = analysisFrame.apply(...)) if NaN
        # coercion is actually intended at this point.
        analysisFrame.apply(pandas.to_numeric, errors='coerce')  # Fill in bad data with Not-a-Number (NaN)
# Create lists of unique strings
uniqueLists = []
columnNamesList = []
for columnName in analysisFrame.columns:
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', analysisFrame[columnName].values)
if isinstance(analysisFrame[columnName].dtypes, str):
columnUniqueList = analysisFrame[columnName].unique().tolist()
else:
columnUniqueList = None
columnNamesList.append(columnName)
uniqueLists.append([columnName, columnUniqueList])
if self.debug: # Preview data.
print(analysisFrame.head(5))
return (analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList)
def frameToINI(self, analysisFrame=None, sectionName='Unknown', outFolder=None, outFile='nil.ini'):
if analysisFrame is None:
return False
try:
if outFolder is None:
outFolder = os.getcwd()
configFilePath = os.path.join(outFolder, outFile)
configINI = cF.ConfigParser()
configINI.add_section(sectionName)
            for (columnName, columnData) in analysisFrame.items():
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', columnData.values)
print("Column Contents Length:", len(columnData.values))
print("Column Contents Type", type(columnData.values))
writeList = "["
for colIndex, colValue in enumerate(columnData):
writeList = f"{writeList}'{colValue}'"
if colIndex < len(columnData) - 1:
writeList = f"{writeList}, "
writeList = f"{writeList}]"
configINI.set(sectionName, columnName, writeList)
if not os.path.exists(configFilePath) or os.stat(configFilePath).st_size == 0:
with open(configFilePath, 'w') as configWritingFile:
configINI.write(configWritingFile)
noErrors = True
except ValueError as e:
errorString = ("ERROR in {__file__} @{framePrintNo} with {ErrorFound}".format(__file__=str(__file__),
framePrintNo=str(
sys._getframe().f_lineno),
ErrorFound=e))
print(errorString)
noErrors = False
return noErrors
@staticmethod
def _validNumericalFloat(inValue):
"""
Determines if the value is a valid numerical object.
Args:
inValue: floating-point value
Returns: Value in floating-point or Not-A-Number.
"""
try:
return numpy.float128(inValue)
except ValueError:
return numpy.nan
@staticmethod
def _calculateMean(x):
"""
Calculates the mean in a multiplication method since division produces an infinity or NaN
Args:
x: Input data set. We use a data frame.
Returns: Calculated mean for a vector data frame.
"""
try:
mean = numpy.float128(numpy.average(x, weights=numpy.ones_like(numpy.float128(x)) / numpy.float128(x.size)))
except ValueError:
mean = 0
pass
return mean
def _calculateStd(self, data):
"""
        Calculates the standard deviation in a multiplication method since division produces an infinity or NaN
Args:
data: Input data set. We use a data frame.
Returns: Calculated standard deviation for a vector data frame.
"""
sd = 0
try:
n = numpy.float128(data.size)
if n <= 1:
return numpy.float128(0.0)
# Use multiplication version of mean since numpy bug causes infinity.
mean = self._calculateMean(data)
            sd = numpy.float128(0.0)  # accumulator for the sum of squared deviations
# Calculate standard deviation
for el in data:
diff = numpy.float128(el) - numpy.float128(mean)
sd += (diff) ** 2
points = numpy.float128(n - 1)
sd = numpy.float128(numpy.sqrt(numpy.float128(sd) / numpy.float128(points)))
except ValueError:
pass
return sd
def _determineQuickStats(self, dataAnalysisFrame, columnName=None, multiplierSigma=3.0):
"""
Determines stats based on a vector to get the data shape.
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
multiplierSigma: Sigma range for the stats.
Returns: Set of stats.
"""
meanValue = 0
sigmaValue = 0
sigmaRangeValue = 0
topValue = 0
try:
            # Clean out anomalies due to random invalid inputs.
            if (columnName is not None):
                meanValue = self._calculateMean(dataAnalysisFrame[columnName])
                if numpy.isnan(meanValue):
                    meanValue = numpy.float128(1)
                sigmaValue = self._calculateStd(dataAnalysisFrame[columnName])
                if numpy.isnan(sigmaValue):
                    sigmaValue = numpy.float128(1)
                multiplier = numpy.float128(multiplierSigma)  # Stats: 1 sigma = 68%, 2 sigma = 95%, 3 sigma = 99.7
                sigmaRangeValue = (sigmaValue * multiplier)
                if numpy.isnan(sigmaRangeValue):
                    sigmaRangeValue = numpy.float128(1)
topValue = numpy.float128(meanValue + sigmaRangeValue)
print("Name:{} Mean= {}, Sigma= {}, {}*Sigma= {}".format(columnName,
meanValue,
sigmaValue,
multiplier,
sigmaRangeValue))
except ValueError:
pass
return (meanValue, sigmaValue, sigmaRangeValue, topValue)
def _cleanZerosForColumnInFrame(self, dataAnalysisFrame, columnName='cycles'):
"""
Cleans the data frame with data values that are invalid. I.E. inf, NaN
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
Returns: Cleaned dataframe.
"""
dataAnalysisCleaned = None
try:
            # Clean out anomalies due to random invalid inputs.
            (meanValue, sigmaValue, sigmaRangeValue, topValue) = self._determineQuickStats(
                dataAnalysisFrame=dataAnalysisFrame, columnName=columnName)
            # When the cycles are negative or zero we missed cleaning up a row, so keep rows >= 1.
            logicVector = (dataAnalysisFrame[columnName] >= 1)
            dataAnalysisCleaned = dataAnalysisFrame[logicVector]
            # Rows that timed out: drop anything beyond mean + multiplier * sigma.
            logicVector = (dataAnalysisCleaned[columnName] < topValue)  # Data range
dataAnalysisCleaned = dataAnalysisCleaned[logicVector]
except ValueError:
pass
return dataAnalysisCleaned
def _cleanFrame(self, dataAnalysisTemp, cleanColumn=False, columnName='cycles'):
"""
Args:
dataAnalysisTemp: Dataframe to do analysis on.
cleanColumn: Flag to clean the data frame.
columnName: Column name of the data frame.
Returns: cleaned dataframe
"""
try:
            replacementList = [pandas.NaT, numpy.inf, -numpy.inf, 'NaN', 'inf', '-inf', 'NULL']
if cleanColumn is True:
dataAnalysisTemp = self._cleanZerosForColumnInFrame(dataAnalysisTemp, columnName=columnName)
dataAnalysisTemp = dataAnalysisTemp.replace(to_replace=replacementList,
value=numpy.nan)
dataAnalysisTemp = dataAnalysisTemp.dropna()
except ValueError:
pass
return dataAnalysisTemp
@staticmethod
def _getDataFormat():
"""
Return the dataframe setup for the CSV file generated from server.
Returns: dictionary data format for pandas.
"""
dataFormat = {
"Serial_Number": pandas.StringDtype(),
"LogTime0": pandas.StringDtype(), # @todo force rename
"Id0": pandas.StringDtype(), # @todo force rename
"DriveId": pandas.StringDtype(),
"JobRunId": pandas.StringDtype(),
"LogTime1": pandas.StringDtype(), # @todo force rename
"Comment0": pandas.StringDtype(), # @todo force rename
"CriticalWarning": pandas.StringDtype(),
"Temperature": pandas.StringDtype(),
"AvailableSpare": pandas.StringDtype(),
"AvailableSpareThreshold": pandas.StringDtype(),
"PercentageUsed": pandas.StringDtype(),
"DataUnitsReadL": pandas.StringDtype(),
"DataUnitsReadU": pandas.StringDtype(),
"DataUnitsWrittenL": pandas.StringDtype(),
"DataUnitsWrittenU": pandas.StringDtype(),
"HostReadCommandsL": pandas.StringDtype(),
"HostReadCommandsU": pandas.StringDtype(),
"HostWriteCommandsL": pandas.StringDtype(),
"HostWriteCommandsU": pandas.StringDtype(),
"ControllerBusyTimeL": pandas.StringDtype(),
"ControllerBusyTimeU": pandas.StringDtype(),
"PowerCyclesL": pandas.StringDtype(),
"PowerCyclesU": pandas.StringDtype(),
"PowerOnHoursL": pandas.StringDtype(),
"PowerOnHoursU": pandas.StringDtype(),
"UnsafeShutdownsL": pandas.StringDtype(),
"UnsafeShutdownsU": pandas.StringDtype(),
"MediaErrorsL": pandas.StringDtype(),
"MediaErrorsU": pandas.StringDtype(),
"NumErrorInfoLogsL": pandas.StringDtype(),
"NumErrorInfoLogsU": pandas.StringDtype(),
"ProgramFailCountN": pandas.StringDtype(),
"ProgramFailCountR": pandas.StringDtype(),
"EraseFailCountN": pandas.StringDtype(),
"EraseFailCountR": pandas.StringDtype(),
"WearLevelingCountN": pandas.StringDtype(),
"WearLevelingCountR": pandas.StringDtype(),
"E2EErrorDetectCountN": pandas.StringDtype(),
"E2EErrorDetectCountR": pandas.StringDtype(),
"CRCErrorCountN": pandas.StringDtype(),
"CRCErrorCountR": pandas.StringDtype(),
"MediaWearPercentageN": pandas.StringDtype(),
"MediaWearPercentageR": pandas.StringDtype(),
"HostReadsN": pandas.StringDtype(),
"HostReadsR": pandas.StringDtype(),
"TimedWorkloadN": pandas.StringDtype(),
"TimedWorkloadR": pandas.StringDtype(),
"ThermalThrottleStatusN": pandas.StringDtype(),
"ThermalThrottleStatusR": pandas.StringDtype(),
"RetryBuffOverflowCountN": pandas.StringDtype(),
"RetryBuffOverflowCountR": pandas.StringDtype(),
"PLLLockLossCounterN": pandas.StringDtype(),
"PLLLockLossCounterR": pandas.StringDtype(),
"NandBytesWrittenN": pandas.StringDtype(),
"NandBytesWrittenR": pandas.StringDtype(),
"HostBytesWrittenN": pandas.StringDtype(),
"HostBytesWrittenR": pandas.StringDtype(),
"SystemAreaLifeRemainingN": pandas.StringDtype(),
"SystemAreaLifeRemainingR": pandas.StringDtype(),
"RelocatableSectorCountN": pandas.StringDtype(),
"RelocatableSectorCountR": pandas.StringDtype(),
"SoftECCErrorRateN": pandas.StringDtype(),
"SoftECCErrorRateR": pandas.StringDtype(),
"UnexpectedPowerLossN": pandas.StringDtype(),
"UnexpectedPowerLossR": pandas.StringDtype(),
"MediaErrorCountN": pandas.StringDtype(),
"MediaErrorCountR": pandas.StringDtype(),
"NandBytesReadN": pandas.StringDtype(),
"NandBytesReadR": pandas.StringDtype(),
"WarningCompTempTime": pandas.StringDtype(),
"CriticalCompTempTime": pandas.StringDtype(),
"TempSensor1": pandas.StringDtype(),
"TempSensor2": pandas.StringDtype(),
"TempSensor3": pandas.StringDtype(),
"TempSensor4": pandas.StringDtype(),
"TempSensor5": pandas.StringDtype(),
"TempSensor6": pandas.StringDtype(),
"TempSensor7": pandas.StringDtype(),
"TempSensor8": pandas.StringDtype(),
"ThermalManagementTemp1TransitionCount": pandas.StringDtype(),
"ThermalManagementTemp2TransitionCount": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp1": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp2": pandas.StringDtype(),
"Core_Num": pandas.StringDtype(),
"Id1": pandas.StringDtype(), # @todo force rename
"Job_Run_Id": pandas.StringDtype(),
"Stats_Time": pandas.StringDtype(),
"HostReads": pandas.StringDtype(),
"HostWrites": pandas.StringDtype(),
"NandReads": pandas.StringDtype(),
"NandWrites": pandas.StringDtype(),
"ProgramErrors": pandas.StringDtype(),
"EraseErrors": pandas.StringDtype(),
"ErrorCount": pandas.StringDtype(),
"BitErrorsHost1": pandas.StringDtype(),
"BitErrorsHost2": pandas.StringDtype(),
"BitErrorsHost3": pandas.StringDtype(),
"BitErrorsHost4": pandas.StringDtype(),
"BitErrorsHost5": pandas.StringDtype(),
"BitErrorsHost6": pandas.StringDtype(),
"BitErrorsHost7": pandas.StringDtype(),
"BitErrorsHost8": pandas.StringDtype(),
"BitErrorsHost9": pandas.StringDtype(),
"BitErrorsHost10": pandas.StringDtype(),
"BitErrorsHost11": pandas.StringDtype(),
"BitErrorsHost12": pandas.StringDtype(),
"BitErrorsHost13": pandas.StringDtype(),
"BitErrorsHost14": pandas.StringDtype(),
"BitErrorsHost15": pandas.StringDtype(),
"ECCFail": pandas.StringDtype(),
"GrownDefects": pandas.StringDtype(),
"FreeMemory": pandas.StringDtype(),
"WriteAllowance": pandas.StringDtype(),
"ModelString": pandas.StringDtype(),
"ValidBlocks": pandas.StringDtype(),
"TokenBlocks": pandas.StringDtype(),
"SpuriousPFCount": pandas.StringDtype(),
"SpuriousPFLocations1": pandas.StringDtype(),
"SpuriousPFLocations2": pandas.StringDtype(),
"SpuriousPFLocations3": pandas.StringDtype(),
"SpuriousPFLocations4": pandas.StringDtype(),
"SpuriousPFLocations5": pandas.StringDtype(),
"SpuriousPFLocations6": pandas.StringDtype(),
"SpuriousPFLocations7": pandas.StringDtype(),
"SpuriousPFLocations8": pandas.StringDtype(),
"BitErrorsNonHost1": pandas.StringDtype(),
"BitErrorsNonHost2": pandas.StringDtype(),
"BitErrorsNonHost3": pandas.StringDtype(),
"BitErrorsNonHost4": pandas.StringDtype(),
"BitErrorsNonHost5": pandas.StringDtype(),
"BitErrorsNonHost6": pandas.StringDtype(),
"BitErrorsNonHost7": pandas.StringDtype(),
"BitErrorsNonHost8": pandas.StringDtype(),
"BitErrorsNonHost9": pandas.StringDtype(),
"BitErrorsNonHost10": pandas.StringDtype(),
"BitErrorsNonHost11": pandas.StringDtype(),
"BitErrorsNonHost12": pandas.StringDtype(),
"BitErrorsNonHost13": pandas.StringDtype(),
"BitErrorsNonHost14": pandas.StringDtype(),
"BitErrorsNonHost15": pandas.StringDtype(),
"ECCFailNonHost": pandas.StringDtype(),
"NSversion": pandas.StringDtype(),
"numBands": pandas.StringDtype(),
"minErase": pandas.StringDtype(),
"maxErase": pandas.StringDtype(),
"avgErase": pandas.StringDtype(),
"minMVolt": pandas.StringDtype(),
"maxMVolt": pandas.StringDtype(),
"avgMVolt": pandas.StringDtype(),
"minMAmp": pandas.StringDtype(),
"maxMAmp": pandas.StringDtype(),
"avgMAmp": pandas.StringDtype(),
"comment1": pandas.StringDtype(), # @todo force rename
"minMVolt12v": pandas.StringDtype(),
"maxMVolt12v": pandas.StringDtype(),
"avgMVolt12v": pandas.StringDtype(),
"minMAmp12v": pandas.StringDtype(),
"maxMAmp12v": pandas.StringDtype(),
"avgMAmp12v": pandas.StringDtype(),
"nearMissSector": pandas.StringDtype(),
"nearMissDefect": pandas.StringDtype(),
"nearMissOverflow": pandas.StringDtype(),
"replayUNC": pandas.StringDtype(),
"Drive_Id": pandas.StringDtype(),
"indirectionMisses": pandas.StringDtype(),
"BitErrorsHost16": pandas.StringDtype(),
"BitErrorsHost17": pandas.StringDtype(),
"BitErrorsHost18": pandas.StringDtype(),
"BitErrorsHost19": pandas.StringDtype(),
"BitErrorsHost20": pandas.StringDtype(),
"BitErrorsHost21": pandas.StringDtype(),
"BitErrorsHost22": pandas.StringDtype(),
"BitErrorsHost23": pandas.StringDtype(),
"BitErrorsHost24": pandas.StringDtype(),
"BitErrorsHost25": pandas.StringDtype(),
"BitErrorsHost26": pandas.StringDtype(),
"BitErrorsHost27": pandas.StringDtype(),
"BitErrorsHost28": pandas.StringDtype(),
"BitErrorsHost29": pandas.StringDtype(),
"BitErrorsHost30": pandas.StringDtype(),
"BitErrorsHost31": pandas.StringDtype(),
"BitErrorsHost32": pandas.StringDtype(),
"BitErrorsHost33": pandas.StringDtype(),
"BitErrorsHost34": pandas.StringDtype(),
"BitErrorsHost35": pandas.StringDtype(),
"BitErrorsHost36": pandas.StringDtype(),
"BitErrorsHost37": pandas.StringDtype(),
"BitErrorsHost38": pandas.StringDtype(),
"BitErrorsHost39": pandas.StringDtype(),
"BitErrorsHost40": pandas.StringDtype(),
"XORRebuildSuccess": pandas.StringDtype(),
"XORRebuildFail": pandas.StringDtype(),
"BandReloForError": pandas.StringDtype(),
"mrrSuccess": pandas.StringDtype(),
"mrrFail": pandas.StringDtype(),
"mrrNudgeSuccess": pandas.StringDtype(),
"mrrNudgeHarmless": pandas.StringDtype(),
"mrrNudgeFail": pandas.StringDtype(),
"totalErases": pandas.StringDtype(),
"dieOfflineCount": pandas.StringDtype(),
"curtemp": pandas.StringDtype(),
"mintemp": pandas.StringDtype(),
"maxtemp": pandas.StringDtype(),
"oventemp": pandas.StringDtype(),
"allZeroSectors": pandas.StringDtype(),
"ctxRecoveryEvents": pandas.StringDtype(),
"ctxRecoveryErases": pandas.StringDtype(),
"NSversionMinor": pandas.StringDtype(),
"lifeMinTemp": pandas.StringDtype(),
"lifeMaxTemp": pandas.StringDtype(),
"powerCycles": pandas.StringDtype(),
"systemReads": pandas.StringDtype(),
"systemWrites": pandas.StringDtype(),
"readRetryOverflow": pandas.StringDtype(),
"unplannedPowerCycles": pandas.StringDtype(),
"unsafeShutdowns": pandas.StringDtype(),
"defragForcedReloCount": pandas.StringDtype(),
"bandReloForBDR": pandas.StringDtype(),
"bandReloForDieOffline": pandas.StringDtype(),
"bandReloForPFail": pandas.StringDtype(),
"bandReloForWL": pandas.StringDtype(),
"provisionalDefects": pandas.StringDtype(),
"uncorrectableProgErrors": pandas.StringDtype(),
"powerOnSeconds": pandas.StringDtype(),
"bandReloForChannelTimeout": pandas.StringDtype(),
"fwDowngradeCount": pandas.StringDtype(),
"dramCorrectablesTotal": | pandas.StringDtype() | pandas.StringDtype |
"""
accounting.py
Accounting and Financial functions.
project : pf
version : 0.0.0
status : development
modifydate :
createdate :
website : https://github.com/tmthydvnprt/pf
author : tmthydvnprt
email : <EMAIL>
maintainer : tmthydvnprt
license : MIT
copyright : Copyright 2016, tmthydvnprt
credits :
"""
import datetime
import numpy as np
import pandas as pd
from pf.constants import DAYS_IN_YEAR
from pf.util import get_age
################################################################################################################################
# Financial Statements
################################################################################################################################
def calc_balance(accounts=None, category_dict=None):
"""
Calculate daily balances of grouped assets/liabilities based on `category_dict`s from `accounts`, returns a DataFrame.
Balance sheet is split into these sections:
Assets
Current
Cash
...
Long Term
Investments
Property
...
Liabilities
Current
Credit Card
...
Long Term
Loans
...
categories = {
'Assets' : {
'Current': {
# User category keys and account DataFrame columns list for values
'Cash & Cash Equivalents': [
('Cash', 'BofA Checking'),
('Cash', 'BofA Savings'),
...
],
'User Category': [...]
...
},
'Long Term': {...}
},
'Liabilities' : {
'Current': {...},
'Long Term': {...}
}
}
"""
# Aggregate accounts based on category definition, via 3 level dictionary comprehension
balance_dict = {
(k0, k1, k2): accounts[v2].sum(axis=1) if v2 else pd.Series(0, index=accounts.index)
        for k0, v0 in category_dict.items()
        for k1, v1 in v0.items()
        for k2, v2 in v1.items()
}
# Convert to DataFrame
balance = pd.DataFrame(balance_dict)
return balance.fillna(0.0)
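# Usage sketch (illustrative; assumes `accounts` is a daily-indexed DataFrame whose columns match
# the selectors used in the category dict, e.g. ('Cash', 'BofA Checking')):
#
# categories = {
#     'Assets': {'Current': {'Cash & Cash Equivalents': [('Cash', 'BofA Checking')]}},
#     'Liabilities': {'Current': {'Credit Card': []}},
# }
# balance = calc_balance(accounts, category_dict=categories)
# balancesheet = balance_sheet(balance, period=2015)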
def balance_sheet(balance=None, period=datetime.datetime.now().year):
"""
Calculate and return a balance sheet.
Balance will be based on the last entry of account data (e.g. December 31st) for the given `period` time period,
which defaults to the current year.
All levels may be user defined by the category dictonary. The value of the last level must contain valid pandas DataFrame
column selectors, e.g. `Account Type` for single index column / level 0 access or `('Cash', 'Account Name')` for
multilevel indexing.
If a sequence of periods is passed, each period's data will be calculated and concatenated as MultiIndex columns.
Example:
```
balance = calc_balance(accounts, category_dict=categories)
balancesheet = balance_sheet(balance, period=2015)
```
"""
# Force to list, so code below is the same for all cases
if not isinstance(period, list):
period = [period]
balance_sheets = []
for p in period:
# Force period to string
p = str(p)
# Sum over Period and convert to Statement DataFrame
p_balance = pd.DataFrame(balance[p].iloc[-1])
p_balance.columns = ['$']
p_balance.index.names = ['Category', 'Type', 'Item']
# Calculate Net
net = p_balance[['$']].sum(level=[0, 1]).sum(level=1)
net.index = pd.MultiIndex.from_tuples([('Net', x0, 'Total') for x0 in net.index])
net.index.names = ['Category', 'Type', 'Item']
# Add Net
balance_df = | pd.concat([p_balance, net]) | pandas.concat |
import operator
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays.numpy_ import PandasDtype
from .base import BaseExtensionTests
class BaseSetitemTests(BaseExtensionTests):
def test_setitem_scalar_series(self, data, box_in_series):
if box_in_series:
data = pd.Series(data)
data[0] = data[1]
assert data[0] == data[1]
def test_setitem_sequence(self, data, box_in_series):
if box_in_series:
data = pd.Series(data)
original = data.copy()
data[[0, 1]] = [data[1], data[0]]
assert data[0] == original[1]
assert data[1] == original[0]
def test_setitem_sequence_mismatched_length_raises(self, data, as_array):
ser = pd.Series(data)
original = ser.copy()
value = [data[0]]
if as_array:
value = data._from_sequence(value)
xpr = "cannot set using a {} indexer with a different length"
with pytest.raises(ValueError, match=xpr.format("list-like")):
ser[[0, 1]] = value
# Ensure no modifications made before the exception
self.assert_series_equal(ser, original)
with pytest.raises(ValueError, match=xpr.format("slice")):
ser[slice(3)] = value
self.assert_series_equal(ser, original)
def test_setitem_empty_indxer(self, data, box_in_series):
if box_in_series:
data = pd.Series(data)
original = data.copy()
data[np.array([], dtype=int)] = []
self.assert_equal(data, original)
def test_setitem_sequence_broadcasts(self, data, box_in_series):
if box_in_series:
data = pd.Series(data)
data[[0, 1]] = data[2]
assert data[0] == data[2]
assert data[1] == data[2]
@pytest.mark.parametrize("setter", ["loc", "iloc"])
def test_setitem_scalar(self, data, setter):
arr = pd.Series(data)
setter = getattr(arr, setter)
operator.setitem(setter, 0, data[1])
assert arr[0] == data[1]
def test_setitem_loc_scalar_mixed(self, data):
df = pd.DataFrame({"A": np.arange(len(data)), "B": data})
df.loc[0, "B"] = data[1]
assert df.loc[0, "B"] == data[1]
def test_setitem_loc_scalar_single(self, data):
df = pd.DataFrame({"B": data})
df.loc[10, "B"] = data[1]
assert df.loc[10, "B"] == data[1]
def test_setitem_loc_scalar_multiple_homogoneous(self, data):
df = pd.DataFrame({"A": data, "B": data})
df.loc[10, "B"] = data[1]
assert df.loc[10, "B"] == data[1]
def test_setitem_iloc_scalar_mixed(self, data):
df = pd.DataFrame({"A": np.arange(len(data)), "B": data})
df.iloc[0, 1] = data[1]
assert df.loc[0, "B"] == data[1]
def test_setitem_iloc_scalar_single(self, data):
df = pd.DataFrame({"B": data})
df.iloc[10, 0] = data[1]
assert df.loc[10, "B"] == data[1]
def test_setitem_iloc_scalar_multiple_homogoneous(self, data):
df = pd.DataFrame({"A": data, "B": data})
df.iloc[10, 1] = data[1]
assert df.loc[10, "B"] == data[1]
@pytest.mark.parametrize(
"mask",
[
np.array([True, True, True, False, False]),
pd.array([True, True, True, False, False], dtype="boolean"),
pd.array([True, True, True, pd.NA, pd.NA], dtype="boolean"),
],
ids=["numpy-array", "boolean-array", "boolean-array-na"],
)
def test_setitem_mask(self, data, mask, box_in_series):
arr = data[:5].copy()
expected = arr.take([0, 0, 0, 3, 4])
if box_in_series:
arr = pd.Series(arr)
expected = pd.Series(expected)
arr[mask] = data[0]
self.assert_equal(expected, arr)
def test_setitem_mask_raises(self, data, box_in_series):
# wrong length
mask = np.array([True, False])
if box_in_series:
data = pd.Series(data)
with pytest.raises(IndexError, match="wrong length"):
data[mask] = data[0]
mask = pd.array(mask, dtype="boolean")
with pytest.raises(IndexError, match="wrong length"):
data[mask] = data[0]
def test_setitem_mask_boolean_array_with_na(self, data, box_in_series):
mask = pd.array(np.zeros(data.shape, dtype="bool"), dtype="boolean")
mask[:3] = True
mask[3:5] = pd.NA
if box_in_series:
data = pd.Series(data)
data[mask] = data[0]
assert (data[:3] == data[0]).all()
@pytest.mark.parametrize(
"idx",
[[0, 1, 2], pd.array([0, 1, 2], dtype="Int64"), np.array([0, 1, 2])],
ids=["list", "integer-array", "numpy-array"],
)
def test_setitem_integer_array(self, data, idx, box_in_series):
arr = data[:5].copy()
expected = data.take([0, 0, 0, 3, 4])
if box_in_series:
arr = pd.Series(arr)
expected = pd.Series(expected)
arr[idx] = arr[0]
self.assert_equal(arr, expected)
@pytest.mark.parametrize(
"idx, box_in_series",
[
([0, 1, 2, pd.NA], False),
pytest.param(
[0, 1, 2, pd.NA], True, marks=pytest.mark.xfail(reason="GH-31948")
),
(pd.array([0, 1, 2, pd.NA], dtype="Int64"), False),
(pd.array([0, 1, 2, pd.NA], dtype="Int64"), False),
],
ids=["list-False", "list-True", "integer-array-False", "integer-array-True"],
)
def test_setitem_integer_with_missing_raises(self, data, idx, box_in_series):
arr = data.copy()
# TODO(xfail) this raises KeyError about labels not found (it tries label-based)
# for list of labels with Series
if box_in_series:
arr = pd.Series(data, index=[tm.rands(4) for _ in range(len(data))])
msg = "Cannot index with an integer indexer containing NA values"
with pytest.raises(ValueError, match=msg):
arr[idx] = arr[0]
@pytest.mark.parametrize("as_callable", [True, False])
@pytest.mark.parametrize("setter", ["loc", None])
def test_setitem_mask_aligned(self, data, as_callable, setter):
ser = pd.Series(data)
mask = np.zeros(len(data), dtype=bool)
mask[:2] = True
if as_callable:
mask2 = lambda x: mask
else:
mask2 = mask
if setter:
# loc
target = getattr(ser, setter)
else:
# Series.__setitem__
target = ser
operator.setitem(target, mask2, data[5:7])
ser[mask2] = data[5:7]
assert ser[0] == data[5]
assert ser[1] == data[6]
@pytest.mark.parametrize("setter", ["loc", None])
def test_setitem_mask_broadcast(self, data, setter):
ser = pd.Series(data)
mask = np.zeros(len(data), dtype=bool)
mask[:2] = True
if setter: # loc
target = getattr(ser, setter)
else: # __setitem__
target = ser
operator.setitem(target, mask, data[10])
assert ser[0] == data[10]
assert ser[1] == data[10]
def test_setitem_expand_columns(self, data):
df = pd.DataFrame({"A": data})
result = df.copy()
result["B"] = 1
expected = pd.DataFrame({"A": data, "B": [1] * len(data)})
self.assert_frame_equal(result, expected)
result = df.copy()
result.loc[:, "B"] = 1
self.assert_frame_equal(result, expected)
# overwrite with new type
result["B"] = data
expected = pd.DataFrame({"A": data, "B": data})
self.assert_frame_equal(result, expected)
def test_setitem_expand_with_extension(self, data):
df = pd.DataFrame({"A": [1] * len(data)})
result = df.copy()
result["B"] = data
expected = pd.DataFrame({"A": [1] * len(data), "B": data})
self.assert_frame_equal(result, expected)
result = df.copy()
result.loc[:, "B"] = data
self.assert_frame_equal(result, expected)
def test_setitem_frame_invalid_length(self, data):
df = pd.DataFrame({"A": [1] * len(data)})
xpr = "Length of values does not match length of index"
with pytest.raises(ValueError, match=xpr):
df["B"] = data[:5]
@pytest.mark.xfail(reason="GH#20441: setitem on extension types.")
def test_setitem_tuple_index(self, data):
s = pd.Series(data[:2], index=[(0, 0), (0, 1)])
expected = pd.Series(data.take([1, 1]), index=s.index)
s[(0, 1)] = data[1]
self.assert_series_equal(s, expected)
def test_setitem_slice(self, data, box_in_series):
arr = data[:5].copy()
expected = data.take([0, 0, 0, 3, 4])
if box_in_series:
arr = pd.Series(arr)
expected = pd.Series(expected)
arr[:3] = data[0]
self.assert_equal(arr, expected)
def test_setitem_loc_iloc_slice(self, data):
arr = data[:5].copy()
s = pd.Series(arr, index=["a", "b", "c", "d", "e"])
expected = pd.Series(data.take([0, 0, 0, 3, 4]), index=s.index)
result = s.copy()
result.iloc[:3] = data[0]
self.assert_equal(result, expected)
result = s.copy()
result.loc[:"c"] = data[0]
self.assert_equal(result, expected)
def test_setitem_slice_mismatch_length_raises(self, data):
arr = data[:5]
with pytest.raises(ValueError):
arr[:1] = arr[:2]
def test_setitem_slice_array(self, data):
arr = data[:5].copy()
arr[:5] = data[-5:]
self.assert_extension_array_equal(arr, data[-5:])
def test_setitem_scalar_key_sequence_raise(self, data):
arr = data[:5].copy()
with pytest.raises(ValueError):
arr[0] = arr[[0, 1]]
def test_setitem_preserves_views(self, data):
# GH#28150 setitem shouldn't swap the underlying data
view1 = data.view()
view2 = data[:]
data[0] = data[1]
assert view1[0] == data[1]
assert view2[0] == data[1]
def test_setitem_nullable_mask(self, data):
# GH 31446
# TODO: there is some issue with PandasArray, therefore,
# TODO: skip the setitem test for now, and fix it later
if data.dtype != PandasDtype("object"):
arr = data[:5]
expected = data.take([0, 0, 0, 3, 4])
mask = pd.array([True, True, True, False, False])
arr[mask] = data[0]
self.assert_extension_array_equal(expected, arr)
def test_setitem_dataframe_column_with_index(self, data):
# https://github.com/pandas-dev/pandas/issues/32395
df = expected = pd.DataFrame({"data": pd.Series(data)})
result = | pd.DataFrame(index=df.index) | pandas.DataFrame |
# %% [markdown]
# This python script takes audio files from "filedata" from sonicboom, runs each audio file through
# a Fast Fourier Transform, plots the FFT image, splits the FFT'd images into train, test and validation
# sets, and pastes them into their respective folders (a sketch of the per-file FFT plotting step is
# included after the train/test split below)
# Import Dependencies
import numpy as np
import pandas as pd
import scipy
from scipy import io
from scipy.io.wavfile import read as wavread
from scipy.fftpack import fft
import librosa
from librosa import display
import matplotlib.pyplot as plt
from glob import glob
import sklearn
from sklearn.model_selection import train_test_split
import os
from PIL import Image
import pathlib
import sonicboom
from joblib import Parallel, delayed
# %% [markdown]
# ## Read and add filepaths to original UrbanSound metadata
filedata = sonicboom.init_data('./data/UrbanSound8K/') #Read filedata as written in sonicboom
#Initialize empty dataframes to later enable saving the images into their respective folders
train = pd.DataFrame()
test = pd.DataFrame()
validation = pd.DataFrame()
""" for x = in range(10):
fileclass = filedata['classID'] == x
filtered = filedata[fileclass]
trainTemp, testTemp = train_test_split(filtered, test_size=0.20, random_state=0)
train = pd.concat([train, trainTemp])
test = pd.concat([test, testTemp]) """
# %%
# Read the entire filedata
filtered = filedata #Read the data in
trainTemp, valTemp = train_test_split(filtered, test_size=0.10, random_state=0) #Take 10% as blind test set
trainTemp, testTemp = train_test_split(trainTemp, test_size=0.20, random_state=0) #Split the remaining into an 80-20 train test split
# Assign the different splits to different dataframes
train = pd.concat([train, trainTemp])
test = | pd.concat([test, testTemp]) | pandas.concat |
import sys
from pathlib import Path
from pprint import pprint
import pandas as pd
from train_model import train_model_pipe
parent_dir = str(Path(__file__).parent.parent.resolve())
sys.path.append(parent_dir)
if __name__ == '__main__':
from features.build_features import clean_for_reg
raw_d = Path('../../data/raw')
repo_d = Path('../../reports')
repo_fig_d = Path('../../reports/figures')
df_train = | pd.read_csv(raw_d / 'train.csv') | pandas.read_csv |
import tarfile
with tarfile.open('data/aclImdb_v1.tar.gz', 'r:gz') as tar:
tar.extractall()
import pyprind
import pandas as pd
import os
basepath = 'aclImdb'
labels = {'pos':1, 'neg':0}
pbar = pyprind.ProgBar(50000)
df = pd.DataFrame()
for s in ('test','train'):
for l in ('pos','neg'):
path = os.path.join(basepath, s, l)
for file in sorted(os.listdir(path)):
with open(os.path.join(path, file), 'r', encoding='utf-8') as infile:
txt = infile.read()
df = df.append([[txt, labels[l]]], ignore_index=True)
pbar.update()
df.columns = ['review', 'sentiment']
df.head()
import numpy as np
np.random.seed(0)
df = df.reindex(np.random.permutation(df.index))
df.head()
df.to_csv('data/imdb.csv', index=False, encoding='utf-8')
df = | pd.read_csv('data/imdb.csv', encoding='utf-8') | pandas.read_csv |
# SCOTT ST. LOUIS, Michigan Publishing, Fall 2020
# Preliminary Metadata Curation: ACLS Humanities E-Book Collection on Fulcrum
import csv
import pandas as pd
def read_csv_with_pandas(filename):
"""
Function written by <NAME>, Digital Publishing Coordinator,
during troubleshooting with Scott St. Louis. November 13, 2020.
Opens the CSV file into a DataFrame object,
drops all columns except the ones specified in output_headers,
and converts the DataFrame to a list of row lists.
Parameters
----------
filename: str
The name of the CSV file.
Returns
----------
list_of_row_lists: list of lists
Records from metadata CSV file.
"""
df = | pd.read_csv(filename,dtype=str) | pandas.read_csv |
import random
import pandas as pd
from detoxai.preprocess import *
from sklearn import model_selection as ms
import os
import math
import json
class LoadData:
def __init__(self, task='all'):
with open('config.json', 'r') as f:
config = json.load(f)
self.paths = config['paths']
self.task = task
# self.data_dir = config['data']['data_dir']
self.models_dir = config['paths']['trained_models_dir']
self.train_test_dir = config['paths']['train_test_dir']
self.results_dir = config['paths']['results_dir']
if task == 'selfharm':
self.task_path = config['paths']['selfharm_file_path']
elif task == 'hatespeech':
self.task_path = config['paths']['hatespeech_file_path']
elif task == 'spam':
self.task_path = config['paths']['spam_file_path']
self.check_directories()
def check_directories(self):
temp1, temp2, temp3 = '..', '..', '..'
for i in range(1, len(self.models_dir.split('/'))):
temp1 = '/'.join([temp1, self.models_dir.split('/')[i]])
temp2 = '/'.join([temp2, self.train_test_dir.split('/')[i]])
temp3 = '/'.join([temp3, self.results_dir.split('/')[i]])
for temp in [temp1, temp2, temp3]:
if not os.path.isdir(temp):
os.mkdir(temp)
print('directory created: ', temp)
def load_by_name_or_path(self, file_path_or_name=None, num_samples='all'):
print('loading for ', self.task, ' from ', file_path_or_name)
if file_path_or_name == 'train_set' or file_path_or_name == 'test_set':
data = pd.read_csv(self.paths[self.task + '_' + file_path_or_name + '_path'])
elif file_path_or_name == 'stories':
data = pd.read_csv(self.paths['stories_file_path'])
data['prediction'] = None
data['probability'] = None
else:
data = pd.read_csv(file_path_or_name)
print('total number of samples available: ', len(data))
if num_samples == 'all':
print('taking all the data available in ', file_path_or_name)
else:
print('taking only ', num_samples, ' samples')
data = get_n_samples(data, num_samples)
return data
def split_data(self, train_percent, save_files=True):
tasks = ["hatespeech", "selfharm", "spam"]
if self.task !='all':
tasks = [self.task]
for task in tasks:
data = pd.read_csv(self.paths[task+'_file_path'])
normal = data[data['label'] == 0]
normal.reset_index(drop=True, inplace=True)
toxic = data[data['label'] == 1]
toxic.reset_index(drop=True, inplace=True)
normal_ = normal.sample(frac=1).reset_index(drop=True)
toxic_ = toxic.sample(frac=1).reset_index(drop=True)
if len(normal_)>len(toxic_)*2:
normal = normal_.iloc[:int(len(toxic)*1.2),:]
extra_test = normal_.iloc[int(len(toxic)*1.2):,:].reset_index(drop=True)
toxic = toxic_
elif len(toxic_)>len(normal_)*2:
toxic = toxic_.iloc[:int(len(normal_)*1.2),:]
extra_test = toxic_.iloc[int(len(normal)*1.2):,:].reset_index(drop=True)
normal = normal_
else:
toxic = toxic_.reset_index(drop=True)
normal = normal_.reset_index(drop=True)
extra_test = []
data = pd.concat([normal,toxic])
sss = ms.StratifiedShuffleSplit(n_splits=2, test_size=len(data)-int(train_percent*len(data)), train_size=int(train_percent*len(data)))
for train_index, test_index in sss.split(data['data'].values, data['label'].values):
x_train = data.iloc[train_index,:]
x_test = data.iloc[test_index, :]
x_train.reset_index(drop=True, inplace=True)
x_test.reset_index(drop=True, inplace=True)
if len(extra_test)>0:
x_test = pd.concat([x_test, extra_test])
x_test.reset_index(drop=True, inplace=True)
if save_files:
x_train.to_csv( self.paths[task+'_train_set_path'], index=False)
x_test.to_csv( self.paths[task+'_test_set_path'], index=False)
print('training and test samples are saved in the folder ', self.paths[task+'_train_set_path'])
print('num training samples ', len(x_train))
print('num test samples ', len(x_test))
else:
print('training and test samples are not saved to disk')
return x_train, x_test
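# Minimal usage sketch for LoadData (assumes config.json and the task CSVs referenced in its
# 'paths' section exist on disk):
#
#     loader = LoadData(task='spam')
#     train_df, test_df = loader.split_data(train_percent=0.8)  # also writes the split CSVs
#     sample = loader.load_by_name_or_path('train_set', num_samples=500)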
def get_n_samples(x,num_samples):
x_sample_0 = x[x['label'] == 0][:math.floor(num_samples)]
x_sample_1 = x[x['label'] == 1][:math.floor(num_samples)]
x_ = | pd.concat([x_sample_0, x_sample_1]) | pandas.concat |
from datasets import load_dataset
import streamlit as st
import pandas as pd
from googletrans import Translator
import session_state
import time
from fuzzywuzzy import fuzz,process
# Security
#passlib,hashlib,bcrypt,scrypt
import hashlib
# DB Management
import sqlite3
import os
import psycopg2
# import torch
# from transformers import PegasusForConditionalGeneration, PegasusTokenizer
state = session_state.get(question_number=0)
translator = Translator()
# model_name = 'tuner007/pegasus_paraphrase'
# torch_device = 'cuda' if torch.cuda.is_available() else 'cpu'
# tokenizer = PegasusTokenizer.from_pretrained(model_name)
# model = PegasusForConditionalGeneration.from_pretrained(model_name).to(torch_device)
# def get_response(input_text,num_return_sequences,num_beams):
# batch = tokenizer([input_text],truncation=True,padding='longest',max_length=60, return_tensors="pt").to(torch_device)
# translated = model.generate(**batch,max_length=60,num_beams=num_beams, num_return_sequences=num_return_sequences, temperature=1.5)
# tgt_text = tokenizer.batch_decode(translated, skip_special_tokens=True)
# return tgt_text
@st.cache(suppress_st_warning=True)
def get_qa_pair_low(file, rand):
df = pd.read_csv(file, sep="\t", lineterminator='\n')
a = df.sample(1).reset_index()
st.text(df)
return {
"text": a["text"][0],
"question": a["question"][0],
"answer": a["answer\r"][0]
}
@st.cache(suppress_st_warning=True)
def get_qa_pair_mid(file, rand):
df = pd.read_csv(file,sep="\t", lineterminator='\n')
a = df.sample(1).reset_index()
return {
"text": a["text"][0],
"question": a["question"][0],
"answer": a["answer\r"][0]
}
@st.cache(suppress_st_warning=True)
def get_qa_pair_high(file, rand):
df = pd.read_csv(file,sep="\t", lineterminator='\n')
a = df.sample(1).reset_index()
return {
"text": a["text"][0],
"question": a["question"][0],
"answer": a["answer\r"][0]
}
@st.cache(suppress_st_warning=True)
def getmcq(rand):
df = pd.read_csv("mcq.tsv",sep="\t", lineterminator='\n')
a = df.sample(1).reset_index()
ind = df.keys()
return {
"question": a[ind[0]][0],
"real_ans": a[ind[1]][0],
"conf_ans": a[ind[2]][0]
}
# DB Functions
def make_hashes(password):
return hashlib.sha256(str.encode(password)).hexdigest()
def check_hashes(password, hashed_text):
if make_hashes(password) == hashed_text:
return hashed_text
return False
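# Quick illustration of the two helpers above: check_hashes() returns the stored hash (truthy)
# when the candidate password matches, and False otherwise.
#
#     stored = make_hashes('example-password')
#     check_hashes('example-password', stored)  # -> stored
#     check_hashes('wrong-password', stored)    # -> False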
conn = psycopg2.connect('postgres://vjnsllyatxtnin:34fa0fecfe8a62314d<EMAIL>d38ce<EMAIL>cbf<EMAIL>fc<EMAIL>@ec2-50-19-160-40.compute-1.<EMAIL>.com:5432/d96bve3gfgrvgg', sslmode='require')
c = conn.cursor()
def create_usertable():
c.execute('CREATE TABLE IF NOT EXISTS userstable(username TEXT PRIMARY KEY, fullname TEXT, password TEXT)')
def create_stats_table():
c.execute('CREATE TABLE IF NOT EXISTS stats_table(username TEXT PRIMARY KEY, comprehension_score DOUBLE PRECISION DEFAULT 0.0, synonyms_score INTEGER DEFAULT 0, app_usage_score INTEGER DEFAULT 0, overall_score INTEGER DEFAULT 0)')
def add_userdata(username, fullname, password):
try:
c.execute('INSERT INTO userstable(username, fullname,password) VALUES (%s,%s,%s)', (username, fullname, password))
conn.commit()
except:
st.text("Unable to add value in database, as username already exists.")
def add_statsdata(username):
c.execute('INSERT INTO stats_table(username, comprehension_score, synonyms_score, app_usage_score, overall_score) VALUES (%s,%s,%s,%s,%s)', (username, 0.0, 0, 0, 0))
conn.commit()
def check_username(username):
c.execute("SELECT * FROM userstable WHERE username = %s", (username,))
data = c.rowcount
return data
def login_user(username, password):
c.execute('SELECT * FROM userstable WHERE username = %s AND password = %s', (username, password))
data = c.fetchall()
return data
def is_admin_user(username, password):
if username == 'admin' and password == '<PASSWORD>':
return True
else :
return False
def view_all_users():
c.execute('SELECT * FROM userstable')
data = c.fetchall()
return data
def get_comprehension_score():
c.execute('SELECT comprehension_score FROM stats_table where username = %s', (st.session_state['username'],))
data = c.fetchall().pop()[0]
return data
def update_comprehension_score(score):
c.execute('UPDATE stats_table set comprehension_score = %s, overall_score = overall_score + 1 where username = %s',(score, st.session_state['username']))
conn.commit()
def update_synonym_score():
c.execute('UPDATE stats_table set synonyms_score = synonyms_score + 1, overall_score = overall_score + 1 where username = %s', (st.session_state['username'],))
conn.commit()
def update_usage_score():
c.execute('UPDATE stats_table set app_usage_score = app_usage_score + 1, overall_score = overall_score + 1 where username = %s', (st.session_state['username'],))
conn.commit()
def get_leaderboard():
c.execute('SELECT * FROM stats_table order by overall_score DESC')
data = c.fetchall()
return data
def drive_basic_login():
st.sidebar.image("angrezzi.jpeg")
menu = ["Login", "SignUp"]
choice = st.sidebar.selectbox("Menu", menu)
if choice == "Login":
username = st.sidebar.text_input("<NAME>")
password = st.sidebar.text_input("Password", type='password')
if st.sidebar.checkbox("Login"):
create_usertable()
hashed_pswd = make_hashes(password)
result = login_user(username, check_hashes(password, hashed_pswd))
#fullname = get_user_fullname(username, check_hashes(password, hashed_pswd))
admin = is_admin_user(username, password)
if result:
st.session_state['username'] = username
st.success("Hi {}".format(username))
else:
st.warning("Incorrect Username/Password")
return result, admin
elif choice == "SignUp":
st.subheader("Create New Account")
fullname = st.text_input("Full Name")
new_user = st.text_input("Username")
new_password = st.text_input("Password", type='password')
if st.button("Signup"):
create_usertable()
create_stats_table()
row_count = check_username(new_user)
if row_count == 0:
add_userdata(new_user, fullname, make_hashes(new_password))
add_statsdata(new_user)
st.success("You have successfully created a valid account")
st.info("Go to Login Menu to login")
else :
st.warning("This username already exists. Any other username which comes to your mind?")
return False,False
def main():
result, is_admin = drive_basic_login()
if result:
scoreM = 0
score = 0
"""Adaptive Education"""
# authentication logic + get which class student belongs to : high, mid, low
stud_class = "low"
if is_admin:
option = st.radio(
'Choose anything you want to practise!',
("Reading Comprehension", "Translate to Hindi", "Translate to English", "Synonyms", "View User Details", "View Leader Board"))
else :
option = st.radio(
'Choose anything you want to practise!',
("Reading Comprehension", "Translate to Hindi", "Translate to English","Synonyms"))
st.write('You selected:', option)
if option == "Reading Comprehension":
st.title("Reading Comprehension")
st.text("Question Number : " + str(state.question_number))
comprehension_score = get_comprehension_score()
st.text("Your Score : " + str(comprehension_score))
if comprehension_score <= 5.0:
cqa = get_qa_pair_low("low.tsv", state.question_number)
elif comprehension_score > 5.0 and comprehension_score <= 15.0 :
cqa = get_qa_pair_mid("high.tsv", state.question_number)
else :
cqa = get_qa_pair_high("high.tsv", state.question_number)
st.subheader("Context : ")
st.markdown(cqa['text'])
st.subheader("Question : ")
st.markdown(cqa['question'])
message1 = st.text_area("Enter your answer")
# a = st.selectbox('Answer:', ["Please select an answer","Confirm Answer"])
# a = st.radio("Confirm : ", ["Answering","Confirm!"])
# if a != "Answering":
if st.button("Check"):
st.subheader("Your Answer :")
st.text(message1)
score = 0
if fuzz.ratio(message1.lower(), cqa["answer"].lower()) > 75:
score = fuzz.ratio(message1.lower(), cqa["answer"].lower())
if score > 0 :
new_score = comprehension_score + score
update_comprehension_score(new_score)
st.text("Score : " + str(score))
# if score:
# st.text("Correct!")
# st.subheader("Full Answer : ")
# st.text(cqa['answer'])
# else:
# st.text("Incorrect!")
# st.subheader("Actual Answer : ")
# st.text(cqa['answer'])
# writing to score.tsv
fp = open("score.tsv", "a")
fp.write(cqa["text"].strip() + "\t" + cqa["question"].strip() + "\t" + cqa[
"answer"].strip() + "\t" + message1.strip() + "\t" + str(score) + "\n")
fp.close()
if st.button("Show Answers"):
try:
df = pd.read_csv("score.tsv", sep="\t")
json = df.to_json(orient="records")
# json2 = {}
# for i in range(len(json["Text"])):
# json2[i] = {}
# for i in range(len(json["Text"])):
# json2[i][]
st.json(json)
except:
st.text("No Questions Answered Yet!")
if st.button("Get New Question"):
state.question_number += 1
scoreM += score
elif option == 'Translate to Hindi':
update_usage_score()
txt = st.text_area("Enter here to Translate", "Type Here")
out = translator.translate(txt, dest="hi")
st.subheader("Hindi Text : " + out.text)
elif option == "Translate to English":
update_usage_score()
txt2 = st.text_area("Enter here to Translate", "Type Here")
out2 = translator.translate(txt2, dest="en")
st.subheader("Hindi Text : " + out2.text)
elif option == 'View User Details':
user_result = view_all_users()
user_details = | pd.DataFrame(user_result, columns=["Username", "FullName", "Password"]) | pandas.DataFrame |
from datetime import datetime, timedelta
import numpy as np
import pytest
from pandas._libs import iNaT
from pandas._libs.algos import Infinity, NegInfinity
import pandas.util._test_decorators as td
from pandas import DataFrame, Series
import pandas._testing as tm
class TestRank:
s = Series([1, 3, 4, 2, np.nan, 2, 1, 5, np.nan, 3])
df = DataFrame({"A": s, "B": s})
results = {
"average": np.array([1.5, 5.5, 7.0, 3.5, np.nan, 3.5, 1.5, 8.0, np.nan, 5.5]),
"min": np.array([1, 5, 7, 3, np.nan, 3, 1, 8, np.nan, 5]),
"max": np.array([2, 6, 7, 4, np.nan, 4, 2, 8, np.nan, 6]),
"first": np.array([1, 5, 7, 3, np.nan, 4, 2, 8, np.nan, 6]),
"dense": np.array([1, 3, 4, 2, np.nan, 2, 1, 5, np.nan, 3]),
}
@pytest.fixture(params=["average", "min", "max", "first", "dense"])
def method(self, request):
"""
Fixture for trying all rank methods
"""
return request.param
@td.skip_if_no_scipy
def test_rank(self, float_frame):
import scipy.stats # noqa:F401
from scipy.stats import rankdata
float_frame["A"][::2] = np.nan
float_frame["B"][::3] = np.nan
float_frame["C"][::4] = np.nan
float_frame["D"][::5] = np.nan
ranks0 = float_frame.rank()
ranks1 = float_frame.rank(1)
mask = np.isnan(float_frame.values)
fvals = float_frame.fillna(np.inf).values
exp0 = np.apply_along_axis(rankdata, 0, fvals)
exp0[mask] = np.nan
exp1 = np.apply_along_axis(rankdata, 1, fvals)
exp1[mask] = np.nan
tm.assert_almost_equal(ranks0.values, exp0)
tm.assert_almost_equal(ranks1.values, exp1)
# integers
df = DataFrame(np.random.randint(0, 5, size=40).reshape((10, 4)))
result = df.rank()
exp = df.astype(float).rank()
tm.assert_frame_equal(result, exp)
result = df.rank(1)
exp = df.astype(float).rank(1)
tm.assert_frame_equal(result, exp)
def test_rank2(self):
df = DataFrame([[1, 3, 2], [1, 2, 3]])
expected = DataFrame([[1.0, 3.0, 2.0], [1, 2, 3]]) / 3.0
result = df.rank(1, pct=True)
tm.assert_frame_equal(result, expected)
df = DataFrame([[1, 3, 2], [1, 2, 3]])
expected = df.rank(0) / 2.0
result = df.rank(0, pct=True)
tm.assert_frame_equal(result, expected)
df = DataFrame([["b", "c", "a"], ["a", "c", "b"]])
expected = DataFrame([[2.0, 3.0, 1.0], [1, 3, 2]])
result = df.rank(1, numeric_only=False)
tm.assert_frame_equal(result, expected)
expected = DataFrame([[2.0, 1.5, 1.0], [1, 1.5, 2]])
result = df.rank(0, numeric_only=False)
tm.assert_frame_equal(result, expected)
df = DataFrame([["b", np.nan, "a"], ["a", "c", "b"]])
expected = DataFrame([[2.0, np.nan, 1.0], [1.0, 3.0, 2.0]])
result = df.rank(1, numeric_only=False)
tm.assert_frame_equal(result, expected)
expected = DataFrame([[2.0, np.nan, 1.0], [1.0, 1.0, 2.0]])
result = df.rank(0, numeric_only=False)
tm.assert_frame_equal(result, expected)
# f7u12, this does not work without extensive workaround
data = [
[datetime(2001, 1, 5), np.nan, datetime(2001, 1, 2)],
[datetime(2000, 1, 2), datetime(2000, 1, 3), datetime(2000, 1, 1)],
]
df = DataFrame(data)
# check the rank
expected = DataFrame([[2.0, np.nan, 1.0], [2.0, 3.0, 1.0]])
result = df.rank(1, numeric_only=False, ascending=True)
tm.assert_frame_equal(result, expected)
expected = DataFrame([[1.0, np.nan, 2.0], [2.0, 1.0, 3.0]])
result = df.rank(1, numeric_only=False, ascending=False)
tm.assert_frame_equal(result, expected)
df = DataFrame({"a": [1e-20, -5, 1e-20 + 1e-40, 10, 1e60, 1e80, 1e-30]})
exp = DataFrame({"a": [3.5, 1.0, 3.5, 5.0, 6.0, 7.0, 2.0]})
tm.assert_frame_equal(df.rank(), exp)
def test_rank_does_not_mutate(self):
# GH#18521
# Check rank does not mutate DataFrame
df = DataFrame(np.random.randn(10, 3), dtype="float64")
expected = df.copy()
df.rank()
result = df
tm.assert_frame_equal(result, expected)
def test_rank_mixed_frame(self, float_string_frame):
float_string_frame["datetime"] = datetime.now()
float_string_frame["timedelta"] = timedelta(days=1, seconds=1)
result = float_string_frame.rank(1)
expected = float_string_frame.rank(1, numeric_only=True)
tm.assert_frame_equal(result, expected)
@td.skip_if_no_scipy
def test_rank_na_option(self, float_frame):
import scipy.stats # noqa:F401
from scipy.stats import rankdata
float_frame["A"][::2] = np.nan
float_frame["B"][::3] = np.nan
float_frame["C"][::4] = np.nan
float_frame["D"][::5] = np.nan
# bottom
ranks0 = float_frame.rank(na_option="bottom")
ranks1 = float_frame.rank(1, na_option="bottom")
fvals = float_frame.fillna(np.inf).values
exp0 = np.apply_along_axis(rankdata, 0, fvals)
exp1 = np.apply_along_axis(rankdata, 1, fvals)
tm.assert_almost_equal(ranks0.values, exp0)
tm.assert_almost_equal(ranks1.values, exp1)
# top
ranks0 = float_frame.rank(na_option="top")
ranks1 = float_frame.rank(1, na_option="top")
fval0 = float_frame.fillna((float_frame.min() - 1).to_dict()).values
fval1 = float_frame.T
fval1 = fval1.fillna((fval1.min() - 1).to_dict()).T
fval1 = fval1.fillna(np.inf).values
exp0 = np.apply_along_axis(rankdata, 0, fval0)
exp1 = np.apply_along_axis(rankdata, 1, fval1)
tm.assert_almost_equal(ranks0.values, exp0)
tm.assert_almost_equal(ranks1.values, exp1)
# descending
# bottom
ranks0 = float_frame.rank(na_option="top", ascending=False)
ranks1 = float_frame.rank(1, na_option="top", ascending=False)
fvals = float_frame.fillna(np.inf).values
exp0 = np.apply_along_axis(rankdata, 0, -fvals)
exp1 = np.apply_along_axis(rankdata, 1, -fvals)
tm.assert_almost_equal(ranks0.values, exp0)
tm.assert_almost_equal(ranks1.values, exp1)
# descending
# top
ranks0 = float_frame.rank(na_option="bottom", ascending=False)
ranks1 = float_frame.rank(1, na_option="bottom", ascending=False)
fval0 = float_frame.fillna((float_frame.min() - 1).to_dict()).values
fval1 = float_frame.T
fval1 = fval1.fillna((fval1.min() - 1).to_dict()).T
fval1 = fval1.fillna(np.inf).values
exp0 = np.apply_along_axis(rankdata, 0, -fval0)
exp1 = np.apply_along_axis(rankdata, 1, -fval1)
tm.assert_numpy_array_equal(ranks0.values, exp0)
tm.assert_numpy_array_equal(ranks1.values, exp1)
# bad values throw error
msg = "na_option must be one of 'keep', 'top', or 'bottom'"
with pytest.raises(ValueError, match=msg):
float_frame.rank(na_option="bad", ascending=False)
# invalid type
with pytest.raises(ValueError, match=msg):
float_frame.rank(na_option=True, ascending=False)
def test_rank_axis(self):
# check if using axes' names gives the same result
df = DataFrame([[2, 1], [4, 3]])
tm.assert_frame_equal(df.rank(axis=0), df.rank(axis="index"))
tm.assert_frame_equal(df.rank(axis=1), df.rank(axis="columns"))
@td.skip_if_no_scipy
def test_rank_methods_frame(self):
import scipy.stats # noqa:F401
from scipy.stats import rankdata
xs = np.random.randint(0, 21, (100, 26))
xs = (xs - 10.0) / 10.0
cols = [chr(ord("z") - i) for i in range(xs.shape[1])]
for vals in [xs, xs + 1e6, xs * 1e-6]:
df = DataFrame(vals, columns=cols)
for ax in [0, 1]:
for m in ["average", "min", "max", "first", "dense"]:
result = df.rank(axis=ax, method=m)
sprank = np.apply_along_axis(
rankdata, ax, vals, m if m != "first" else "ordinal"
)
sprank = sprank.astype(np.float64)
expected = DataFrame(sprank, columns=cols).astype("float64")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("dtype", ["O", "f8", "i8"])
def test_rank_descending(self, method, dtype):
if "i" in dtype:
df = self.df.dropna()
else:
df = self.df.astype(dtype)
res = df.rank(ascending=False)
expected = (df.max() - df).rank()
tm.assert_frame_equal(res, expected)
if method == "first" and dtype == "O":
return
expected = (df.max() - df).rank(method=method)
if dtype != "O":
res2 = df.rank(method=method, ascending=False, numeric_only=True)
tm.assert_frame_equal(res2, expected)
res3 = df.rank(method=method, ascending=False, numeric_only=False)
tm.assert_frame_equal(res3, expected)
@pytest.mark.parametrize("axis", [0, 1])
@pytest.mark.parametrize("dtype", [None, object])
def test_rank_2d_tie_methods(self, method, axis, dtype):
df = self.df
def _check2d(df, expected, method="average", axis=0):
exp_df = DataFrame({"A": expected, "B": expected})
if axis == 1:
df = df.T
exp_df = exp_df.T
result = df.rank(method=method, axis=axis)
tm.assert_frame_equal(result, exp_df)
disabled = {(object, "first")}
if (dtype, method) in disabled:
return
frame = df if dtype is None else df.astype(dtype)
_check2d(frame, self.results[method], method=method, axis=axis)
@pytest.mark.parametrize(
"method,exp",
[
("dense", [[1.0, 1.0, 1.0], [1.0, 0.5, 2.0 / 3], [1.0, 0.5, 1.0 / 3]]),
(
"min",
[
[1.0 / 3, 1.0, 1.0],
[1.0 / 3, 1.0 / 3, 2.0 / 3],
[1.0 / 3, 1.0 / 3, 1.0 / 3],
],
),
(
"max",
[[1.0, 1.0, 1.0], [1.0, 2.0 / 3, 2.0 / 3], [1.0, 2.0 / 3, 1.0 / 3]],
),
(
"average",
[[2.0 / 3, 1.0, 1.0], [2.0 / 3, 0.5, 2.0 / 3], [2.0 / 3, 0.5, 1.0 / 3]],
),
(
"first",
[
[1.0 / 3, 1.0, 1.0],
[2.0 / 3, 1.0 / 3, 2.0 / 3],
[3.0 / 3, 2.0 / 3, 1.0 / 3],
],
),
],
)
def test_rank_pct_true(self, method, exp):
# see gh-15630.
df = | DataFrame([[2012, 66, 3], [2012, 65, 2], [2012, 65, 1]]) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
import nose
import numpy as np
from numpy import nan
import pandas as pd
from distutils.version import LooseVersion
from pandas import (Index, Series, DataFrame, Panel, isnull,
date_range, period_range)
from pandas.core.index import MultiIndex
import pandas.core.common as com
from pandas.compat import range, zip
from pandas import compat
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
assert_panel_equal,
assert_equal)
import pandas.util.testing as tm
def _skip_if_no_pchip():
try:
from scipy.interpolate import pchip_interpolate # noqa
except ImportError:
raise nose.SkipTest('scipy.interpolate.pchip missing')
# ----------------------------------------------------------------------
# Generic types test cases
class Generic(object):
_multiprocess_can_split_ = True
def setUp(self):
pass
@property
def _ndim(self):
return self._typ._AXIS_LEN
def _axes(self):
""" return the axes for my object typ """
return self._typ._AXIS_ORDERS
def _construct(self, shape, value=None, dtype=None, **kwargs):
""" construct an object for the given shape
if value is specified use that if its a scalar
if value is an array, repeat it as needed """
if isinstance(shape, int):
shape = tuple([shape] * self._ndim)
if value is not None:
if np.isscalar(value):
if value == 'empty':
arr = None
# remove the info axis
kwargs.pop(self._typ._info_axis_name, None)
else:
arr = np.empty(shape, dtype=dtype)
arr.fill(value)
else:
fshape = np.prod(shape)
arr = value.ravel()
new_shape = fshape / arr.shape[0]
if fshape % arr.shape[0] != 0:
raise Exception("invalid value passed in _construct")
arr = np.repeat(arr, new_shape).reshape(shape)
else:
arr = np.random.randn(*shape)
return self._typ(arr, dtype=dtype, **kwargs)
def _compare(self, result, expected):
self._comparator(result, expected)
def test_rename(self):
# single axis
for axis in self._axes():
kwargs = {axis: list('ABCD')}
obj = self._construct(4, **kwargs)
# no values passed
# self.assertRaises(Exception, o.rename(str.lower))
# rename a single axis
result = obj.rename(**{axis: str.lower})
expected = obj.copy()
setattr(expected, axis, list('abcd'))
self._compare(result, expected)
# multiple axes at once
def test_get_numeric_data(self):
n = 4
kwargs = {}
for i in range(self._ndim):
kwargs[self._typ._AXIS_NAMES[i]] = list(range(n))
# get the numeric data
o = self._construct(n, **kwargs)
result = o._get_numeric_data()
self._compare(result, o)
# non-inclusion
result = o._get_bool_data()
expected = self._construct(n, value='empty', **kwargs)
self._compare(result, expected)
# get the bool data
arr = np.array([True, True, False, True])
o = self._construct(n, value=arr, **kwargs)
result = o._get_numeric_data()
self._compare(result, o)
# _get_numeric_data is includes _get_bool_data, so can't test for
# non-inclusion
def test_get_default(self):
# GH 7725
d0 = "a", "b", "c", "d"
d1 = np.arange(4, dtype='int64')
others = "e", 10
for data, index in ((d0, d1), (d1, d0)):
s = Series(data, index=index)
for i, d in zip(index, data):
self.assertEqual(s.get(i), d)
self.assertEqual(s.get(i, d), d)
self.assertEqual(s.get(i, "z"), d)
for other in others:
self.assertEqual(s.get(other, "z"), "z")
self.assertEqual(s.get(other, other), other)
def test_nonzero(self):
# GH 4633
# look at the boolean/nonzero behavior for objects
obj = self._construct(shape=4)
self.assertRaises(ValueError, lambda: bool(obj == 0))
self.assertRaises(ValueError, lambda: bool(obj == 1))
self.assertRaises(ValueError, lambda: bool(obj))
obj = self._construct(shape=4, value=1)
self.assertRaises(ValueError, lambda: bool(obj == 0))
self.assertRaises(ValueError, lambda: bool(obj == 1))
self.assertRaises(ValueError, lambda: bool(obj))
obj = self._construct(shape=4, value=np.nan)
self.assertRaises(ValueError, lambda: bool(obj == 0))
self.assertRaises(ValueError, lambda: bool(obj == 1))
self.assertRaises(ValueError, lambda: bool(obj))
# empty
obj = self._construct(shape=0)
self.assertRaises(ValueError, lambda: bool(obj))
# invalid behaviors
obj1 = self._construct(shape=4, value=1)
obj2 = self._construct(shape=4, value=1)
def f():
if obj1:
com.pprint_thing("this works and shouldn't")
self.assertRaises(ValueError, f)
self.assertRaises(ValueError, lambda: obj1 and obj2)
self.assertRaises(ValueError, lambda: obj1 or obj2)
self.assertRaises(ValueError, lambda: not obj1)
def test_numpy_1_7_compat_numeric_methods(self):
# GH 4435
        # numpy in 1.7 tries to pass additional arguments to pandas functions
o = self._construct(shape=4)
for op in ['min', 'max', 'max', 'var', 'std', 'prod', 'sum', 'cumsum',
'cumprod', 'median', 'skew', 'kurt', 'compound', 'cummax',
'cummin', 'all', 'any']:
f = getattr(np, op, None)
if f is not None:
f(o)
def test_downcast(self):
# test close downcasting
o = self._construct(shape=4, value=9, dtype=np.int64)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
self._compare(result, o)
o = self._construct(shape=4, value=9.)
expected = o.astype(np.int64)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
self._compare(result, expected)
o = self._construct(shape=4, value=9.5)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
self._compare(result, o)
# are close
o = self._construct(shape=4, value=9.000000000005)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
expected = o.astype(np.int64)
self._compare(result, expected)
def test_constructor_compound_dtypes(self):
# GH 5191
# compound dtypes should raise not-implementederror
def f(dtype):
return self._construct(shape=3, dtype=dtype)
self.assertRaises(NotImplementedError, f, [("A", "datetime64[h]"),
("B", "str"),
("C", "int32")])
# these work (though results may be unexpected)
f('int64')
f('float64')
f('M8[ns]')
def check_metadata(self, x, y=None):
for m in x._metadata:
v = getattr(x, m, None)
if y is None:
self.assertIsNone(v)
else:
self.assertEqual(v, getattr(y, m, None))
def test_metadata_propagation(self):
# check that the metadata matches up on the resulting ops
o = self._construct(shape=3)
o.name = 'foo'
o2 = self._construct(shape=3)
o2.name = 'bar'
# TODO
# Once panel can do non-trivial combine operations
# (currently there is an a raise in the Panel arith_ops to prevent
# this, though it actually does work)
# can remove all of these try: except: blocks on the actual operations
# ----------
# preserving
# ----------
# simple ops with scalars
for op in ['__add__', '__sub__', '__truediv__', '__mul__']:
result = getattr(o, op)(1)
self.check_metadata(o, result)
# ops with like
for op in ['__add__', '__sub__', '__truediv__', '__mul__']:
try:
result = getattr(o, op)(o)
self.check_metadata(o, result)
except (ValueError, AttributeError):
pass
# simple boolean
for op in ['__eq__', '__le__', '__ge__']:
v1 = getattr(o, op)(o)
self.check_metadata(o, v1)
try:
self.check_metadata(o, v1 & v1)
except (ValueError):
pass
try:
self.check_metadata(o, v1 | v1)
except (ValueError):
pass
# combine_first
try:
result = o.combine_first(o2)
self.check_metadata(o, result)
except (AttributeError):
pass
# ---------------------------
# non-preserving (by default)
# ---------------------------
# add non-like
try:
result = o + o2
self.check_metadata(result)
except (ValueError, AttributeError):
pass
# simple boolean
for op in ['__eq__', '__le__', '__ge__']:
# this is a name matching op
v1 = getattr(o, op)(o)
v2 = getattr(o, op)(o2)
self.check_metadata(v2)
try:
self.check_metadata(v1 & v2)
except (ValueError):
pass
try:
self.check_metadata(v1 | v2)
except (ValueError):
pass
def test_head_tail(self):
# GH5370
o = self._construct(shape=10)
# check all index types
for index in [tm.makeFloatIndex, tm.makeIntIndex, tm.makeStringIndex,
tm.makeUnicodeIndex, tm.makeDateIndex,
tm.makePeriodIndex]:
axis = o._get_axis_name(0)
setattr(o, axis, index(len(getattr(o, axis))))
# Panel + dims
try:
o.head()
except (NotImplementedError):
raise nose.SkipTest('not implemented on {0}'.format(
o.__class__.__name__))
self._compare(o.head(), o.iloc[:5])
self._compare(o.tail(), o.iloc[-5:])
# 0-len
self._compare(o.head(0), o.iloc[0:0])
self._compare(o.tail(0), o.iloc[0:0])
# bounded
self._compare(o.head(len(o) + 1), o)
self._compare(o.tail(len(o) + 1), o)
# neg index
self._compare(o.head(-3), o.head(7))
self._compare(o.tail(-3), o.tail(7))
def test_sample(self):
# Fixes issue: 2419
o = self._construct(shape=10)
###
# Check behavior of random_state argument
###
# Check for stability when receives seed or random state -- run 10
# times.
for test in range(10):
seed = np.random.randint(0, 100)
self._compare(
o.sample(n=4, random_state=seed), o.sample(n=4,
random_state=seed))
self._compare(
o.sample(frac=0.7, random_state=seed), o.sample(
frac=0.7, random_state=seed))
self._compare(
o.sample(n=4, random_state=np.random.RandomState(test)),
o.sample(n=4, random_state=np.random.RandomState(test)))
self._compare(
o.sample(frac=0.7, random_state=np.random.RandomState(test)),
o.sample(frac=0.7, random_state=np.random.RandomState(test)))
# Check for error when random_state argument invalid.
with tm.assertRaises(ValueError):
o.sample(random_state='astring!')
###
# Check behavior of `frac` and `N`
###
# Giving both frac and N throws error
with tm.assertRaises(ValueError):
o.sample(n=3, frac=0.3)
# Check that raises right error for negative lengths
with tm.assertRaises(ValueError):
o.sample(n=-3)
with tm.assertRaises(ValueError):
o.sample(frac=-0.3)
# Make sure float values of `n` give error
with tm.assertRaises(ValueError):
o.sample(n=3.2)
# Check lengths are right
        self.assertTrue(len(o.sample(n=4)) == 4)
        self.assertTrue(len(o.sample(frac=0.34)) == 3)
        self.assertTrue(len(o.sample(frac=0.36)) == 4)
###
# Check weights
###
# Weight length must be right
with tm.assertRaises(ValueError):
o.sample(n=3, weights=[0, 1])
with tm.assertRaises(ValueError):
bad_weights = [0.5] * 11
o.sample(n=3, weights=bad_weights)
with tm.assertRaises(ValueError):
bad_weight_series = Series([0, 0, 0.2])
o.sample(n=4, weights=bad_weight_series)
# Check won't accept negative weights
with tm.assertRaises(ValueError):
bad_weights = [-0.1] * 10
o.sample(n=3, weights=bad_weights)
# Check inf and -inf throw errors:
with tm.assertRaises(ValueError):
weights_with_inf = [0.1] * 10
weights_with_inf[0] = np.inf
o.sample(n=3, weights=weights_with_inf)
with tm.assertRaises(ValueError):
weights_with_ninf = [0.1] * 10
weights_with_ninf[0] = -np.inf
o.sample(n=3, weights=weights_with_ninf)
# All zeros raises errors
zero_weights = [0] * 10
with tm.assertRaises(ValueError):
o.sample(n=3, weights=zero_weights)
# All missing weights
nan_weights = [np.nan] * 10
with tm.assertRaises(ValueError):
o.sample(n=3, weights=nan_weights)
# A few dataframe test with degenerate weights.
easy_weight_list = [0] * 10
easy_weight_list[5] = 1
df = pd.DataFrame({'col1': range(10, 20),
'col2': range(20, 30),
'colString': ['a'] * 10,
'easyweights': easy_weight_list})
sample1 = df.sample(n=1, weights='easyweights')
assert_frame_equal(sample1, df.iloc[5:6])
# Ensure proper error if string given as weight for Series, panel, or
# DataFrame with axis = 1.
s = Series(range(10))
with tm.assertRaises(ValueError):
s.sample(n=3, weights='weight_column')
panel = pd.Panel(items=[0, 1, 2], major_axis=[2, 3, 4],
minor_axis=[3, 4, 5])
with tm.assertRaises(ValueError):
panel.sample(n=1, weights='weight_column')
with tm.assertRaises(ValueError):
df.sample(n=1, weights='weight_column', axis=1)
# Check weighting key error
with tm.assertRaises(KeyError):
df.sample(n=3, weights='not_a_real_column_name')
# Check np.nan are replaced by zeros.
weights_with_nan = [np.nan] * 10
weights_with_nan[5] = 0.5
self._compare(
o.sample(n=1, axis=0, weights=weights_with_nan), o.iloc[5:6])
# Check None are also replaced by zeros.
weights_with_None = [None] * 10
weights_with_None[5] = 0.5
self._compare(
o.sample(n=1, axis=0, weights=weights_with_None), o.iloc[5:6])
# Check that re-normalizes weights that don't sum to one.
weights_less_than_1 = [0] * 10
weights_less_than_1[0] = 0.5
tm.assert_frame_equal(
df.sample(n=1, weights=weights_less_than_1), df.iloc[:1])
###
# Test axis argument
###
# Test axis argument
df = pd.DataFrame({'col1': range(10), 'col2': ['a'] * 10})
second_column_weight = [0, 1]
assert_frame_equal(
df.sample(n=1, axis=1, weights=second_column_weight), df[['col2']])
# Different axis arg types
assert_frame_equal(df.sample(n=1, axis='columns',
weights=second_column_weight),
df[['col2']])
weight = [0] * 10
weight[5] = 0.5
assert_frame_equal(df.sample(n=1, axis='rows', weights=weight),
df.iloc[5:6])
assert_frame_equal(df.sample(n=1, axis='index', weights=weight),
df.iloc[5:6])
# Check out of range axis values
with tm.assertRaises(ValueError):
df.sample(n=1, axis=2)
with tm.assertRaises(ValueError):
df.sample(n=1, axis='not_a_name')
with tm.assertRaises(ValueError):
s = pd.Series(range(10))
s.sample(n=1, axis=1)
# Test weight length compared to correct axis
with tm.assertRaises(ValueError):
df.sample(n=1, axis=1, weights=[0.5] * 10)
# Check weights with axis = 1
easy_weight_list = [0] * 3
easy_weight_list[2] = 1
df = pd.DataFrame({'col1': range(10, 20),
'col2': range(20, 30),
'colString': ['a'] * 10})
sample1 = df.sample(n=1, axis=1, weights=easy_weight_list)
assert_frame_equal(sample1, df[['colString']])
# Test default axes
p = pd.Panel(items=['a', 'b', 'c'], major_axis=[2, 4, 6],
minor_axis=[1, 3, 5])
assert_panel_equal(
p.sample(n=3, random_state=42), p.sample(n=3, axis=1,
random_state=42))
assert_frame_equal(
df.sample(n=3, random_state=42), df.sample(n=3, axis=0,
random_state=42))
# Test that function aligns weights with frame
df = DataFrame(
{'col1': [5, 6, 7],
'col2': ['a', 'b', 'c'], }, index=[9, 5, 3])
s = Series([1, 0, 0], index=[3, 5, 9])
assert_frame_equal(df.loc[[3]], df.sample(1, weights=s))
# Weights have index values to be dropped because not in
# sampled DataFrame
s2 = Series([0.001, 0, 10000], index=[3, 5, 10])
assert_frame_equal(df.loc[[3]], df.sample(1, weights=s2))
# Weights have empty values to be filed with zeros
s3 = Series([0.01, 0], index=[3, 5])
assert_frame_equal(df.loc[[3]], df.sample(1, weights=s3))
# No overlap in weight and sampled DataFrame indices
s4 = Series([1, 0], index=[1, 2])
with tm.assertRaises(ValueError):
df.sample(1, weights=s4)
def test_size_compat(self):
# GH8846
# size property should be defined
o = self._construct(shape=10)
self.assertTrue(o.size == np.prod(o.shape))
self.assertTrue(o.size == 10 ** len(o.axes))
def test_split_compat(self):
# xref GH8846
o = self._construct(shape=10)
self.assertTrue(len(np.array_split(o, 5)) == 5)
self.assertTrue(len(np.array_split(o, 2)) == 2)
def test_unexpected_keyword(self): # GH8597
from pandas.util.testing import assertRaisesRegexp
df = DataFrame(np.random.randn(5, 2), columns=['jim', 'joe'])
ca = pd.Categorical([0, 0, 2, 2, 3, np.nan])
ts = df['joe'].copy()
ts[2] = np.nan
with assertRaisesRegexp(TypeError, 'unexpected keyword'):
df.drop('joe', axis=1, in_place=True)
with assertRaisesRegexp(TypeError, 'unexpected keyword'):
df.reindex([1, 0], inplace=True)
with assertRaisesRegexp(TypeError, 'unexpected keyword'):
ca.fillna(0, inplace=True)
with assertRaisesRegexp(TypeError, 'unexpected keyword'):
ts.fillna(0, in_place=True)
class TestSeries(tm.TestCase, Generic):
_typ = Series
_comparator = lambda self, x, y: assert_series_equal(x, y)
def setUp(self):
self.ts = tm.makeTimeSeries() # Was at top level in test_series
self.ts.name = 'ts'
self.series = tm.makeStringSeries()
self.series.name = 'series'
def test_rename_mi(self):
s = Series([11, 21, 31],
index=MultiIndex.from_tuples(
[("A", x) for x in ["a", "B", "c"]]))
s.rename(str.lower)
def test_get_numeric_data_preserve_dtype(self):
# get the numeric data
o = Series([1, 2, 3])
result = o._get_numeric_data()
self._compare(result, o)
o = Series([1, '2', 3.])
result = o._get_numeric_data()
expected = Series([], dtype=object, index=pd.Index([], dtype=object))
self._compare(result, expected)
o = Series([True, False, True])
result = o._get_numeric_data()
self._compare(result, o)
o = Series([True, False, True])
result = o._get_bool_data()
self._compare(result, o)
o = Series(date_range('20130101', periods=3))
result = o._get_numeric_data()
expected = Series([], dtype='M8[ns]', index=pd.Index([], dtype=object))
self._compare(result, expected)
def test_nonzero_single_element(self):
# allow single item via bool method
s = Series([True])
self.assertTrue(s.bool())
s = Series([False])
self.assertFalse(s.bool())
# single item nan to raise
for s in [Series([np.nan]), Series([pd.NaT]), Series([True]),
Series([False])]:
self.assertRaises(ValueError, lambda: bool(s))
for s in [Series([np.nan]), Series([pd.NaT])]:
self.assertRaises(ValueError, lambda: s.bool())
# multiple bool are still an error
for s in [Series([True, True]), Series([False, False])]:
self.assertRaises(ValueError, lambda: bool(s))
self.assertRaises(ValueError, lambda: s.bool())
# single non-bool are an error
for s in [Series([1]), Series([0]), Series(['a']), Series([0.0])]:
self.assertRaises(ValueError, lambda: bool(s))
self.assertRaises(ValueError, lambda: s.bool())
def test_metadata_propagation_indiv(self):
# check that the metadata matches up on the resulting ops
o = Series(range(3), range(3))
o.name = 'foo'
o2 = Series(range(3), range(3))
o2.name = 'bar'
result = o.T
self.check_metadata(o, result)
# resample
ts = Series(np.random.rand(1000),
index=date_range('20130101', periods=1000, freq='s'),
name='foo')
result = ts.resample('1T').mean()
self.check_metadata(ts, result)
result = ts.resample('1T').min()
self.check_metadata(ts, result)
result = ts.resample('1T').apply(lambda x: x.sum())
self.check_metadata(ts, result)
_metadata = Series._metadata
_finalize = Series.__finalize__
Series._metadata = ['name', 'filename']
o.filename = 'foo'
o2.filename = 'bar'
def finalize(self, other, method=None, **kwargs):
for name in self._metadata:
if method == 'concat' and name == 'filename':
value = '+'.join([getattr(
o, name) for o in other.objs if getattr(o, name, None)
])
object.__setattr__(self, name, value)
else:
object.__setattr__(self, name, getattr(other, name, None))
return self
Series.__finalize__ = finalize
result = pd.concat([o, o2])
self.assertEqual(result.filename, 'foo+bar')
self.assertIsNone(result.name)
# reset
Series._metadata = _metadata
Series.__finalize__ = _finalize
def test_interpolate(self):
ts = Series(np.arange(len(self.ts), dtype=float), self.ts.index)
ts_copy = ts.copy()
ts_copy[5:10] = np.NaN
linear_interp = ts_copy.interpolate(method='linear')
self.assert_numpy_array_equal(linear_interp, ts)
ord_ts = Series([d.toordinal() for d in self.ts.index],
index=self.ts.index).astype(float)
ord_ts_copy = ord_ts.copy()
ord_ts_copy[5:10] = np.NaN
time_interp = ord_ts_copy.interpolate(method='time')
self.assert_numpy_array_equal(time_interp, ord_ts)
# try time interpolation on a non-TimeSeries
# Only raises ValueError if there are NaNs.
non_ts = self.series.copy()
non_ts[0] = np.NaN
self.assertRaises(ValueError, non_ts.interpolate, method='time')
def test_interp_regression(self):
tm._skip_if_no_scipy()
_skip_if_no_pchip()
ser = Series(np.sort(np.random.uniform(size=100)))
# interpolate at new_index
new_index = ser.index.union(Index([49.25, 49.5, 49.75, 50.25, 50.5,
50.75]))
interp_s = ser.reindex(new_index).interpolate(method='pchip')
# does not blow up, GH5977
interp_s[49:51]
def test_interpolate_corners(self):
s = Series([np.nan, np.nan])
assert_series_equal(s.interpolate(), s)
s = Series([]).interpolate()
assert_series_equal(s.interpolate(), s)
tm._skip_if_no_scipy()
s = Series([np.nan, np.nan])
assert_series_equal(s.interpolate(method='polynomial', order=1), s)
s = Series([]).interpolate()
assert_series_equal(s.interpolate(method='polynomial', order=1), s)
def test_interpolate_index_values(self):
s = Series(np.nan, index=np.sort(np.random.rand(30)))
s[::3] = np.random.randn(10)
vals = s.index.values.astype(float)
result = s.interpolate(method='index')
expected = s.copy()
bad = isnull(expected.values)
good = ~bad
expected = Series(np.interp(vals[bad], vals[good],
s.values[good]),
index=s.index[bad])
assert_series_equal(result[bad], expected)
# 'values' is synonymous with 'index' for the method kwarg
other_result = s.interpolate(method='values')
assert_series_equal(other_result, result)
assert_series_equal(other_result[bad], expected)
def test_interpolate_non_ts(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
with tm.assertRaises(ValueError):
s.interpolate(method='time')
# New interpolation tests
def test_nan_interpolate(self):
s = Series([0, 1, np.nan, 3])
result = s.interpolate()
expected = Series([0., 1., 2., 3.])
assert_series_equal(result, expected)
tm._skip_if_no_scipy()
result = s.interpolate(method='polynomial', order=1)
assert_series_equal(result, expected)
def test_nan_irregular_index(self):
s = Series([1, 2, np.nan, 4], index=[1, 3, 5, 9])
result = s.interpolate()
expected = Series([1., 2., 3., 4.], index=[1, 3, 5, 9])
assert_series_equal(result, expected)
def test_nan_str_index(self):
s = Series([0, 1, 2, np.nan], index=list('abcd'))
result = s.interpolate()
expected = Series([0., 1., 2., 2.], index=list('abcd'))
assert_series_equal(result, expected)
def test_interp_quad(self):
tm._skip_if_no_scipy()
sq = Series([1, 4, np.nan, 16], index=[1, 2, 3, 4])
result = sq.interpolate(method='quadratic')
expected = Series([1., 4., 9., 16.], index=[1, 2, 3, 4])
assert_series_equal(result, expected)
def test_interp_scipy_basic(self):
tm._skip_if_no_scipy()
s = Series([1, 3, np.nan, 12, np.nan, 25])
# slinear
expected = Series([1., 3., 7.5, 12., 18.5, 25.])
result = s.interpolate(method='slinear')
assert_series_equal(result, expected)
result = s.interpolate(method='slinear', downcast='infer')
assert_series_equal(result, expected)
# nearest
expected = Series([1, 3, 3, 12, 12, 25])
result = s.interpolate(method='nearest')
assert_series_equal(result, expected.astype('float'))
result = s.interpolate(method='nearest', downcast='infer')
assert_series_equal(result, expected)
# zero
expected = Series([1, 3, 3, 12, 12, 25])
result = s.interpolate(method='zero')
assert_series_equal(result, expected.astype('float'))
result = s.interpolate(method='zero', downcast='infer')
assert_series_equal(result, expected)
# quadratic
expected = Series([1, 3., 6.769231, 12., 18.230769, 25.])
result = s.interpolate(method='quadratic')
assert_series_equal(result, expected)
result = s.interpolate(method='quadratic', downcast='infer')
assert_series_equal(result, expected)
# cubic
expected = Series([1., 3., 6.8, 12., 18.2, 25.])
result = s.interpolate(method='cubic')
assert_series_equal(result, expected)
def test_interp_limit(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
expected = Series([1., 3., 5., 7., np.nan, 11.])
result = s.interpolate(method='linear', limit=2)
assert_series_equal(result, expected)
def test_interp_limit_forward(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
# Provide 'forward' (the default) explicitly here.
expected = Series([1., 3., 5., 7., np.nan, 11.])
result = s.interpolate(method='linear', limit=2,
limit_direction='forward')
assert_series_equal(result, expected)
result = s.interpolate(method='linear', limit=2,
limit_direction='FORWARD')
assert_series_equal(result, expected)
def test_interp_limit_bad_direction(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
self.assertRaises(ValueError, s.interpolate, method='linear', limit=2,
limit_direction='abc')
# raises an error even if no limit is specified.
self.assertRaises(ValueError, s.interpolate, method='linear',
limit_direction='abc')
def test_interp_limit_direction(self):
# These tests are for issue #9218 -- fill NaNs in both directions.
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
expected = Series([1., 3., np.nan, 7., 9., 11.])
result = s.interpolate(method='linear', limit=2,
limit_direction='backward')
assert_series_equal(result, expected)
expected = Series([1., 3., 5., np.nan, 9., 11.])
result = s.interpolate(method='linear', limit=1,
limit_direction='both')
assert_series_equal(result, expected)
# Check that this works on a longer series of nans.
s = Series([1, 3, np.nan, np.nan, np.nan, 7, 9, np.nan, np.nan, 12,
np.nan])
expected = Series([1., 3., 4., 5., 6., 7., 9., 10., 11., 12., 12.])
result = s.interpolate(method='linear', limit=2,
limit_direction='both')
assert_series_equal(result, expected)
expected = Series([1., 3., 4., np.nan, 6., 7., 9., 10., 11., 12., 12.])
result = s.interpolate(method='linear', limit=1,
limit_direction='both')
assert_series_equal(result, expected)
def test_interp_limit_to_ends(self):
        # These tests are for issue #10420 -- flow back to beginning.
s = Series([np.nan, np.nan, 5, 7, 9, np.nan])
expected = Series([5., 5., 5., 7., 9., np.nan])
result = s.interpolate(method='linear', limit=2,
limit_direction='backward')
assert_series_equal(result, expected)
expected = Series([5., 5., 5., 7., 9., 9.])
result = s.interpolate(method='linear', limit=2,
limit_direction='both')
assert_series_equal(result, expected)
def test_interp_limit_before_ends(self):
        # These tests are for issue #11115 -- limit ends properly.
s = Series([np.nan, np.nan, 5, 7, np.nan, np.nan])
expected = Series([np.nan, np.nan, 5., 7., 7., np.nan])
result = s.interpolate(method='linear', limit=1,
limit_direction='forward')
assert_series_equal(result, expected)
expected = Series([np.nan, 5., 5., 7., np.nan, np.nan])
result = s.interpolate(method='linear', limit=1,
limit_direction='backward')
assert_series_equal(result, expected)
expected = Series([np.nan, 5., 5., 7., 7., np.nan])
result = s.interpolate(method='linear', limit=1,
limit_direction='both')
assert_series_equal(result, expected)
def test_interp_all_good(self):
# scipy
tm._skip_if_no_scipy()
s = Series([1, 2, 3])
result = s.interpolate(method='polynomial', order=1)
assert_series_equal(result, s)
# non-scipy
result = s.interpolate()
assert_series_equal(result, s)
def test_interp_multiIndex(self):
idx = MultiIndex.from_tuples([(0, 'a'), (1, 'b'), (2, 'c')])
s = Series([1, 2, np.nan], index=idx)
expected = s.copy()
expected.loc[2] = 2
result = s.interpolate()
assert_series_equal(result, expected)
tm._skip_if_no_scipy()
with tm.assertRaises(ValueError):
s.interpolate(method='polynomial', order=1)
def test_interp_nonmono_raise(self):
tm._skip_if_no_scipy()
s = Series([1, np.nan, 3], index=[0, 2, 1])
with tm.assertRaises(ValueError):
s.interpolate(method='krogh')
def test_interp_datetime64(self):
tm._skip_if_no_scipy()
df = Series([1, np.nan, 3], index=date_range('1/1/2000', periods=3))
result = df.interpolate(method='nearest')
expected = Series([1., 1., 3.],
index=date_range('1/1/2000', periods=3))
assert_series_equal(result, expected)
def test_interp_limit_no_nans(self):
# GH 7173
s = pd.Series([1., 2., 3.])
result = s.interpolate(limit=1)
expected = s
assert_series_equal(result, expected)
def test_describe(self):
self.series.describe()
self.ts.describe()
def test_describe_objects(self):
s = Series(['a', 'b', 'b', np.nan, np.nan, np.nan, 'c', 'd', 'a', 'a'])
result = s.describe()
expected = Series({'count': 7, 'unique': 4,
'top': 'a', 'freq': 3}, index=result.index)
assert_series_equal(result, expected)
dt = list(self.ts.index)
dt.append(dt[0])
ser = Series(dt)
rs = ser.describe()
min_date = min(dt)
max_date = max(dt)
xp = Series({'count': len(dt),
'unique': len(self.ts.index),
'first': min_date, 'last': max_date, 'freq': 2,
'top': min_date}, index=rs.index)
assert_series_equal(rs, xp)
def test_describe_empty(self):
result = pd.Series().describe()
self.assertEqual(result['count'], 0)
self.assertTrue(result.drop('count').isnull().all())
nanSeries = Series([np.nan])
nanSeries.name = 'NaN'
result = nanSeries.describe()
self.assertEqual(result['count'], 0)
self.assertTrue(result.drop('count').isnull().all())
def test_describe_none(self):
noneSeries = Series([None])
noneSeries.name = 'None'
expected = Series([0, 0], index=['count', 'unique'], name='None')
assert_series_equal(noneSeries.describe(), expected)
class TestDataFrame(tm.TestCase, Generic):
_typ = DataFrame
_comparator = lambda self, x, y: assert_frame_equal(x, y)
def test_rename_mi(self):
df = DataFrame([
11, 21, 31
], index=MultiIndex.from_tuples([("A", x) for x in ["a", "B", "c"]]))
df.rename(str.lower)
def test_nonzero_single_element(self):
# allow single item via bool method
df = DataFrame([[True]])
self.assertTrue(df.bool())
df = DataFrame([[False]])
self.assertFalse(df.bool())
df = DataFrame([[False, False]])
self.assertRaises(ValueError, lambda: df.bool())
self.assertRaises(ValueError, lambda: bool(df))
def test_get_numeric_data_preserve_dtype(self):
# get the numeric data
o = DataFrame({'A': [1, '2', 3.]})
result = o._get_numeric_data()
expected = DataFrame(index=[0, 1, 2], dtype=object)
self._compare(result, expected)
def test_interp_basic(self):
df = DataFrame({'A': [1, 2, np.nan, 4],
'B': [1, 4, 9, np.nan],
'C': [1, 2, 3, 5],
'D': list('abcd')})
expected = DataFrame({'A': [1., 2., 3., 4.],
'B': [1., 4., 9., 9.],
'C': [1, 2, 3, 5],
'D': list('abcd')})
result = df.interpolate()
assert_frame_equal(result, expected)
result = df.set_index('C').interpolate()
expected = df.set_index('C')
expected.loc[3, 'A'] = 3
expected.loc[5, 'B'] = 9
assert_frame_equal(result, expected)
def test_interp_bad_method(self):
df = DataFrame({'A': [1, 2, np.nan, 4],
'B': [1, 4, 9, np.nan],
'C': [1, 2, 3, 5],
'D': list('abcd')})
with tm.assertRaises(ValueError):
df.interpolate(method='not_a_method')
def test_interp_combo(self):
df = DataFrame({'A': [1., 2., np.nan, 4.],
'B': [1, 4, 9, np.nan],
'C': [1, 2, 3, 5],
'D': list('abcd')})
result = df['A'].interpolate()
expected = Series([1., 2., 3., 4.], name='A')
assert_series_equal(result, expected)
result = df['A'].interpolate(downcast='infer')
expected = Series([1, 2, 3, 4], name='A')
assert_series_equal(result, expected)
def test_interp_nan_idx(self):
df = DataFrame({'A': [1, 2, np.nan, 4], 'B': [np.nan, 2, 3, 4]})
df = df.set_index('A')
with tm.assertRaises(NotImplementedError):
df.interpolate(method='values')
def test_interp_various(self):
tm._skip_if_no_scipy()
df = DataFrame({'A': [1, 2, np.nan, 4, 5, np.nan, 7],
'C': [1, 2, 3, 5, 8, 13, 21]})
df = df.set_index('C')
expected = df.copy()
result = df.interpolate(method='polynomial', order=1)
expected.A.loc[3] = 2.66666667
expected.A.loc[13] = 5.76923076
assert_frame_equal(result, expected)
result = df.interpolate(method='cubic')
expected.A.loc[3] = 2.81621174
expected.A.loc[13] = 5.64146581
assert_frame_equal(result, expected)
result = df.interpolate(method='nearest')
expected.A.loc[3] = 2
expected.A.loc[13] = 5
assert_frame_equal(result, expected, check_dtype=False)
result = df.interpolate(method='quadratic')
expected.A.loc[3] = 2.82533638
expected.A.loc[13] = 6.02817974
assert_frame_equal(result, expected)
result = df.interpolate(method='slinear')
expected.A.loc[3] = 2.66666667
expected.A.loc[13] = 5.76923077
assert_frame_equal(result, expected)
result = df.interpolate(method='zero')
expected.A.loc[3] = 2.
expected.A.loc[13] = 5
assert_frame_equal(result, expected, check_dtype=False)
result = df.interpolate(method='quadratic')
expected.A.loc[3] = 2.82533638
expected.A.loc[13] = 6.02817974
assert_frame_equal(result, expected)
def test_interp_alt_scipy(self):
tm._skip_if_no_scipy()
df = DataFrame({'A': [1, 2, np.nan, 4, 5, np.nan, 7],
'C': [1, 2, 3, 5, 8, 13, 21]})
result = df.interpolate(method='barycentric')
expected = df.copy()
expected.ix[2, 'A'] = 3
expected.ix[5, 'A'] = 6
assert_frame_equal(result, expected)
result = df.interpolate(method='barycentric', downcast='infer')
assert_frame_equal(result, expected.astype(np.int64))
result = df.interpolate(method='krogh')
expectedk = df.copy()
expectedk['A'] = expected['A']
assert_frame_equal(result, expectedk)
_skip_if_no_pchip()
import scipy
result = df.interpolate(method='pchip')
expected.ix[2, 'A'] = 3
if LooseVersion(scipy.__version__) >= '0.17.0':
expected.ix[5, 'A'] = 6.0
else:
expected.ix[5, 'A'] = 6.125
assert_frame_equal(result, expected)
def test_interp_rowwise(self):
df = DataFrame({0: [1, 2, np.nan, 4],
1: [2, 3, 4, np.nan],
2: [np.nan, 4, 5, 6],
3: [4, np.nan, 6, 7],
4: [1, 2, 3, 4]})
result = df.interpolate(axis=1)
expected = df.copy()
expected.loc[3, 1] = 5
expected.loc[0, 2] = 3
expected.loc[1, 3] = 3
expected[4] = expected[4].astype(np.float64)
assert_frame_equal(result, expected)
# scipy route
tm._skip_if_no_scipy()
result = df.interpolate(axis=1, method='values')
assert_frame_equal(result, expected)
result = df.interpolate(axis=0)
expected = df.interpolate()
assert_frame_equal(result, expected)
def test_rowwise_alt(self):
df = DataFrame({0: [0, .5, 1., np.nan, 4, 8, np.nan, np.nan, 64],
1: [1, 2, 3, 4, 3, 2, 1, 0, -1]})
df.interpolate(axis=0)
def test_interp_leading_nans(self):
df = DataFrame({"A": [np.nan, np.nan, .5, .25, 0],
"B": [np.nan, -3, -3.5, np.nan, -4]})
result = df.interpolate()
expected = df.copy()
expected['B'].loc[3] = -3.75
assert_frame_equal(result, expected)
tm._skip_if_no_scipy()
result = df.interpolate(method='polynomial', order=1)
assert_frame_equal(result, expected)
def test_interp_raise_on_only_mixed(self):
df = DataFrame({'A': [1, 2, np.nan, 4],
'B': ['a', 'b', 'c', 'd'],
'C': [np.nan, 2, 5, 7],
'D': [np.nan, np.nan, 9, 9],
'E': [1, 2, 3, 4]})
with tm.assertRaises(TypeError):
df.interpolate(axis=1)
def test_interp_inplace(self):
df = DataFrame({'a': [1., 2., np.nan, 4.]})
expected = DataFrame({'a': [1., 2., 3., 4.]})
result = df.copy()
result['a'].interpolate(inplace=True)
assert_frame_equal(result, expected)
result = df.copy()
result['a'].interpolate(inplace=True, downcast='infer')
assert_frame_equal(result, expected.astype('int64'))
def test_interp_inplace_row(self):
# GH 10395
result = DataFrame({'a': [1., 2., 3., 4.],
'b': [np.nan, 2., 3., 4.],
'c': [3, 2, 2, 2]})
expected = result.interpolate(method='linear', axis=1, inplace=False)
result.interpolate(method='linear', axis=1, inplace=True)
assert_frame_equal(result, expected)
def test_interp_ignore_all_good(self):
# GH
df = DataFrame({'A': [1, 2, np.nan, 4],
'B': [1, 2, 3, 4],
'C': [1., 2., np.nan, 4.],
'D': [1., 2., 3., 4.]})
expected = DataFrame({'A': np.array(
[1, 2, 3, 4], dtype='float64'),
'B': np.array(
[1, 2, 3, 4], dtype='int64'),
'C': np.array(
[1., 2., 3, 4.], dtype='float64'),
'D': np.array(
[1., 2., 3., 4.], dtype='float64')})
result = df.interpolate(downcast=None)
assert_frame_equal(result, expected)
# all good
result = df[['B', 'D']].interpolate(downcast=None)
assert_frame_equal(result, df[['B', 'D']])
def test_describe(self):
tm.makeDataFrame().describe()
tm.makeMixedDataFrame().describe()
tm.makeTimeDataFrame().describe()
def test_describe_percentiles_percent_or_raw(self):
msg = 'percentiles should all be in the interval \\[0, 1\\]'
df = tm.makeDataFrame()
with tm.assertRaisesRegexp(ValueError, msg):
df.describe(percentiles=[10, 50, 100])
with tm.assertRaisesRegexp(ValueError, msg):
df.describe(percentiles=[2])
with tm.assertRaisesRegexp(ValueError, msg):
df.describe(percentiles=[-2])
def test_describe_percentiles_equivalence(self):
df = tm.makeDataFrame()
d1 = df.describe()
d2 = df.describe(percentiles=[.25, .75])
assert_frame_equal(d1, d2)
def test_describe_percentiles_insert_median(self):
df = tm.makeDataFrame()
d1 = df.describe(percentiles=[.25, .75])
d2 = df.describe(percentiles=[.25, .5, .75])
assert_frame_equal(d1, d2)
self.assertTrue('25%' in d1.index)
self.assertTrue('75%' in d2.index)
# none above
d1 = df.describe(percentiles=[.25, .45])
d2 = df.describe(percentiles=[.25, .45, .5])
assert_frame_equal(d1, d2)
self.assertTrue('25%' in d1.index)
self.assertTrue('45%' in d2.index)
# none below
d1 = df.describe(percentiles=[.75, 1])
d2 = df.describe(percentiles=[.5, .75, 1])
assert_frame_equal(d1, d2)
self.assertTrue('75%' in d1.index)
self.assertTrue('100%' in d2.index)
# edge
d1 = df.describe(percentiles=[0, 1])
d2 = df.describe(percentiles=[0, .5, 1])
assert_frame_equal(d1, d2)
self.assertTrue('0%' in d1.index)
self.assertTrue('100%' in d2.index)
def test_describe_no_numeric(self):
df = DataFrame({'A': ['foo', 'foo', 'bar'] * 8,
'B': ['a', 'b', 'c', 'd'] * 6})
desc = df.describe()
expected = DataFrame(dict((k, v.describe())
for k, v in compat.iteritems(df)),
columns=df.columns)
assert_frame_equal(desc, expected)
ts = tm.makeTimeSeries()
df = DataFrame({'time': ts.index})
desc = df.describe()
self.assertEqual(desc.time['first'], min(ts.index))
def test_describe_empty_int_columns(self):
df = DataFrame([[0, 1], [1, 2]])
desc = df[df[0] < 0].describe() # works
assert_series_equal(desc.xs('count'),
Series([0, 0], dtype=float, name='count'))
self.assertTrue(isnull(desc.ix[1:]).all().all())
def test_describe_objects(self):
df = DataFrame({"C1": ['a', 'a', 'c'], "C2": ['d', 'd', 'f']})
result = df.describe()
expected = DataFrame({"C1": [3, 2, 'a', 2], "C2": [3, 2, 'd', 2]},
index=['count', 'unique', 'top', 'freq'])
assert_frame_equal(result, expected)
df = DataFrame({"C1": pd.date_range('2010-01-01', periods=4, freq='D')
})
df.loc[4] = pd.Timestamp('2010-01-04')
result = df.describe()
expected = DataFrame({"C1": [5, 4, pd.Timestamp('2010-01-04'), 2,
pd.Timestamp('2010-01-01'),
pd.Timestamp('2010-01-04')]},
index=['count', 'unique', 'top', 'freq',
'first', 'last'])
assert_frame_equal(result, expected)
# mix time and str
df['C2'] = ['a', 'a', 'b', 'c', 'a']
result = df.describe()
expected['C2'] = [5, 3, 'a', 3, np.nan, np.nan]
assert_frame_equal(result, expected)
# just str
expected = DataFrame({'C2': [5, 3, 'a', 4]},
index=['count', 'unique', 'top', 'freq'])
result = df[['C2']].describe()
# mix of time, str, numeric
df['C3'] = [2, 4, 6, 8, 2]
result = df.describe()
expected = DataFrame({"C3": [5., 4.4, 2.607681, 2., 2., 4., 6., 8.]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
assert_frame_equal(result, expected)
assert_frame_equal(df.describe(), df[['C3']].describe())
assert_frame_equal(df[['C1', 'C3']].describe(), df[['C3']].describe())
assert_frame_equal(df[['C2', 'C3']].describe(), df[['C3']].describe())
def test_describe_typefiltering(self):
df = DataFrame({'catA': ['foo', 'foo', 'bar'] * 8,
'catB': ['a', 'b', 'c', 'd'] * 6,
'numC': np.arange(24, dtype='int64'),
'numD': np.arange(24.) + .5,
'ts': tm.makeTimeSeries()[:24].index})
descN = df.describe()
expected_cols = ['numC', 'numD', ]
expected = DataFrame(dict((k, df[k].describe())
for k in expected_cols),
columns=expected_cols)
assert_frame_equal(descN, expected)
desc = df.describe(include=['number'])
assert_frame_equal(desc, descN)
desc = df.describe(exclude=['object', 'datetime'])
assert_frame_equal(desc, descN)
desc = df.describe(include=['float'])
assert_frame_equal(desc, descN.drop('numC', 1))
descC = df.describe(include=['O'])
expected_cols = ['catA', 'catB']
expected = DataFrame(dict((k, df[k].describe())
for k in expected_cols),
columns=expected_cols)
assert_frame_equal(descC, expected)
descD = df.describe(include=['datetime'])
assert_series_equal(descD.ts, df.ts.describe())
desc = df.describe(include=['object', 'number', 'datetime'])
assert_frame_equal(desc.loc[:, ["numC", "numD"]].dropna(), descN)
assert_frame_equal(desc.loc[:, ["catA", "catB"]].dropna(), descC)
descDs = descD.sort_index()  # the index order changes for mixed types
assert_frame_equal(desc.loc[:, "ts":].dropna().sort_index(), descDs)
desc = df.loc[:, 'catA':'catB'].describe(include='all')
assert_frame_equal(desc, descC)
desc = df.loc[:, 'numC':'numD'].describe(include='all')
assert_frame_equal(desc, descN)
desc = df.describe(percentiles=[], include='all')
cnt = Series(data=[4, 4, 6, 6, 6],
index=['catA', 'catB', 'numC', 'numD', 'ts'])
assert_series_equal(desc.count(), cnt)
self.assertTrue('count' in desc.index)
self.assertTrue('unique' in desc.index)
self.assertTrue('50%' in desc.index)
self.assertTrue('first' in desc.index)
desc = df.drop("ts", 1).describe(percentiles=[], include='all')
assert_series_equal(desc.count(), cnt.drop("ts"))
self.assertTrue('first' not in desc.index)
desc = df.drop(["numC", "numD"], 1).describe(percentiles=[],
include='all')
assert_series_equal(desc.count(), cnt.drop(["numC", "numD"]))
self.assertTrue('50%' not in desc.index)
def test_describe_typefiltering_category_bool(self):
df = DataFrame({'A_cat': pd.Categorical(['foo', 'foo', 'bar'] * 8),
'B_str': ['a', 'b', 'c', 'd'] * 6,
'C_bool': [True] * 12 + [False] * 12,
'D_num': np.arange(24.) + .5,
'E_ts': tm.makeTimeSeries()[:24].index})
# bool is considered numeric in describe, although not an np.number
desc = df.describe()
expected_cols = ['C_bool', 'D_num']
expected = DataFrame(dict((k, df[k].describe())
for k in expected_cols),
columns=expected_cols)
assert_frame_equal(desc, expected)
desc = df.describe(include=["category"])
self.assertTrue(desc.columns.tolist() == ["A_cat"])
# 'all' includes numpy-dtypes + category
desc1 = df.describe(include="all")
desc2 = df.describe(include=[np.generic, "category"])
assert_frame_equal(desc1, desc2)
def test_describe_timedelta(self):
df = DataFrame({"td": pd.to_timedelta(np.arange(24) % 20, "D")})
self.assertTrue(df.describe().loc["mean"][0] == pd.to_timedelta(
"8d4h"))
def test_describe_typefiltering_dupcol(self):
df = DataFrame({'catA': ['foo', 'foo', 'bar'] * 8,
'catB': ['a', 'b', 'c', 'd'] * 6,
'numC': np.arange(24),
'numD': np.arange(24.) + .5,
'ts': tm.makeTimeSeries()[:24].index})
s = df.describe(include='all').shape[1]
df = pd.concat([df, df], axis=1)
s2 = df.describe(include='all').shape[1]
self.assertTrue(s2 == 2 * s)
def test_describe_typefiltering_groupby(self):
df = DataFrame({'catA': ['foo', 'foo', 'bar'] * 8,
'catB': ['a', 'b', 'c', 'd'] * 6,
'numC': np.arange(24),
'numD': np.arange(24.) + .5,
'ts': tm.makeTimeSeries()[:24].index})
G = df.groupby('catA')
self.assertTrue(G.describe(include=['number']).shape == (16, 2))
self.assertTrue(G.describe(include=['number', 'object']).shape == (22,
3))
self.assertTrue(G.describe(include='all').shape == (26, 4))
def test_describe_multi_index_df_column_names(self):
""" Test that column names persist after the describe operation."""
df = pd.DataFrame(
{'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.random.randn(8)})
# GH 11517
# test for hierarchical index
hierarchical_index_df = df.groupby(['A', 'B']).mean().T
self.assertTrue(hierarchical_index_df.columns.names == ['A', 'B'])
self.assertTrue(hierarchical_index_df.describe().columns.names ==
['A', 'B'])
# test for non-hierarchical index
non_hierarchical_index_df = df.groupby(['A']).mean().T
self.assertTrue(non_hierarchical_index_df.columns.names == ['A'])
self.assertTrue(non_hierarchical_index_df.describe().columns.names ==
['A'])
def test_no_order(self):
tm._skip_if_no_scipy()
s = Series([0, 1, np.nan, 3])
with tm.assertRaises(ValueError):
s.interpolate(method='polynomial')
with tm.assertRaises(ValueError):
s.interpolate(method='spline')
def test_spline(self):
tm._skip_if_no_scipy()
s = Series([1, 2, np.nan, 4, 5, np.nan, 7])
result = s.interpolate(method='spline', order=1)
expected = Series([1., 2., 3., 4., 5., 6., 7.])
assert_series_equal(result, expected)
def test_spline_extrapolate(self):
tm.skip_if_no_package(
'scipy', '0.15',
'setting ext on scipy.interpolate.UnivariateSpline')
s = Series([1, 2, 3, 4, np.nan, 6, np.nan])
result3 = s.interpolate(method='spline', order=1, ext=3)
expected3 = Series([1., 2., 3., 4., 5., 6., 6.])
assert_series_equal(result3, expected3)
import os
import argparse
import pandas as pd
import numpy as np
import xgboost as xgb
from math import ceil
from operator import itemgetter
from itertools import product
from scipy.stats import pearsonr
import matplotlib.pyplot as plt
def evalerror_pearson(preds, dtrain):
labels = dtrain.get_label()
return 'pearsonr', pearsonr(preds, labels)[0]
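# Illustrative note (not from the original script): this metric is written for
# xgboost's classic custom-evaluation hook, so a training call would pass it as,
# e.g., bst = xgb.train(params, dtrain, evals=[(dval, 'val')], feval=evalerror_pearson)
# and the per-round evaluation log would then report the Pearson correlation.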
def convert_model_to_c(bst, args, numf, filename):
bst.dump_model("{}_dump.raw.txt".format(filename))
num_nodes = []
mmax = 0
with open("{}_dump.raw.txt".format(filename)) as f:
for row in f:
if row.startswith('booster'):
if row.startswith('booster[0]'):
mmax = 0
else:
num_nodes.append(mmax + 1)
mmax = 0
continue
l = int(row.rstrip().replace(' ', '').split(':')[0])
if l > mmax:
mmax = l
num_nodes.append(mmax + 1)
forest = []
tree = None
b = 0
with open("{}_dump.raw.txt".format(filename)) as f:
for row in f:
if row.startswith('booster'):
if row.startswith('booster[0]'):
tree = [0] * num_nodes[b]
b += 1
else:
forest.append(tree)
tree = [0] * num_nodes[b]
b += 1
continue
l = row.rstrip().replace(' ', '').split(':')
if l[1][:4] == "leaf":
tmp = l[1].split('=')
tree[int(l[0])] = [-1, float(tmp[1]), -1, -1] # !!!!
else:
tmp = l[1].split('yes=')
tmp[0] = tmp[0].replace('[Features', '')
tmp[0] = tmp[0].replace('[Feature', '')
tmp[0] = tmp[0].replace('[f', '')
tmp[0] = tmp[0].replace(']', '')
tmp2 = tmp[0].split('<')
if float(tmp2[1]) < 0:
tmp2[1] = 1
tmp3 = tmp[1].split(",no=")
tmp4 = tmp3[1].split(',')
tree[int(l[0])] = [int(tmp2[0]), int(ceil(float(tmp2[1]))), int(tmp3[0]), int(tmp4[0])]
forest.append(tree)
with open('{}.c'.format(filename), 'w') as fout:
fout.write("static float score_{}_{}(unsigned int* v){{\n".format(args.model_name, args.type))
fout.write("float s = 0.;\n")
for tt in range(len(forest)):
fout.write(tree_to_code(forest[tt], 0, 1))
fout.write("\nreturn s;}\n")
"""
with open('{}.pyx'.format(filename), 'w') as fout:
fout.write("cdef extern from \"{}.c\":\n".format(filename))
fout.write("\tfloat score_{}(short unsigned short[{}] v)\n\n".format(args.type, numf))
fout.write("def myscore(sv):\n")
fout.write("\tcdef unsigned short[{}] v = sv\n".format(numf))
fout.write("\treturn score_{}(v)\n".format(args.type))
"""
os.remove("{}_dump.raw.txt".format(filename))
def tree_to_code(tree, pos, padding):
p = "\t" * padding
if tree[pos][0] == -1:
if tree[pos][1] < 0:
return p + "s = s {};\n".format(tree[pos][1])
else:
return p + "s = s + {};\n".format(tree[pos][1])
return p + "if (v[{}]<{}){{\n{}}}\n{}else{{\n{}}}".format(tree[pos][0], tree[pos][1], tree_to_code(tree, tree[pos][2], padding + 1), p, tree_to_code(tree, tree[pos][3], padding + 1))
def load_data(vector_filename, ion_type):
# Read file
if vector_filename.split('.')[-1] == 'pkl':
vectors = pd.read_pickle(vector_filename)
elif vector_filename.split('.')[-1] == 'h5':
#vectors = pd.read_hdf(vector_filename, key='table', stop=1000)
vectors = pd.read_hdf(vector_filename, key='table')
else:
print("Unsuported feature vector format")
exit(1)
print("{} contains {} feature vectors".format(args.vectors, len(vectors)))
#vectors = vectors[vectors["ce"]==2]
# Extract targets for given ion type
target_names = list(vectors.columns[vectors.columns.str.contains('targets')])
if not 'targets_{}'.format(ion_type) in target_names:
print("Targets for {} could not be found in vector file.".format(ion_type))
print("Vector file only contains these targets: {}".format(target_names))
exit(1)
targets = vectors.pop('targets_{}'.format(ion_type))
target_names.remove('targets_{}'.format(ion_type))
vectors.drop(labels=target_names, axis=1, inplace=True)
# Get psmids
psmids = vectors.pop('psmid')
# Cast vectors to numpy float for XGBoost
vectors = vectors.astype(np.float32)
return(vectors, targets, psmids)
def get_params_combinations(params):
keys, values = zip(*params.items())
combinations = [dict(zip(keys, v)) for v in product(*values)]
return(combinations)
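# Illustrative example (hypothetical grid, not the search space used below):
#   get_params_combinations({'max_depth': [6, 8], 'eta': [0.1]})
# expands the value lists into their full cross product:
#   [{'max_depth': 6, 'eta': 0.1}, {'max_depth': 8, 'eta': 0.1}]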
def get_best_params(df, params_grid):
params = {}
best = df[df['test-rmse-mean'] == df['test-rmse-mean'].min()]
for p in params_grid.keys():
params[p] = best[p].iloc[0]
# num_boost_round = best['boosting-round'].iloc[0]
return(params)
def gridsearch(xtrain, params, params_grid):
cols = ['boosting-round', 'test-rmse-mean', 'test-rmse-std', 'train-rmse-mean', 'train-rmse-std']
cols.extend(sorted(params_grid.keys()))
result = pd.DataFrame(columns=cols)
import pandas as pd
import glob
import os
import numpy as np
import time
import fastparquet
import argparse
from multiprocessing import Pool
import multiprocessing as mp
from os.path import isfile
parser = argparse.ArgumentParser(description='Program to run google compounder for a particular file and setting')
parser.add_argument('--data', type=str,
help='location of the pickle file')
# don't use this for now
parser.add_argument('--word', action='store_true',
help='Extracting context for words only?')
parser.add_argument('--output', type=str,
help='directory to save dataset in')
args = parser.parse_args()
with open('/mnt/dhr/CreateChallenge_ICC_0821/no_ner_0_50000.txt','r') as f:
contexts=f.read().split("\n")
contexts=contexts[:-1]
def left_side_parser(df): # N N _ _ _
cur_df=df.copy()
try:
cur_df[['modifier','head','w1','w2','w3']]=cur_df.lemma_pos.str.split(' ',expand=True)
except ValueError:
compound_df=pd.DataFrame()
modifier_df=pd.DataFrame()
head_df=pd.DataFrame()
return compound_df,modifier_df,head_df
compound_df=pd.melt(cur_df,id_vars=['modifier','head','year','count'],value_vars=['w1','w2','w3'],value_name='context')
compound_df=compound_df.loc[compound_df.context.isin(contexts)]
modifier_df=pd.melt(cur_df,id_vars=['modifier','year','count'],value_vars=['head','w1','w2'],value_name='context')
modifier_df=modifier_df.loc[modifier_df.context.isin(contexts)]
head_df=pd.melt(cur_df,id_vars=['head','year','count'],value_vars=['modifier','w1','w2','w3'],value_name='context')
head_df=head_df.loc[head_df.context.isin(contexts)]
return compound_df,modifier_df,head_df
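# Illustrative note (hypothetical 5-gram, not from the corpus): for a row whose
# lemma_pos splits into modifier, head and window words w1..w3, the melts above
# emit one long-format row per window word that also appears in the allowed
# `contexts` list, keeping modifier/head/year/count and storing the window word
# in the 'context' column.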
def mid1_parser(df): # _ N N _ _
cur_df=df.copy()
try:
cur_df[['w1','modifier','head','w2','w3']]=cur_df.lemma_pos.str.split(' ',expand=True)
except ValueError:
compound_df=pd.DataFrame()
modifier_df=pd.DataFrame()
head_df=pd.DataFrame()
return compound_df,modifier_df,head_df
compound_df=pd.melt(cur_df,id_vars=['modifier','head','year','count'],value_vars=['w1','w2','w3'],value_name='context')
compound_df=compound_df.loc[compound_df.context.isin(contexts)]
modifier_df=pd.melt(cur_df,id_vars=['modifier','year','count'],value_vars=['head','w1','w2','w3'],value_name='context')
modifier_df=modifier_df.loc[modifier_df.context.isin(contexts)]
head_df=pd.melt(cur_df,id_vars=['head','year','count'],value_vars=['modifier','w1','w2','w3'],value_name='context')
head_df=head_df.loc[head_df.context.isin(contexts)]
return compound_df,modifier_df,head_df
def mid2_parser(df): # _ _ N N _
cur_df=df.copy()
try:
cur_df[['w1','w2','modifier','head','w3']]=cur_df.lemma_pos.str.split(' ',expand=True)
except ValueError:
compound_df=pd.DataFrame()
modifier_df=pd.DataFrame()
head_df=pd.DataFrame()
return compound_df,modifier_df,head_df
compound_df=pd.melt(cur_df,id_vars=['modifier','head','year','count'],value_vars=['w1','w2','w3'],value_name='context')
compound_df=compound_df.loc[compound_df.context.isin(contexts)]
modifier_df=pd.melt(cur_df,id_vars=['modifier','year','count'],value_vars=['head','w1','w2','w3'],value_name='context')
modifier_df=modifier_df.loc[modifier_df.context.isin(contexts)]
head_df=pd.melt(cur_df,id_vars=['head','year','count'],value_vars=['modifier','w1','w2','w3'],value_name='context')
head_df=head_df.loc[head_df.context.isin(contexts)]
return compound_df,modifier_df,head_df
def right_side_parser(df): # _ _ _ N N
cur_df=df.copy()
try:
cur_df[['w1','w2','w3','modifier','head']]=cur_df.lemma_pos.str.split(' ',expand=True)
except ValueError:
compound_df=pd.DataFrame()
modifier_df=pd.DataFrame()
head_df=pd.DataFrame()
return compound_df,modifier_df,head_df
import itertools
import json
import os
import os.path
from collections import Counter
import numpy as np
# from torchtext import data
import pandas as pd
import torch
from nltk.stem import WordNetLemmatizer
from torch.autograd import Variable
from tqdm import tqdm
from wiki_util import tokenizeText
class Wiki_DataSet:
def __init__(self, args):
"""Create an Wiki dataset instance. """
# self.word_embed_file = self.data_folder + 'embedding/wiki.ar.vec'
# word_embed_file = data_folder + "embedding/Wiki-CBOW"
self.data_dir = args.data_path
self.data_file = self.data_dir + "wikicmnt.json"
self.vocab_file = self.data_dir + 'vocabulary.json'
self.train_df_file = self.data_dir + 'train_df.pkl'
self.val_df_file = self.data_dir + 'val_df.pkl'
self.test_df_file = self.data_dir + 'test_df.pkl'
self.tf_file = self.data_dir + 'term_freq.json'
self.weight_file = self.data_dir + 'train_weights.json'
self.glove_dir = args.glove_path
self.glove_vec_size = args.word_embd_size
self.lemmatizer = WordNetLemmatizer()
self.class_num = -1
self.rank_num = args.rank_num
self.anchor_num = args.anchor_num
self.max_ctx_length = int(args.max_ctx_length / 2)
pass
'''
Extract the data from raw json file
'''
def extract_data_from_json(self):
'''
Parse the json file and extract comments, negative comments, source/target
tokens and actions, positive/negative edits, word counters, and diff URLs.
:return: the extracted lists and word counters
'''
cmnt_list, neg_cmnts_list = [], []
src_token_list, src_action_list = [], []
tgt_token_list, tgt_action_list = [], []
pos_edits_list, neg_edits_list = [], []
diff_url_list = []
word_counter, lower_word_counter = Counter(), Counter()
print("Sample file:", self.data_file)
with open(self.data_file, 'r', encoding='utf-8') as f:
for idx, json_line in enumerate(tqdm(f)):
# if idx % 100 == 0:
# print("== processed ", idx)
article = json.loads(json_line.strip('\n'))
# print(article['diff_url'])
'''
Json file format:
=================
revision_id: The revision ID
parent_id: The parent revision ID
timestamp: Timestamp
diff_url: The wikipedia link to show the difference between previous and current version.
page_title: The title of page.
comment: Revision comment.
src_token: List of tokens in before-editing version
src_action: Action flags for each token in before-editing version. E.g., 0 represents no action; -1 represents removed token.
tgt_token: List of tokens in after-editing version
tgt_action: Action flags for each token in after-editing version. E.g., 0 represents no action; 1 represents added token.
neg_cmnts: Negative samples of user comments in the same page.
pos_edits: Edit sentences for comments.
neg_edits: Negative edit sentences for comments.
'''
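# Illustrative sketch of a single (hypothetical) record with the fields above:
#   {"revision_id": 2, "parent_id": 1, "comment": "fix typo",
#    "src_token": ["teh", "cat"], "src_action": [-1, 0],
#    "tgt_token": ["the", "cat"], "tgt_action": [1, 0],
#    "neg_cmnts": [...], "pos_edits": [...], "neg_edits": [...],
#    "diff_url": "https://en.wikipedia.org/..."}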
try:
# comment
comment = article['comment']
_, cmnt_tokens = tokenizeText(comment)
# cmnt_tokens = article['comment']
cmnt_list.append(cmnt_tokens)
# negative comments
# neg_cmnts = article['neg_cmnts']
neg_cmnts = []
for neg_cmnt in article['neg_cmnts']:
_, tokens = tokenizeText(neg_cmnt)
neg_cmnts.append(tokens)
neg_cmnts_list.append(neg_cmnts)
# source tokens and actions
src_token = article['src_token']
src_token_list.append(src_token)
src_action_list.append(article['src_action'])
# target tokens and actions
tgt_token = article['tgt_token']
tgt_token_list.append(tgt_token)
tgt_action_list.append(article['tgt_action'])
# positive and negative edits
pos_edits = article['pos_edits']
pos_edits_list.append(pos_edits)
neg_edits = article['neg_edits']
neg_edits_list.append(neg_edits)
# diff url
diff_url = article['diff_url']
diff_url_list.append(diff_url)
# for counters
for word in cmnt_tokens + src_token + tgt_token + \
list(itertools.chain.from_iterable(neg_cmnts + pos_edits + neg_edits)):
word_counter[word] += 1
lower_word_counter[word.lower()] += 1
except Exception as err:
# skip records that cannot be parsed (e.g. missing or malformed fields)
print("ERROR: failed to parse revision", article['revision_id'], err)
continue
# if idx >= 100:
# break
return cmnt_list, neg_cmnts_list, src_token_list, src_action_list, tgt_token_list, tgt_action_list, \
pos_edits_list, neg_edits_list, word_counter, lower_word_counter, diff_url_list
'''
Create dataset objects for wiki revision data.
Arguments:
train_ratio: The fraction of the data used for the training split.
val_ratio: The fraction of the data used for the validation split.
'''
def load_data(self, train_ratio, val_ratio):
print("loading wiki data ...")
# check the existence of data files
if os.path.isfile(self.train_df_file) and os.path.isfile(self.test_df_file) and os.path.isfile(self.vocab_file):
print("dataframe file exists:", self.train_df_file)
train_df = pd.read_pickle(self.train_df_file)
val_df = pd.read_pickle(self.val_df_file)
test_df = pd.read_pickle(self.test_df_file)
import os
import json
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.ticker as tck
from typing import Mapping, List
class Results:
def __init__(self, main_folder: str, combine_models: bool = False) -> None:
self.main_folder = main_folder
self.combine_models = combine_models
self.basic_results = {}
self.dtm_results = {}
self._load_results()
def get_data(self, name, dtm: bool = False, aggregated: bool = False):
if dtm:
if aggregated:
return self.dtm_results[name].groupby("Model").mean()
else:
return self.dtm_results[name]
else:
if aggregated:
return self.basic_results[name].groupby("Model").mean()
else:
return self.basic_results[name]
def get_keys(self):
return {
"basic": list(self.basic_results.keys()),
"dtm": list(self.dtm_results.keys()),
}
def visualize_table(self, dtm: bool = False, models: List[str] = None):
if dtm:
results = self.dtm_results["all"].copy()
else:
results = self.basic_results["all"].copy()
datasets = set([column for column, _ in results.columns if column])
if models:
results = results.loc[results[""]["Model"].isin(models), :]
return (
results.style.apply(highlight_max)
.format(
formatter={
(dataset, method): "{:.3f}"
for dataset in datasets
for method in [
"npmi",
"diversity",
]
}
)
.set_properties(**{"width": "10em", "text-align": "center"})
.set_table_styles([dict(selector="th", props=[("text-align", "center")])])
.set_table_styles([dict(selector="th", props=[("text-align", "center")])])
.set_table_styles(
{
(dataset, "npmi"): [
{"selector": "td", "props": "border-left: 2px solid #000066"}
]
for dataset in datasets
},
overwrite=False,
axis=0,
)
)
def visualize_table_tq(self, dtm: bool = False):
if dtm:
to_plot = self.dtm_results["all"].copy()
else:
to_plot = self.basic_results["all"].copy()
datasets = list(set([column for column, _ in to_plot.columns if column]))
models = to_plot[""]["Model"].values
averaged_results = pd.DataFrame({"Model": models})
for dataset in datasets:
averaged_results[dataset] = (
to_plot[dataset]["npmi"] * to_plot[dataset]["diversity"]
)
averaged_results[dataset] = averaged_results[dataset].astype(float).round(3)
return (
averaged_results.style.apply(highlight_max)
.format(
formatter={
(dataset): "{:.3f}"
for dataset in datasets
for method in ["npmi", "diversity"]
}
)
.set_properties(**{"width": "10em", "text-align": "center"})
.set_table_styles([dict(selector="th", props=[("text-align", "center")])])
)
def _load_results(self):
folders = os.listdir(self.main_folder)
if "Basic" in folders:
for folder in os.listdir(self.main_folder + "/Basic"):
self._results_per_folder(f"{self.main_folder}Basic/{folder}")
self._load_all_results()
if "Dynamic Topic Modeling" in folders:
for folder in os.listdir(self.main_folder + "Dynamic Topic Modeling"):
self._load_dtm_results(
f"{self.main_folder}Dynamic Topic Modeling/{folder}"
)
self._load_all_results(dtm=True)
if "Computation" in folders:
self._load_computation_results()
def _load_all_results(self, dtm=False):
# Load data
if dtm:
data = {
dataset: (
dataframe.groupby("Model")
.mean()
.loc[
:,
[
"npmi",
"diversity",
],
]
.reset_index()
)
for dataset, dataframe in self.dtm_results.items()
}
else:
data = {
dataset: (
dataframe.groupby("Model")
.mean()
.loc[
:,
[
"npmi",
"diversity",
],
]
.reset_index()
)
for dataset, dataframe in self.basic_results.items()
}
# Sort by model before concatenating
order = data[list(data.keys())[-2]].sort_values("npmi")["Model"].tolist()
models = pd.DataFrame({"Model": order})
for dataset in data.keys():
data[dataset] = (
data[dataset]
.set_index("Model")
.loc[order]
.reset_index()
.drop("Model", axis=1)
)
# MultiIndex
models.columns = pd.MultiIndex.from_product([[""], models.columns])
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime as dt
from typing import Optional
import pandas
import pandas.api.extensions
import pandas.testing
import pyarrow
import pytest
import db_dtypes
SECOND_NANOS = 1_000_000_000
MINUTE_NANOS = 60 * SECOND_NANOS
HOUR_NANOS = 60 * MINUTE_NANOS
def types_mapper(
pyarrow_type: pyarrow.DataType,
) -> Optional[pandas.api.extensions.ExtensionDtype]:
type_str = str(pyarrow_type)
if type_str.startswith("date32") or type_str.startswith("date64"):
return db_dtypes.DateDtype
elif type_str.startswith("time32") or type_str.startswith("time64"):
return db_dtypes.TimeDtype
else:
# Use default type mapping.
return None
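# Illustrative usage (not exercised in this module): the mapper is intended for
# pyarrow's pandas conversion hook, e.g.
#   table = pyarrow.table({"d": pyarrow.array([dt.date(2021, 9, 27)], type=pyarrow.date32())})
#   df = table.to_pandas(types_mapper=types_mapper)  # column "d" gets the dbdate dtype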
SERIES_ARRAYS_DEFAULT_TYPES = [
(pandas.Series([], dtype="dbdate"), pyarrow.array([], type=pyarrow.date32())),
(
pandas.Series([None, None, None], dtype="dbdate"),
pyarrow.array([None, None, None], type=pyarrow.date32()),
),
(
pandas.Series(
[dt.date(2021, 9, 27), None, dt.date(2011, 9, 27)], dtype="dbdate"
),
pyarrow.array(
[dt.date(2021, 9, 27), None, dt.date(2011, 9, 27)],
type=pyarrow.date32(),
),
),
(
pandas.Series(
[dt.date(1677, 9, 22), dt.date(1970, 1, 1), dt.date(2262, 4, 11)],
dtype="dbdate",
),
pyarrow.array(
[dt.date(1677, 9, 22), dt.date(1970, 1, 1), dt.date(2262, 4, 11)],
type=pyarrow.date32(),
),
),
(
pandas.Series([], dtype="dbtime"),
pyarrow.array([], type=pyarrow.time64("ns")),
),
(
pandas.Series([None, None, None], dtype="dbtime"),
pyarrow.array([None, None, None], type=pyarrow.time64("ns")),
),
(
pandas.Series(
[dt.time(0, 0, 0, 0), None, dt.time(23, 59, 59, 999_999)],
dtype="dbtime",
),
pyarrow.array(
[dt.time(0, 0, 0, 0), None, dt.time(23, 59, 59, 999_999)],
type=pyarrow.time64("ns"),
),
),
(
pandas.Series(
[
dt.time(0, 0, 0, 0),
dt.time(12, 30, 15, 125_000),
dt.time(23, 59, 59, 999_999),
],
dtype="dbtime",
),
pyarrow.array(
[
dt.time(0, 0, 0, 0),
dt.time(12, 30, 15, 125_000),
dt.time(23, 59, 59, 999_999),
],
type=pyarrow.time64("ns"),
),
),
]
SERIES_ARRAYS_CUSTOM_ARROW_TYPES = [
(pandas.Series([], dtype="dbdate"), pyarrow.array([], type=pyarrow.date64())),
(
pandas.Series([None, None, None], dtype="dbdate"),
pyarrow.array([None, None, None], type=pyarrow.date64()),
),
(
pandas.Series(
[dt.date(2021, 9, 27), None, dt.date(2011, 9, 27)], dtype="dbdate"
),
pyarrow.array(
[dt.date(2021, 9, 27), None, dt.date(2011, 9, 27)],
type=pyarrow.date64(),
),
),
(
pandas.Series(
[dt.date(1677, 9, 22), dt.date(1970, 1, 1), dt.date(2262, 4, 11)],
dtype="dbdate",
),
pyarrow.array(
[dt.date(1677, 9, 22), dt.date(1970, 1, 1), dt.date(2262, 4, 11)],
type=pyarrow.date64(),
),
),
(
pandas.Series([], dtype="dbtime"),
pyarrow.array([], type=pyarrow.time32("ms")),
),
(
| pandas.Series([None, None, None], dtype="dbtime") | pandas.Series |
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 11 10:17:13 2018
@author: David
"""
# Built-in libraries
#import argparse
#import collections
#import multiprocessing
import os
#import pickle
#import time
# External libraries
#import rasterio
#import gdal
import matplotlib.pyplot as plt
from matplotlib.patches import FancyBboxPatch
import numpy as np
import pandas as pd
from scipy.optimize import curve_fit
from scipy.stats import linregress
from scipy.stats import median_abs_deviation
import xarray as xr
# Local libraries
import debrisglobal.globaldebris_input as debris_prms
from meltcurves import melt_fromdebris_func
#%% ===== SCRIPT OPTIONS =====
option_melt_comparison = False
option_hd_comparison = True
option_hd_centerline = False
option_hd_spatial_compare = False
hd_obs_fp = debris_prms.main_directory + '/../hd_obs/'
melt_compare_fp = debris_prms.main_directory + '/../hd_obs/figures/hd_melt_compare/'
hd_compare_fp = debris_prms.main_directory + '/../hd_obs/figures/hd_obs_compare/'
hd_centerline_fp = debris_prms.main_directory + '/../hd_obs/centerline_hd/'
if os.path.exists(melt_compare_fp) == False:
os.makedirs(melt_compare_fp)
if os.path.exists(hd_compare_fp) == False:
os.makedirs(hd_compare_fp)
#%% ===== FUNCTIONS =====
def plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
melt_fp, melt_fn, ds_names=None,
hd_min=0, hd_max=2, hd_tick_major=0.25, hd_tick_minor=0.05,
melt_min=0, melt_max=70, melt_tick_major=10, melt_tick_minor=5,
plot_meltfactor=False, z_value = 1.645, fontsize=11):
#%%
""" Plot comparison of debris vs. melt for various sites """
# Dataset of melt data
ds_ostrem = xr.open_dataset(melt_fp + melt_fn)
ds_ostrem = ds_ostrem.sortby('hd_cm')
time_year = pd.to_datetime(ds_ostrem.time.values).year
time_daysperyear = np.array([366 if x%4 == 0 else 365 for x in time_year])
time_yearfrac = time_year + (pd.to_datetime(ds_ostrem.time.values).dayofyear-1) / time_daysperyear
color_dict = {0:'k', 1:'b', 2:'r'}
symbol_dict = {0:'D', 1:'o', 2:'^'}
# ===== PLOT DEBRIS VS. SURFACE LOWERING =====
fig, ax = plt.subplots(1, 1, squeeze=False, sharex=False, sharey=False,
gridspec_kw = {'wspace':0.4, 'hspace':0.15})
melt_obs_all = []
hd_obs_all = []
melt_mod_all = []
melt_mod_bndlow_all = []
melt_mod_bndhigh_all = []
for n in np.arange(0,len(measured_hd_list)):
measured_hd = measured_hd_list[n]
measured_melt = measured_melt_list[n]
melt_obs_all.extend(measured_melt)
hd_obs_all.extend(measured_hd)
yearfracs = yearfracs_list[n]
start_yearfrac = yearfracs[0]
end_yearfrac = yearfracs[1]
if ds_names is not None:
ds_name = ds_names[n]
else:
ds_name = None
start_idx = np.where(abs(time_yearfrac - start_yearfrac) == abs(time_yearfrac - start_yearfrac).min())[0][0]
end_idx = np.where(abs(time_yearfrac - end_yearfrac) == abs(time_yearfrac - end_yearfrac).min())[0][0]
# Ostrem Curve
debris_thicknesses = ds_ostrem.hd_cm.values
debris_melt_df = pd.DataFrame(np.zeros((len(debris_thicknesses),3)),
columns=['debris_thickness', 'melt_mmwed', 'melt_std_mmwed'])
nelev = 0
for ndebris, debris_thickness in enumerate(debris_thicknesses):
# Units: mm w.e. per day
melt_mmwed = (ds_ostrem['melt'][ndebris,start_idx:end_idx,nelev].values.sum()
* 1000 / len(time_yearfrac[start_idx:end_idx]))
melt_std_mmwed = (ds_ostrem['melt_std'][ndebris,start_idx:end_idx,nelev].values.sum()
* 1000 / len(time_yearfrac[start_idx:end_idx]))
debris_melt_df.loc[ndebris] = debris_thickness / 100, melt_mmwed, melt_std_mmwed
debris_melt_df['melt_bndlow_mmwed'] = debris_melt_df['melt_mmwed'] - z_value * debris_melt_df['melt_std_mmwed']
debris_melt_df['melt_bndhigh_mmwed'] = debris_melt_df['melt_mmwed'] + z_value * debris_melt_df['melt_std_mmwed']
#%%
# MEAN CURVE
fit_idx = list(np.where(debris_thicknesses >= 5)[0])
func_coeff, pcov = curve_fit(melt_fromdebris_func,
debris_melt_df.debris_thickness.values[fit_idx],
debris_melt_df.melt_mmwed.values[fit_idx])
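# melt_fromdebris_func is the hyperbolic Ostrem fit b = b0 / (1 + k*b0*h); with
# (hypothetical) coefficients b0 = 60 mm w.e. d-1 and k = 2 m-1, a debris
# thickness h = 0.1 m gives 60 / (1 + 2*60*0.1) = 60/13, i.e. roughly 4.6 mm w.e. d-1.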
melt_cleanice = debris_melt_df.loc[0,'melt_mmwed']
# Fitted curve
debris_4curve = np.arange(0.02,5.01,0.01)
melt_4curve = melt_fromdebris_func(debris_4curve, func_coeff[0], func_coeff[1])
# add clean ice
debris_4curve = np.concatenate([[0.0], debris_4curve])
melt_4curve = np.concatenate([[melt_cleanice], melt_4curve])
# Linearly interpolate between 0 cm and 2 cm for the melt rate
def melt_0to2cm_adjustment(melt, melt_clean, melt_2cm, hd):
""" Linearly interpolate melt factors between 0 and 2 cm
based on clean ice and 2 cm sub-debris melt """
melt[(hd >= 0) & (hd < 0.02)] = (
melt_clean + hd[(hd >= 0) & (hd < 0.02)] / 0.02 * (melt_2cm - melt_clean))
return melt
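# Worked illustration (hypothetical numbers): with melt_clean = 40 and
# melt_2cm = 60 mm w.e. d-1, a measured hd of 0.01 m is mapped to
# 40 + 0.01/0.02 * (60 - 40) = 50 mm w.e. d-1, i.e. halfway between the
# clean-ice and 2 cm sub-debris melt rates.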
melt_mod = melt_fromdebris_func(measured_hd, func_coeff[0], func_coeff[1])
melt_2cm = melt_fromdebris_func(0.02, func_coeff[0], func_coeff[1])
melt_mod = melt_0to2cm_adjustment(melt_mod, melt_cleanice, melt_2cm, measured_hd)
melt_mod_all.extend(melt_mod)
# LOWER BOUND CURVE
func_coeff_bndlow, pcov = curve_fit(melt_fromdebris_func,
debris_melt_df.debris_thickness.values[fit_idx],
debris_melt_df.melt_bndlow_mmwed.values[fit_idx])
melt_cleanice_bndlow = debris_melt_df.loc[0,'melt_bndlow_mmwed']
# Fitted curve
debris_4curve = np.arange(0.02,5.01,0.01)
melt_4curve_bndlow = melt_fromdebris_func(debris_4curve, func_coeff_bndlow[0], func_coeff_bndlow[1])
# add clean ice
debris_4curve = np.concatenate([[0.0], debris_4curve])
melt_4curve_bndlow = np.concatenate([[melt_cleanice_bndlow], melt_4curve_bndlow])
melt_mod_bndlow = melt_fromdebris_func(measured_hd, func_coeff_bndlow[0], func_coeff_bndlow[1])
melt_2cm_bndlow = melt_fromdebris_func(0.02, func_coeff_bndlow[0], func_coeff_bndlow[1])
melt_mod_bndlow = melt_0to2cm_adjustment(melt_mod_bndlow, melt_cleanice_bndlow, melt_2cm_bndlow, measured_hd)
melt_mod_bndlow_all.extend(melt_mod_bndlow)
# UPPER BOUND CURVE
func_coeff_bndhigh, pcov = curve_fit(melt_fromdebris_func,
debris_melt_df.debris_thickness.values[fit_idx],
debris_melt_df.melt_bndhigh_mmwed.values[fit_idx])
melt_cleanice_bndhigh = debris_melt_df.loc[0,'melt_bndhigh_mmwed']
# Fitted curve
debris_4curve = np.arange(0.02,5.01,0.01)
melt_4curve_bndhigh = melt_fromdebris_func(debris_4curve, func_coeff_bndhigh[0], func_coeff_bndhigh[1])
# add clean ice
debris_4curve = np.concatenate([[0.0], debris_4curve])
melt_4curve_bndhigh = np.concatenate([[melt_cleanice_bndhigh], melt_4curve_bndhigh])
melt_mod_bndhigh = melt_fromdebris_func(measured_hd, func_coeff_bndhigh[0], func_coeff_bndhigh[1])
melt_2cm_bndhigh = melt_fromdebris_func(0.02, func_coeff_bndhigh[0], func_coeff_bndhigh[1])
melt_mod_bndhigh = melt_0to2cm_adjustment(melt_mod_bndhigh, melt_cleanice_bndhigh,melt_2cm_bndhigh, measured_hd)
melt_mod_bndhigh_all.extend(melt_mod_bndhigh)
if plot_meltfactor:
melt_4curve = melt_4curve / melt_cleanice
melt_4curve_bndlow = melt_4curve_bndlow / melt_cleanice
melt_4curve_bndhigh = melt_4curve_bndhigh / melt_cleanice
# Plot curve
ax[0,0].plot(measured_hd, measured_melt, symbol_dict[n], color=color_dict[n],
markersize=3, markerfacecolor="None", markeredgewidth=0.5, zorder=5, label=ds_name, clip_on=False)
ax[0,0].plot(debris_4curve, melt_4curve,
color=color_dict[n], linewidth=1, linestyle='--', zorder=5-n)
ax[0,0].fill_between(debris_4curve, melt_4curve_bndlow, melt_4curve_bndhigh,
color=color_dict[n], linewidth=0, zorder=5-n, alpha=0.2)
# text
# ax[0,0].text(0.5, 1.09, glac_name, size=fontsize-2, horizontalalignment='center', verticalalignment='top',
# transform=ax[0,0].transAxes)
ax[0,0].text(0.5, 1.11, glac_name, size=fontsize-2, horizontalalignment='center', verticalalignment='top',
transform=ax[0,0].transAxes)
# eqn_text = r'$b = \frac{b_{0}}{1 + kb_{0}h}$'
# coeff1_text = r'$b_{0} = ' + str(np.round(func_coeff[0],2)) + '$'
# coeff2_text = r'$k = ' + str(np.round(func_coeff[1],2)) + '$'
# # coeff$\frac{b_{0}}{1 + 2kb_{0}h}$'
# ax[0,0].text(0.9, 0.95, eqn_text, size=12, horizontalalignment='right', verticalalignment='top',
# transform=ax[0,0].transAxes)
# ax[0,0].text(0.615, 0.83, 'where', size=10, horizontalalignment='left', verticalalignment='top',
# transform=ax[0,0].transAxes)
# ax[0,0].text(0.66, 0.77, coeff1_text, size=10, horizontalalignment='left', verticalalignment='top',
# transform=ax[0,0].transAxes)
# ax[0,0].text(0.66, 0.7, coeff2_text, size=10, horizontalalignment='left', verticalalignment='top',
# transform=ax[0,0].transAxes)
# X-label
# ax[0,0].set_xlabel('Debris thickness (m)', size=fontsize)
ax[0,0].set_xlim(hd_min, hd_max)
ax[0,0].xaxis.set_major_locator(plt.MultipleLocator(hd_tick_major))
ax[0,0].xaxis.set_minor_locator(plt.MultipleLocator(hd_tick_minor))
# Y-label
# if plot_meltfactor:
# ylabel_str = 'Melt (-)'
# else:
# ylabel_str = 'Melt (mm w.e. d$^{-1}$)'
# ax[0,0].set_ylabel(ylabel_str, size=fontsize)
ax[0,0].set_ylim(melt_min, melt_max)
ax[0,0].yaxis.set_major_locator(plt.MultipleLocator(melt_tick_major))
ax[0,0].yaxis.set_minor_locator(plt.MultipleLocator(melt_tick_minor))
# Tick parameters
ax[0,0].yaxis.set_ticks_position('both')
ax[0,0].tick_params(axis='both', which='major', labelsize=fontsize-2, direction='inout')
ax[0,0].tick_params(axis='both', which='minor', labelsize=fontsize-4, direction='in')
# Legend
ax[0,0].legend(ncol=1, fontsize=fontsize-3, frameon=True, handlelength=1,
handletextpad=0.15, columnspacing=0.5, borderpad=0.25, labelspacing=0.5, framealpha=0.5)
# Save plot
fig.set_size_inches(2, 1.5)
fig.savefig(melt_compare_fp + fig_fn, bbox_inches='tight', dpi=300, transparent=True)
plt.close()
return hd_obs_all, melt_obs_all, melt_mod_all, melt_mod_bndlow_all, melt_mod_bndhigh_all
#%%
if option_melt_comparison:
# glaciers = ['1.15645', '2.14297', '6.00474', '7.01044', '10.01732', '11.00719', '11.02810', '11.02858', '11.03005',
# '12.01012', '12.01132', '13.05000', '13.43232', '14.06794', '14.16042', '15.03733', '15.03743',
# '15.04045', '15.07886', '15.11758', '18.02397']
glaciers = ['1.15645', '2.14297', '7.01044', '11.00719', '11.02472', '11.02810', '11.02858', '11.03005',
'12.01012', '12.01132', '13.05000', '13.43165', '13.43232', '14.06794', '14.16042', '15.03733',
'15.03743', '15.04045', '15.07122', '15.07886', '15.11758', '18.02375', '18.02397']
# glaciers = ['10.01732']
# glaciers = ['13.43165']
# glaciers = ['13.43232']
# glaciers = ['11.02858']
z_value = 1.645
hd_obs_all, melt_obs_all, melt_mod_all, melt_mod_bndlow_all, melt_mod_bndhigh_all, reg_all = [], [], [], [], [], []
rgiid_all = []
# ===== KENNICOTT (1.15645) ====
if '1.15645' in glaciers:
print('\nmelt comparison with Anderson et al. 2019')
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/1.15645_kennicott_anderson_2019-melt.csv')
measured_hd_list = [mb_df.hd_m.values]
measured_melt_list = [mb_df.melt_mmwed.values]
glac_name = "Kennicott (1.15645)"
fig_fn = '1.15645_hd_melt_And2019.png'
# ds_names = ['Anderson 2019\n(6/18/11$\u2009$-$\u2009$8/16/11)']
ds_names = ['6/18/11$\u2009$-$\u2009$8/16/11']
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '6150N-21700E-debris_melt_curve.nc'
yearfracs_list = [[2011 + 169/365, 2011 + 228/365]]
hd_min, hd_max = 0, np.ceil(np.max([x.max() for x in measured_hd_list])/0.1)*0.1 + 0.05
hd_tick_major, hd_tick_minor = 0.2, 0.05
melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 10) * 10,0) + 5
melt_tick_major, melt_tick_minor = 20, 5
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_obs, melt_obs, melt_mod, melt_mod_bndlow, melt_mod_bndhigh = (
plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
melt_fp, melt_fn,
ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
melt_min=melt_min, melt_max=melt_max,
melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor))
hd_obs_all.append(hd_obs)
melt_obs_all.append(melt_obs)
melt_mod_all.append(melt_mod)
melt_mod_bndlow_all.append(melt_mod_bndlow)
melt_mod_bndhigh_all.append(melt_mod_bndhigh)
reg_all.append(['01'])
rgiid_all.append(['1.15645'])
# ===== Emmons (2.14297) ====
if '2.14297' in glaciers:
print('\nmelt comparison with Moore et al. 2019')
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/2.14297_moore2019-melt.csv')
measured_hd_list = [mb_df.hd_m.values]
measured_melt_list = [mb_df.melt_mmwed.values]
glac_name = "Emmons (2.14297)"
fig_fn = '2.14297_hd_melt_Moo2019.png'
# ds_names = ['Moore 2019\n(7/31/14$\u2009$-$\u2009$8/10/14)']
ds_names = ['7/31/14$\u2009$-$\u2009$8/10/14']
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '4700N-23825E-debris_melt_curve.nc'
yearfracs_list = [[2014 + 212/365, 2014 + 222/365]]
hd_min, hd_max = 0, np.ceil(np.max([x.max() for x in measured_hd_list])/0.1)*0.1 + 0.05
hd_tick_major, hd_tick_minor = 0.2, 0.05
melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 10) * 10,0) + 5
melt_tick_major, melt_tick_minor = 20, 5
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_obs, melt_obs, melt_mod, melt_mod_bndlow, melt_mod_bndhigh = (
plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
melt_fp, melt_fn,
ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
melt_min=melt_min, melt_max=melt_max,
melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor))
hd_obs_all.append(hd_obs)
melt_obs_all.append(melt_obs)
melt_mod_all.append(melt_mod)
melt_mod_bndlow_all.append(melt_mod_bndlow)
melt_mod_bndhigh_all.append(melt_mod_bndhigh)
reg_all.append(['02'])
rgiid_all.append(['2.14297'])
# ===== Svinafellsjokull (06.00474) ====
if '6.00474' in glaciers:
print('\nmelt comparison with Moller et al (2016)')
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/6.00474_moller2016-melt.csv')
measured_hd_list = [mb_df.hd_m.values]
measured_melt_list = [mb_df['melt_mf'].values]
glac_name = "Svinafellsjokull (6.00474)"
fig_fn = '6.00474_hd_melt_Moller2016.png'
# ds_names = ['Moller 2016\n(5/17/13$\u2009$-$\u2009$5/30/13)']
ds_names = ['5/17/13$\u2009$-$\u2009$5/30/13']
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '6400N-34325E-debris_melt_curve.nc'
yearfracs_list = [[2013 + 137/365, 2013 + 150/365]]
hd_min, hd_max = 0, np.ceil(np.max([x.max() for x in measured_hd_list])/0.1)*0.1 + 0.05
hd_tick_major, hd_tick_minor = 0.1, 0.05
# melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 10) * 10,0) + 5
# melt_tick_major, melt_tick_minor = 10, 5
melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 0.1) * 0.1,1) + 0.1
melt_tick_major, melt_tick_minor = 0.5, 0.1
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
melt_fp, melt_fn,
ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
melt_min=melt_min, melt_max=melt_max,
melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor,
plot_meltfactor=True)
hd_obs, melt_obs, melt_mod, melt_mod_bndlow, melt_mod_bndhigh = (
plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
melt_fp, melt_fn,
ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
melt_min=melt_min, melt_max=melt_max,
melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor))
hd_obs_all.append(hd_obs)
melt_obs_all.append(melt_obs)
melt_mod_all.append(melt_mod)
melt_mod_bndlow_all.append(melt_mod_bndlow)
melt_mod_bndhigh_all.append(melt_mod_bndhigh)
reg_all.append(['06'])
rgiid_all.append(['6.00474'])
# ===== Larsbreen (7.01044) ====
if '7.01044' in glaciers:
print('\nmelt comparison with Nicholson and Benn 2006')
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/7.01044_larsbreen_NB2006-melt.csv')
measured_hd_list = [mb_df.hd_m.values]
measured_melt_list = [mb_df.melt_mmwed.values]
glac_name = "Larsbreen (7.01044)"
fig_fn = '7.01044_hd_melt_NichBenn2006.png'
# ds_names = ['Nicholson 2006\n(7/09/02$\u2009$-$\u2009$7/20/02)']
ds_names = ['7/09/02$\u2009$-$\u2009$7/20/02']
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '7825N-1600E-debris_melt_curve.nc'
yearfracs_list = [[2002 + 191/366, 2002 + 202/366]]
hd_min, hd_max = 0, np.ceil(np.max([x.max() for x in measured_hd_list])/0.1)*0.1 + 0.05
hd_tick_major, hd_tick_minor = 0.2, 0.05
melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 10) * 10,0) + 5
melt_tick_major, melt_tick_minor = 20, 5
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_obs, melt_obs, melt_mod, melt_mod_bndlow, melt_mod_bndhigh = (
plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
melt_fp, melt_fn,
ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
melt_min=melt_min, melt_max=melt_max,
melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor))
hd_obs_all.append(hd_obs)
melt_obs_all.append(melt_obs)
melt_mod_all.append(melt_mod)
melt_mod_bndlow_all.append(melt_mod_bndlow)
melt_mod_bndhigh_all.append(melt_mod_bndhigh)
reg_all.append(['07'])
rgiid_all.append(['7.01044'])
# ===== <NAME> (10.01732) ====
if '10.01732' in glaciers:
# print('\nmelt comparison with Mayer et al (2011)')
assert True == False, '10.01732 NEEDS TO DO THE MODELING FIRST!'
# # Data: debris thickness (m) and melt rate (mm w.e. d-1)
# mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/10.01732_mayer2011-melt.csv')
# measured_hd_list = [mb_df.hd_m.values]
# measured_melt_list = [mb_df['melt_mf'].values]
# glac_name = "<NAME> (10.01732)"
# fig_fn = '10.01732_hd_melt_Mayer2011.png'
## ds_names = ['Mayer 2011\n(7/11/07$\u2009$-$\u2009$7/30/07)']
# ds_names = ['7/11/07$\u2009$-$\u2009$7/30/07']
# melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
# melt_fn = '5000N-8775E-debris_melt_curve.nc'
# yearfracs_list = [[2007 + 192/365, 2007 + 211/365]]
#
# hd_min, hd_max = 0, np.ceil(np.max([x.max() for x in measured_hd_list])/0.1)*0.1 + 0.05
# hd_tick_major, hd_tick_minor = 0.1, 0.02
# melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 10) * 10,0) + 5
# melt_tick_major, melt_tick_minor = 10, 5
#
# for n in np.arange(0,len(measured_hd_list)):
# assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
#
# hd_obs, melt_obs, melt_mod, melt_mod_bndlow, melt_mod_bndhigh = (
# plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
# melt_fp, melt_fn,
# ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
# hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
# melt_min=melt_min, melt_max=melt_max,
# melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor))
# hd_obs_all.append(hd_obs)
# melt_obs_all.append(melt_obs)
# melt_mod_all.append(melt_mod)
# melt_mod_bndlow_all.append(melt_mod_bndlow)
# melt_mod_bndhigh_all.append(melt_mod_bndhigh)
# reg_all.append(['10'])
# ===== Vernagtferner (11.00719) ====
if '11.00719' in glaciers:
print('\nmelt comparison with Juen et al (2013)')
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/11.00719_vernagtferner_juen2013-melt.csv')
measured_hd_list = [mb_df.hd_m.values]
measured_melt_list = [mb_df.melt_mmwed.values]
glac_name = "Vernagtferner (11.00719)"
fig_fn = '11.00719_hd_melt_Juen2013.png'
# ds_names = ['Juen 2013\n(6/25/10$\u2009$-$\u2009$7/10/10)']
ds_names = ['6/25/10$\u2009$-$\u2009$7/10/10']
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '4700N-1075E-debris_melt_curve.nc'
yearfracs_list = [[2010 + 176/365, 2010 + 191/365]]
hd_min, hd_max = 0, np.ceil(np.max([x.max() for x in measured_hd_list])/0.1)*0.1 + 0.05
hd_tick_major, hd_tick_minor = 0.1, 0.05
melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 10) * 10,0) + 5
melt_tick_major, melt_tick_minor = 20, 5
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_obs, melt_obs, melt_mod, melt_mod_bndlow, melt_mod_bndhigh = (
plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
melt_fp, melt_fn,
ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
melt_min=melt_min, melt_max=melt_max,
melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor))
hd_obs_all.append(hd_obs)
melt_obs_all.append(melt_obs)
melt_mod_all.append(melt_mod)
melt_mod_bndlow_all.append(melt_mod_bndlow)
melt_mod_bndhigh_all.append(melt_mod_bndhigh)
reg_all.append(['11'])
rgiid_all.append(['11.00719'])
# ===== Vernocolo (11.02472) =====
if '11.02472' in glaciers:
print('\nmelt comparison with bocchiola et al. (2015)')
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/11.02472_bocchiola2015-melt.csv')
measured_hd_list = [mb_df.hd_m.values]
measured_melt_list = [mb_df.melt_mmwed.values]
glac_name = "Venerocolo (11.02472)"
fig_fn = '11.02472_hd_melt_Boc2015.png'
ds_names = ['8/10/07$\u2009$-$\u2009$9/13/07']
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '4625N-1050E-debris_melt_curve.nc'
yearfracs_list = [[2007 + 222/365, 2007 + 256/365]]
hd_min, hd_max = 0, np.ceil(np.max([x.max() for x in measured_hd_list])/0.1)*0.1 + 0.05
hd_tick_major, hd_tick_minor = 0.1, 0.05
melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 10) * 10,0) + 5
melt_tick_major, melt_tick_minor = 10, 5
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_obs, melt_obs, melt_mod, melt_mod_bndlow, melt_mod_bndhigh = (
plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
melt_fp, melt_fn,
ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
melt_min=melt_min, melt_max=melt_max,
melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor))
hd_obs_all.append(hd_obs)
melt_obs_all.append(melt_obs)
melt_mod_all.append(melt_mod)
melt_mod_bndlow_all.append(melt_mod_bndlow)
melt_mod_bndhigh_all.append(melt_mod_bndhigh)
reg_all.append(['11'])
rgiid_all.append(['11.02472'])
# ===== Arolla (11.02810) ====
if '11.02810' in glaciers:
print('\nmelt comparison with Reid et al (2012)')
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/11.02810_arolla_reid2012-melt.csv')
measured_hd_list = [mb_df.hd_m.values]
measured_melt_list = [mb_df.melt_mmwed.values]
glac_name = "Arolla (11.02810)"
fig_fn = '11.02810_hd_melt_Reid2012.png'
# ds_names = ['Reid 2012\n(7/28/10$\u2009$-$\u2009$9/09/10)']
ds_names = ['7/28/10$\u2009$-$\u2009$9/09/10']
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '4600N-750E-debris_melt_curve.nc'
yearfracs_list = [[2010 + 209/365, 2010 + 252/365]]
hd_min, hd_max = 0, np.ceil(np.max([x.max() for x in measured_hd_list])/0.1)*0.1 + 0.05
hd_tick_major, hd_tick_minor = 0.1, 0.05
melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 10) * 10,0) + 5
melt_tick_major, melt_tick_minor = 10, 5
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_obs, melt_obs, melt_mod, melt_mod_bndlow, melt_mod_bndhigh = (
plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
melt_fp, melt_fn,
ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
melt_min=melt_min, melt_max=melt_max,
melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor))
hd_obs_all.append(hd_obs)
melt_obs_all.append(melt_obs)
melt_mod_all.append(melt_mod)
melt_mod_bndlow_all.append(melt_mod_bndlow)
melt_mod_bndhigh_all.append(melt_mod_bndhigh)
reg_all.append(['11'])
rgiid_all.append(['11.02810'])
# ===== Belvedere (11.02858) ====
if '11.02858' in glaciers:
print('\nmelt comparison with Nicholson and Benn (2006)')
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/11.02858_belvedere_nb2006-melt.csv')
measured_hd_list = [mb_df.hd_m.values]
measured_melt_list = [mb_df.melt_mmwed.values]
glac_name = "Belvedere (11.02858)"
fig_fn = '11.02858_hd_melt_NB2006.png'
# ds_names = ['Nicholson 2006\n(8/06/03$\u2009$-$\u2009$8/10/03)']
ds_names = ['8/06/03$\u2009$-$\u2009$8/10/03']
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '4600N-800E-debris_melt_curve.nc'
yearfracs_list = [[2003 + 218/365, 2003 + 222/365]]
hd_min, hd_max = 0, np.ceil(np.max([x.max() for x in measured_hd_list])/0.1)*0.1 + 0.05
hd_tick_major, hd_tick_minor = 0.1, 0.05
melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 10) * 10,0) + 5
melt_tick_major, melt_tick_minor = 40, 10
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_obs, melt_obs, melt_mod, melt_mod_bndlow, melt_mod_bndhigh = (
plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
melt_fp, melt_fn,
ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
melt_min=melt_min, melt_max=melt_max,
melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor))
hd_obs_all.append(hd_obs)
melt_obs_all.append(melt_obs)
melt_mod_all.append(melt_mod)
melt_mod_bndlow_all.append(melt_mod_bndlow)
melt_mod_bndhigh_all.append(melt_mod_bndhigh)
reg_all.append(['11'])
rgiid_all.append(['11.02858'])
# ===== MIAGE (11.03005) ====
if '11.03005' in glaciers:
print('\nmelt comparison with Reid and Brock (2010)')
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/11.03005_reid2010-melt.csv')
measured_hd_list = [mb_df.hd_m.values]
measured_melt_list = [mb_df.melt_mmwed.values]
glac_name = 'Miage (11.03005)'
fig_fn = '11.03005_hd_melt_Reid2010.png'
# ds_names = ['Reid 2010\n(6/21/05$\u2009$-$\u2009$9/04/05)']
ds_names = ['6/21/05$\u2009$-$\u2009$9/04/05']
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '4575N-675E-debris_melt_curve.nc'
yearfracs_list = [[2005 + 172/365, 2005 + 247/365]]
hd_min, hd_max = 0, np.ceil(np.max([x.max() for x in measured_hd_list])/0.1)*0.1 + 0.05
hd_tick_major, hd_tick_minor = 0.2, 0.05
melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 10) * 10,0) + 5
melt_tick_major, melt_tick_minor = 20, 5
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_obs, melt_obs, melt_mod, melt_mod_bndlow, melt_mod_bndhigh = (
plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
melt_fp, melt_fn,
ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
melt_min=melt_min, melt_max=melt_max,
melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor))
hd_obs_all.append(hd_obs)
melt_obs_all.append(melt_obs)
melt_mod_all.append(melt_mod)
melt_mod_bndlow_all.append(melt_mod_bndlow)
melt_mod_bndhigh_all.append(melt_mod_bndhigh)
reg_all.append(['11'])
rgiid_all.append(['11.03005'])
# ===== Zopkhito (12.01012) ====
if '12.01012' in glaciers:
print('\nmelt comparison with Lambrecht et al (2011)')
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/12.01012_lambrecht2011-melt2008.csv')
mb_df2 = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/12.01012_lambrecht2011-melt2009.csv')
measured_hd_list = [mb_df.hd_m.values, mb_df2.hd_m.values]
measured_melt_list = [mb_df['melt_mmwed'].values, mb_df2['melt_mmwed'].values]
glac_name = "Zopkhito (12.01012)"
fig_fn = '12.01012_hd_melt_Lambrecht2011.png'
# ds_names = ['Lambrecht 2011\n(6/20/08$\u2009$-$\u2009$6/27/08)',
# 'Lambrecht 2011\n(7/01/09$\u2009$-$\u2009$7/08/09)']
ds_names = ['6/26/08$\u2009$-$\u2009$7/01/08',
'7/13/09$\u2009$-$\u2009$7/17/09']
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '4300N-4350E-debris_melt_curve.nc'
yearfracs_list = [[2008 + 178/366, 2008 + 183/366], [2009 + 194/365, 2009 + 198/365]]
hd_min, hd_max = 0, np.ceil(np.max([x.max() for x in measured_hd_list])/0.1)*0.1 + 0.05
hd_tick_major, hd_tick_minor = 0.1, 0.05
melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 10) * 10,0) + 5
melt_tick_major, melt_tick_minor = 40, 10
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_obs, melt_obs, melt_mod, melt_mod_bndlow, melt_mod_bndhigh = (
plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
melt_fp, melt_fn,
ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
melt_min=melt_min, melt_max=melt_max,
melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor))
hd_obs_all.append(hd_obs)
melt_obs_all.append(melt_obs)
melt_mod_all.append(melt_mod)
melt_mod_bndlow_all.append(melt_mod_bndlow)
melt_mod_bndhigh_all.append(melt_mod_bndhigh)
reg_all.append(['12'])
rgiid_all.append(['12.01012'])
# ===== Djankuat (12.01132) ====
if '12.01132' in glaciers:
print('\nmelt comparison with Lambrecht et al (2011)')
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/12.01132_lambrecht2011-melt2007.csv')
mb_df2 = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/12.01132_lambrecht2011-melt2008.csv')
measured_hd_list = [mb_df.hd_m.values, mb_df2.hd_m.values]
measured_melt_list = [mb_df['melt_mmwed'].values, mb_df2['melt_mmwed'].values]
glac_name = "Djankuat (12.01132)"
fig_fn = '12.01132_hd_melt_Lambrecht2011.png'
ds_names = ['6/26/07$\u2009$-$\u2009$6/30/07','6/30/08$\u2009$-$\u2009$9/14/08']
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '4325N-4275E-debris_melt_curve.nc'
yearfracs_list = [[2007 + 177/366, 2007 + 181/366], [2008 + 182/366, 2008 + 258/366]]
hd_min, hd_max = 0, np.ceil(np.max([x.max() for x in measured_hd_list])/0.1)*0.1 + 0.05
hd_tick_major, hd_tick_minor = 0.2, 0.05
melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 10) * 10,0) + 5
melt_tick_major, melt_tick_minor = 20, 5
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_obs, melt_obs, melt_mod, melt_mod_bndlow, melt_mod_bndhigh = (
plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
melt_fp, melt_fn,
ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
melt_min=melt_min, melt_max=melt_max,
melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor))
hd_obs_all.append(hd_obs)
melt_obs_all.append(melt_obs)
melt_mod_all.append(melt_mod)
melt_mod_bndlow_all.append(melt_mod_bndlow)
melt_mod_bndhigh_all.append(melt_mod_bndhigh)
reg_all.append(['12'])
rgiid_all.append(['12.01132'])
# ===== S Inylchek (13.05000) ====
if '13.05000' in glaciers:
print('\nmelt comparison with Hagg et al (2008)')
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/13.05000_hagg2008-melt.csv')
measured_hd_list = [mb_df.hd_m.values]
measured_melt_list = [mb_df.melt_mmwed.values]
glac_name = "S Inylchek (13.05000)"
fig_fn = '13.05000_hd_melt_Hagg2008.png'
# ds_names = ['Hagg 2008\n(7/30/05$\u2009$-$\u2009$8/10/05)']
ds_names = ['7/30/05$\u2009$-$\u2009$8/10/05']
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '4200N-8025E-debris_melt_curve.nc'
yearfracs_list = [[2005 + 211/365, 2005 + 222/365]]
hd_min, hd_max = 0, np.ceil(np.max([x.max() for x in measured_hd_list])/0.1)*0.1 + 0.05
hd_tick_major, hd_tick_minor = 0.2, 0.05
melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 10) * 10,0) + 5
melt_tick_major, melt_tick_minor = 20, 5
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_obs, melt_obs, melt_mod, melt_mod_bndlow, melt_mod_bndhigh = (
plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
melt_fp, melt_fn,
ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
melt_min=melt_min, melt_max=melt_max,
melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor))
hd_obs_all.append(hd_obs)
melt_obs_all.append(melt_obs)
melt_mod_all.append(melt_mod)
melt_mod_bndlow_all.append(melt_mod_bndlow)
melt_mod_bndhigh_all.append(melt_mod_bndhigh)
reg_all.append(['13'])
rgiid_all.append(['13.05000'])
# ===== Qingbingtan, i.e., Glacier No. 72 (13.43165) =====
if '13.43165' in glaciers:
print('\nmelt comparison with Wang et al (2017)')
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/13.43165_wang2017-melt.csv')
measured_hd_list = [mb_df.hd_m.values]
measured_melt_list = [mb_df.melt_mmwed.values]
glac_name = "Qingbingtan (13.43165)"
fig_fn = '13.43165_hd_melt_wang2017.png'
ds_names = ['8/01/08$\u2009$-$\u2009$8/01/09']
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '4175N-8000E-debris_melt_curve.nc'
yearfracs_list = [[2008 + 214/366, 2009 + 213/365]]
hd_min, hd_max = 0, np.ceil(np.max([x.max() for x in measured_hd_list])/0.1)*0.1 + 0.05
hd_tick_major, hd_tick_minor = 0.2, 0.05
melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 10) * 10,0) + 5
melt_tick_major, melt_tick_minor = 20, 5
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_obs, melt_obs, melt_mod, melt_mod_bndlow, melt_mod_bndhigh = (
plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
melt_fp, melt_fn,
ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
melt_min=melt_min, melt_max=melt_max,
melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor))
hd_obs_all.append(hd_obs)
melt_obs_all.append(melt_obs)
melt_mod_all.append(melt_mod)
melt_mod_bndlow_all.append(melt_mod_bndlow)
melt_mod_bndhigh_all.append(melt_mod_bndhigh)
reg_all.append(['13'])
rgiid_all.append(['13.43165'])
# ===== Koxkar (13.43232) ====
if '13.43232' in glaciers:
print('\nmelt comparison with Han et al 2006 (measured via conduction plate, not ablation stake)')
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/13.43232_Han2006-melt_site1.csv')
mb_df2 = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/13.43232_Han2006-melt_site2.csv')
mb_df3 = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/13.43232_Han2006-melt_site3.csv')
measured_hd_list = [mb_df.hd_m.values, mb_df2.hd_m.values, mb_df3.hd_m.values,]
measured_melt_list = [mb_df.melt_mmwed.values, mb_df2.melt_mmwed.values, mb_df3.melt_mmwed.values]
glac_name = "Koxkar (13.43232)"
fig_fn = '13.43232_hd_melt_han2006.png'
ds_names = ['09/15/04$\u2009$-$\u2009$09/19/04', '09/24/04$\u2009$-$\u2009$09/28/04',
'10/02/04$\u2009$-$\u2009$10/06/04']
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '4175N-8000E-debris_melt_curve.nc'
yearfracs_list = [[2004 + 259/366, 2004 + 263/366], [2004 + 268/366, 2004 + 272/366],
[2004 + 276/366, 2004 + 280/366]]
hd_min, hd_max = 0, np.ceil(np.max([x.max() for x in measured_hd_list])/0.1)*0.1 + 0.05
hd_tick_major, hd_tick_minor = 0.5, 0.1
melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 10) * 10,0) + 5
melt_tick_major, melt_tick_minor = 10, 5
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_obs, melt_obs, melt_mod, melt_mod_bndlow, melt_mod_bndhigh = (
plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
melt_fp, melt_fn,
ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
melt_min=melt_min, melt_max=melt_max,
melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor))
hd_obs_all.append(hd_obs)
melt_obs_all.append(melt_obs)
melt_mod_all.append(melt_mod)
melt_mod_bndlow_all.append(melt_mod_bndlow)
melt_mod_bndhigh_all.append(melt_mod_bndhigh)
reg_all.append(['13'])
rgiid_all.append(['13.43232'])
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/13.43232_juen2014-melt.csv')
measured_hd_list = [mb_df.hd_m.values]
measured_melt_list = [mb_df.melt_mmwed.values]
glac_name = "Koxkar (13.43232)"
fig_fn = '13.43232_hd_melt_juen2014.png'
# ds_names = ['Juen 2014\n(8/10/10$\u2009$-$\u2009$8/29/10)']
ds_names = ['8/10/10$\u2009$-$\u2009$8/29/10']
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '4175N-8000E-debris_melt_curve.nc'
yearfracs_list = [[2010 + 222/365, 2010 + 241/365]]
hd_min, hd_max = 0, np.ceil(np.max([x.max() for x in measured_hd_list])/0.1)*0.1 + 0.05
hd_tick_major, hd_tick_minor = 0.2, 0.05
melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 10) * 10,0) + 5
melt_tick_major, melt_tick_minor = 20, 5
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_obs, melt_obs, melt_mod, melt_mod_bndlow, melt_mod_bndhigh = (
plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
melt_fp, melt_fn,
ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
melt_min=melt_min, melt_max=melt_max,
melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor))
hd_obs_all.append(hd_obs)
melt_obs_all.append(melt_obs)
melt_mod_all.append(melt_mod)
melt_mod_bndlow_all.append(melt_mod_bndlow)
melt_mod_bndhigh_all.append(melt_mod_bndhigh)
reg_all.append(['13'])
rgiid_all.append(['13.43232'])
# ===== Baltoro (14.06794) ====
if '14.06794' in glaciers:
print('\nmelt comparison with Mihalcea et al 2006 and Groos et al 2017')
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/14.06794_mihalcea2006-melt.csv')
mb_df2 = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/14.06794_groos2017-melt.csv')
measured_hd_list = [mb_df.hd_m.values, mb_df2.hd_m.values]
measured_melt_list = [mb_df.melt_mmwed.values, mb_df2.melt_mmwed.values]
glac_name = "Baltoro (14.06794)"
fig_fn = '14.06794_hd_melt_Mih2006_Gro2017.png'
# ds_names = ['Mihalcea 2006\n(7/04/04$\u2009$-$\u2009$7/14/04)', 'Groos 2017\n(7/22/11$\u2009$-$\u2009$8/10/11)']
ds_names = ['7/04/04$\u2009$-$\u2009$7/14/04', '7/22/11$\u2009$-$\u2009$8/10/11']
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '3575N-7650E-debris_melt_curve.nc'
yearfracs_list = [[2004 + 186/366, 2004 + 196/366],
[2011 + 203/365, 2011 + 222/365]]
hd_min, hd_max = 0, np.ceil(np.max([x.max() for x in measured_hd_list])/0.1)*0.1 + 0.05
hd_tick_major, hd_tick_minor = 0.2, 0.05
melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 10) * 10,0) + 5
melt_tick_major, melt_tick_minor = 20, 5
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_obs, melt_obs, melt_mod, melt_mod_bndlow, melt_mod_bndhigh = (
plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
melt_fp, melt_fn,
ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
melt_min=melt_min, melt_max=melt_max,
melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor))
hd_obs_all.append(hd_obs)
melt_obs_all.append(melt_obs)
melt_mod_all.append(melt_mod)
melt_mod_bndlow_all.append(melt_mod_bndlow)
melt_mod_bndhigh_all.append(melt_mod_bndhigh)
reg_all.append(['14'])
rgiid_all.append(['14.06794'])
# ===== Batal (14.16042) ====
if '14.16042' in glaciers:
print('\nmelt comparison with Patel et al (2016)')
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/14.16042_patel2016-melt.csv')
measured_hd_list = [mb_df.hd_m.values]
measured_melt_list = [mb_df.melt_mmwed.values]
glac_name = 'Batal (14.16042)'
fig_fn = '14.16042_hd_melt_Patel2016.png'
# ds_names = ['Patel 2016\n(8/01/14$\u2009$-$\u2009$10/15/14)']
ds_names = ['8/01/14$\u2009$-$\u2009$10/15/14']
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '3225N-7750E-debris_melt_curve.nc'
yearfracs_list = [[2014 + 213/365, 2014 + 288/365]]
hd_min, hd_max = 0, np.ceil(np.max([x.max() for x in measured_hd_list])/0.1)*0.1 + 0.05
hd_tick_major, hd_tick_minor = 0.2, 0.05
melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 10) * 10,0) + 5
melt_tick_major, melt_tick_minor = 10, 5
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_obs, melt_obs, melt_mod, melt_mod_bndlow, melt_mod_bndhigh = (
plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
melt_fp, melt_fn,
ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
melt_min=melt_min, melt_max=melt_max,
melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor))
hd_obs_all.append(hd_obs)
melt_obs_all.append(melt_obs)
melt_mod_all.append(melt_mod)
melt_mod_bndlow_all.append(melt_mod_bndlow)
melt_mod_bndhigh_all.append(melt_mod_bndhigh)
reg_all.append(['14'])
rgiid_all.append(['14.16042'])
# ===== Khumbu (15.03733) ====
if '15.03733' in glaciers:
print('\nmelt comparison with Kayastha et al 2000')
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/15.03733_kayastha2000-melt.csv')
measured_hd_list = [mb_df.hd_m.values]
measured_melt_list = [mb_df.melt_mmwed.values]
glac_name = 'Khumbu (15.03733)'
fig_fn = '15.03733_hd_melt_Kay2000.png'
# ds_names = ['Kayastha 2000\n(5/22/00$\u2009$-$\u2009$6/01/00)']
ds_names = ['5/22/00$\u2009$-$\u2009$6/01/00']
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '2800N-8700E-debris_melt_curve.nc'
yearfracs_list = [[2000 + 143/366, 2000 + 153/366]]
hd_min, hd_max = 0, np.ceil(np.max([x.max() for x in measured_hd_list])/0.1)*0.1 + 0.05
hd_tick_major, hd_tick_minor = 0.2, 0.05
melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 10) * 10,0) + 5
melt_tick_major, melt_tick_minor = 20, 5
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_obs, melt_obs, melt_mod, melt_mod_bndlow, melt_mod_bndhigh = (
plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
melt_fp, melt_fn,
ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
melt_min=melt_min, melt_max=melt_max,
melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor))
hd_obs_all.append(hd_obs)
melt_obs_all.append(melt_obs)
melt_mod_all.append(melt_mod)
melt_mod_bndlow_all.append(melt_mod_bndlow)
melt_mod_bndhigh_all.append(melt_mod_bndhigh)
reg_all.append(['15'])
rgiid_all.append(['15.03733'])
# ===== Imja-Lhotse Shar (15.03743) ====
if '15.03743' in glaciers:
print('\nmelt comparison with Rounce et al. (2015)')
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/15.03743_rounce2015-melt.csv')
measured_hd_list = [mb_df.hd_m.values]
measured_melt_list = [mb_df.melt_mmwed.values]
glac_name = 'Imja-Lhotse Shar (15.03743)'
fig_fn = '15.03743_hd_melt_Rou2015.png'
ds_names = ['5/18/14$\u2009$-$\u2009$11/09/14']
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '2800N-8700E-debris_melt_curve.nc'
yearfracs_list = [[2014 + 138/365, 2014 + 315/365]]
hd_min, hd_max = 0, np.ceil(np.max([x.max() for x in measured_hd_list])/0.1)*0.1 + 0.05
hd_tick_major, hd_tick_minor = 0.2, 0.05
melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 10) * 10,0) + 5
melt_tick_major, melt_tick_minor = 10, 5
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_obs, melt_obs, melt_mod, melt_mod_bndlow, melt_mod_bndhigh = (
plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
melt_fp, melt_fn,
ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
melt_min=melt_min, melt_max=melt_max,
melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor))
hd_obs_all.append(hd_obs)
melt_obs_all.append(melt_obs)
melt_mod_all.append(melt_mod)
melt_mod_bndlow_all.append(melt_mod_bndlow)
melt_mod_bndhigh_all.append(melt_mod_bndhigh)
reg_all.append(['15'])
rgiid_all.append(['15.03743'])
# ===== Lirung (15.04045) ====
if '15.04045' in glaciers:
print('\nmelt comparison with Chand et al 2015')
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df1 = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/15.04045_chand2015_fall-melt.csv')
mb_df2 = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/15.04045_chand2015_winter-melt.csv')
mb_df3 = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/15.04045_chand2015_spring-melt.csv')
measured_hd_list = [mb_df1.hd_m.values, mb_df2.hd_m.values, mb_df3.hd_m.values]
measured_melt_list = [mb_df1.melt_mmwed.values, mb_df2.melt_mmwed.values, mb_df3.melt_mmwed.values]
glac_name = 'Lirung (15.04045)'
fig_fn = '15.04045_hd_melt_Cha2015.png'
# ds_names = ['Chand 2000\n(9/22/13$\u2009$-$\u2009$10/03/13)', 'Chand 2000\n(11/29/13$\u2009$-$\u2009$12/12/13)',
# 'Chand 2000\n(4/07/14$\u2009$-$\u2009$4/19/14)']
ds_names = ['9/22/13$\u2009$-$\u2009$10/03/13', '11/29/13$\u2009$-$\u2009$12/12/13',
'4/07/14$\u2009$-$\u2009$4/19/14']
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '2825N-8550E-debris_melt_curve.nc'
yearfracs_list = [[2013 + 265/365, 2013 + 276/365], [2013 + 333/365, 2013 + 346/365],
[2014 + 97/365, 2014 + 109/365]]
hd_min, hd_max = 0, np.ceil(np.max([x.max() for x in measured_hd_list])/0.1)*0.1 + 0.05
hd_tick_major, hd_tick_minor = 0.2, 0.05
melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 10) * 10,0) + 5
melt_tick_major, melt_tick_minor = 20, 5
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_obs, melt_obs, melt_mod, melt_mod_bndlow, melt_mod_bndhigh = (
plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
melt_fp, melt_fn,
ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
melt_min=melt_min, melt_max=melt_max,
melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor))
hd_obs_all.append(hd_obs)
melt_obs_all.append(melt_obs)
melt_mod_all.append(melt_mod)
melt_mod_bndlow_all.append(melt_mod_bndlow)
melt_mod_bndhigh_all.append(melt_mod_bndhigh)
reg_all.append(['15'])
rgiid_all.append(['15.04045'])
# ===== Satopanth (15.07122) ====
if '15.07122' in glaciers:
print('\nmelt comparison with Shah et al (2019) - MUST BE DONE FOR INDIVIDUAL STAKES, NO MELT CURVE')
# # Data: debris thickness (m) and melt rate (mm w.e. d-1)
# mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/15.07122_shah2019-melt.csv')
# measured_hd_list = [mb_df.hd_m.values]
# measured_melt_list = [mb_df.melt_mmwed.values]
# glac_name = 'Satopanth (15.07122)'
# fig_fn = '15.07122_hd_melt_shah2019.png'
# ds_names = ['5/21/16$\u2009$-$\u2009$10/24/16']
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '3075N-7925E-debris_melt_curve.nc'
# yearfracs_list = [[2016 + 142/366, 2016 + 298/366]]
#
# hd_min, hd_max = 0, np.ceil(np.max([x.max() for x in measured_hd_list])/0.1)*0.1 + 0.05
# hd_tick_major, hd_tick_minor = 0.1, 0.02
# melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 10) * 10,0) + 5
# melt_tick_major, melt_tick_minor = 10, 5
#
# for n in np.arange(0,len(measured_hd_list)):
# assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
# hd_obs, melt_obs, melt_mod, melt_mod_bndlow, melt_mod_bndhigh = (
# plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
# melt_fp, melt_fn,
# ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
# hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
# melt_min=melt_min, melt_max=melt_max,
# melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor))
# hd_obs_all.append(hd_obs)
# melt_obs_all.append(melt_obs)
# melt_mod_all.append(melt_mod)
# melt_mod_bndlow_all.append(melt_mod_bndlow)
# melt_mod_bndhigh_all.append(melt_mod_bndhigh)
# reg_all.append(['15'])
# rgiid_all.append(['15.07122'])
obs_df_fullfn = (debris_prms.main_directory +
'/../hd_obs/datasets/Shah2019_satopanth/satopanth2019-melt-processed.csv')
if os.path.exists(obs_df_fullfn):
obs_df = pd.read_csv(obs_df_fullfn)
else:
# Special processing since dates vary drastically
obs_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/Shah2019_satopanth/satopanth2019-melt.csv')
obs_df['hd_m'] = obs_df.hd_cm / 100
obs_df['doy_start'] = obs_df['doy'] - obs_df['obs_period_d']
obs_df['doy_end'] = obs_df['doy']
obs_df['mb_obs_mmwed'] = obs_df.melt_cm * 0.9 * 10 / obs_df.obs_period_d
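# Convert measured melt from cm of ice to mm w.e. per day: x10 for cm -> mm and x0.9,
# presumably assuming an ice density of 900 kg m-3.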
obs_df['mb_mod_mmwed'] = np.nan
obs_df['mb_mod_mmwed_low'] = np.nan
obs_df['mb_mod_mmwed_high'] = np.nan
ds_ostrem = xr.open_dataset(melt_fp + melt_fn)
ds_ostrem = ds_ostrem.sortby('hd_cm')
debris_thicknesses = ds_ostrem.hd_cm.values
time_year = pd.to_datetime(ds_ostrem.time.values).year
time_daysperyear = np.array([366 if x%4 == 0 else 365 for x in time_year])
time_yearfrac = time_year + (pd.to_datetime(ds_ostrem.time.values).dayofyear-1) / time_daysperyear
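# The Ostrem dataset's time stamps are converted to year fractions so the start/end of each
# observation period (also stored as year fractions) can be located by nearest-neighbor search below.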
# Loop through each point individually because they all differ
for ndata in obs_df.index.values:
measured_hd = obs_df.loc[ndata,'hd_m']
# Start and end indices
if obs_df.loc[ndata,'year']%4 == 0:
daysperyear = 366
else:
daysperyear = 365
start_yearfrac = obs_df.loc[ndata,'year'] + obs_df.loc[ndata,'doy_start'] / daysperyear
end_yearfrac = obs_df.loc[ndata,'year'] + obs_df.loc[ndata,'doy_end'] / daysperyear
start_idx = np.where(abs(time_yearfrac - start_yearfrac) == abs(time_yearfrac - start_yearfrac).min())[0][0]
end_idx = np.where(abs(time_yearfrac - end_yearfrac) == abs(time_yearfrac - end_yearfrac).min())[0][0]
# Ostrem Curve
debris_melt_df = pd.DataFrame(np.zeros((len(debris_thicknesses),3)),
columns=['debris_thickness', 'melt_mmwed', 'melt_std_mmwed'])
nelev = 0
for ndebris, debris_thickness in enumerate(debris_thicknesses):
# Units: mm w.e. per day
melt_mmwed = (ds_ostrem['melt'][ndebris,start_idx:end_idx,nelev].values.sum()
* 1000 / len(time_yearfrac[start_idx:end_idx]))
melt_std_mmwed = (ds_ostrem['melt_std'][ndebris,start_idx:end_idx,nelev].values.sum()
* 1000 / len(time_yearfrac[start_idx:end_idx]))
debris_melt_df.loc[ndebris] = debris_thickness / 100, melt_mmwed, melt_std_mmwed
debris_melt_df['melt_bndlow_mmwed'] = debris_melt_df['melt_mmwed'] - z_value * debris_melt_df['melt_std_mmwed']
debris_melt_df['melt_bndhigh_mmwed'] = debris_melt_df['melt_mmwed'] + z_value * debris_melt_df['melt_std_mmwed']
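# Lower/upper-bound melt curves are the central estimate -/+ z_value standard deviations
# (z_value is defined elsewhere in this script).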
# MEAN CURVE
fit_idx = list(np.where(debris_thicknesses >= 5)[0])
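# Fit the melt-debris curve only to modeled thicknesses >= 5 cm; clean ice (0 cm) is appended
# separately and thicknesses below 2 cm are handled by the linear adjustment below.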
func_coeff, pcov = curve_fit(melt_fromdebris_func,
debris_melt_df.debris_thickness.values[fit_idx],
debris_melt_df.melt_mmwed.values[fit_idx])
melt_cleanice = debris_melt_df.loc[0,'melt_mmwed']
# Fitted curve
debris_4curve = np.arange(0.02,5.01,0.01)
melt_4curve = melt_fromdebris_func(debris_4curve, func_coeff[0], func_coeff[1])
# add clean ice
debris_4curve = np.concatenate([[0.0], debris_4curve])
melt_4curve = np.concatenate([[melt_cleanice], melt_4curve])
# Linearly interpolate between 0 cm and 2 cm for the melt rate
def melt_0to2cm_adjustment_value(melt, melt_clean, melt_2cm, hd):
""" Linearly interpolate melt factors between 0 and 2 cm
based on clean ice and 2 cm sub-debris melt """
# Wrap the scalar inputs in arrays so the boolean mask below applies
melt = np.array([melt])
hd = np.array([hd])
melt[(hd >= 0) & (hd < 0.02)] = (
melt_clean + hd[(hd >= 0) & (hd < 0.02)] / 0.02 * (melt_2cm - melt_clean))
return melt
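# Worked example with hypothetical values: for clean-ice melt of 60 mm w.e. d-1 and 2 cm sub-debris
# melt of 80 mm w.e. d-1, a 1 cm debris cover returns 60 + (0.01/0.02)*(80-60) = 70 mm w.e. d-1.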
melt_mod = melt_fromdebris_func(measured_hd, func_coeff[0], func_coeff[1])
melt_2cm = melt_fromdebris_func(0.02, func_coeff[0], func_coeff[1])
melt_mod = melt_0to2cm_adjustment_value(melt_mod, melt_cleanice, melt_2cm, measured_hd)
# LOWER BOUND CURVE
func_coeff_bndlow, pcov = curve_fit(melt_fromdebris_func,
debris_melt_df.debris_thickness.values[fit_idx],
debris_melt_df.melt_bndlow_mmwed.values[fit_idx])
melt_cleanice_bndlow = debris_melt_df.loc[0,'melt_bndlow_mmwed']
# Fitted curve
debris_4curve = np.arange(0.02,5.01,0.01)
melt_4curve_bndlow = melt_fromdebris_func(debris_4curve, func_coeff_bndlow[0], func_coeff_bndlow[1])
# add clean ice
debris_4curve = np.concatenate([[0.0], debris_4curve])
melt_4curve_bndlow = np.concatenate([[melt_cleanice_bndlow], melt_4curve_bndlow])
melt_mod_bndlow = melt_fromdebris_func(measured_hd, func_coeff_bndlow[0], func_coeff_bndlow[1])
melt_2cm_bndlow = melt_fromdebris_func(0.02, func_coeff_bndlow[0], func_coeff_bndlow[1])
melt_mod_bndlow = melt_0to2cm_adjustment_value(melt_mod_bndlow, melt_cleanice_bndlow, melt_2cm_bndlow,
measured_hd)
# UPPER BOUND CURVE
func_coeff_bndhigh, pcov = curve_fit(melt_fromdebris_func,
debris_melt_df.debris_thickness.values[fit_idx],
debris_melt_df.melt_bndhigh_mmwed.values[fit_idx])
melt_cleanice_bndhigh = debris_melt_df.loc[0,'melt_bndhigh_mmwed']
# Fitted curve
debris_4curve = np.arange(0.02,5.01,0.01)
melt_4curve_bndhigh = melt_fromdebris_func(debris_4curve, func_coeff_bndhigh[0], func_coeff_bndhigh[1])
# add clean ice
debris_4curve = np.concatenate([[0.0], debris_4curve])
melt_4curve_bndhigh = np.concatenate([[melt_cleanice_bndhigh], melt_4curve_bndhigh])
melt_mod_bndhigh = melt_fromdebris_func(measured_hd, func_coeff_bndhigh[0], func_coeff_bndhigh[1])
melt_2cm_bndhigh = melt_fromdebris_func(0.02, func_coeff_bndhigh[0], func_coeff_bndhigh[1])
melt_mod_bndhigh = melt_0to2cm_adjustment_value(melt_mod_bndhigh, melt_cleanice_bndhigh,melt_2cm_bndhigh,
measured_hd)
# RECORD THE DATA
obs_df.loc[ndata,'mb_mod_mmwed'] = melt_mod[0]
obs_df.loc[ndata,'mb_mod_mmwed_low'] = melt_mod_bndlow[0]
obs_df.loc[ndata,'mb_mod_mmwed_high'] = melt_mod_bndhigh[0]
# Compare all time steps (all intervals over the entire melt season included)
fig, ax = plt.subplots(1, 1, squeeze=False, gridspec_kw = {'wspace':0, 'hspace':0})
ax[0,0].scatter(obs_df.mb_obs_mmwed.values, obs_df.mb_mod_mmwed.values,
color='k', marker='o', linewidth=0.5, facecolor='none', s=30, zorder=1, clip_on=True)
ax[0,0].plot([0,200],[0,200], color='k', linewidth=0.5)
ax[0,0].set_xlim(0,75)
ax[0,0].set_ylim(0,75)
fig.set_size_inches(3.45,3.45)
fig_fullfn = melt_compare_fp + 'melt_compare-Satopanth_all_timesteps.png'
fig.savefig(fig_fullfn, bbox_inches='tight', dpi=150)
obs_df.to_csv(obs_df_fullfn)
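# Cache the processed stake-by-stake comparison so subsequent runs load it directly
# (see the os.path.exists check above) instead of reprocessing the raw observations.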
# obs_df_stakes = pd.DataFrame(np.zeros((0, len(obs_df.columns))), columns=list(obs_df.columns))
# unique_years = list(obs_df.year.unique())
# for nyear, year in enumerate(unique_years[0:1]):
# obs_df_year = obs_df[obs_df['year'] == year]
# obs_df_year.reset_index(inplace=True, drop=True)
#
# stake_ids = list(obs_df_year.stake_id.unique())
# for nstake, stake_id in enumerate(stake_ids[0:10]):
# obs_df_stakes_subset = obs_df_year[obs_df_year['stake_id'] == stake_id]
# obs_df_stakes_subset.reset_index(inplace=True, drop=True)
#
# # Melt average over the duration of period
# obs_melt_cm_total = obs_df_stakes_subset.melt_cm.sum()
# obs_period_d_total = obs_df_stakes_subset.obs_period_d.sum()
# obs_mmwed = obs_melt_cm_total * 0.9 * 10 / obs_period_d_total
# # Start and end date of the entire duration
# start_doy = obs_df_stakes_subset.doy_start.min()
# end_doy = obs_df_stakes_subset.doy_end.max()
hd_obs_all.append(list(obs_df.hd_m.values))
melt_obs_all.append(list(obs_df.mb_obs_mmwed.values))
melt_mod_all.append(list(obs_df.mb_mod_mmwed.values))
melt_mod_bndlow_all.append(list(obs_df.mb_mod_mmwed_low))
melt_mod_bndhigh_all.append(list(obs_df.mb_mod_mmwed_high))
reg_all.append(['15'])
rgiid_all.append(['15.07122'])
# ===== Hailuogou (15.07886) ====
if '15.07886' in glaciers:
print('\nmelt comparison with Zhang et al (2011)')
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
measured_hd_list = [np.array([2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 4, 5, 5, 6, 7, 7, 10, 10, 11, 13]) / 100]
measured_melt_list = [np.array([65.2, 55.4, 52.8, 51.6, 47.0, 53.4, 44.4, 50.3, 58, 48.9, 58.4, 54.4, 44.8,
52.6, 43.7, 52.5, 38.5, 36.5, 34.2, 28.4])]
glac_name = 'Hailuogou (15.07886)'
fig_fn = '15.07886_hd_melt_Zhang2011.png'
# ds_names = ['Zhang 2011\n(7/02/08$\u2009$-$\u2009$9/30/08)']
ds_names = ['7/02/08$\u2009$-$\u2009$9/30/08']
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '2950N-10200E-debris_melt_curve.nc'
yearfracs_list = [[2008 + 184/366, 2008 + 274/366]]
hd_min, hd_max = 0, np.ceil(np.max([x.max() for x in measured_hd_list])/0.1)*0.1 + 0.05
hd_tick_major, hd_tick_minor = 0.1, 0.05
melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 10) * 10,0) + 5
melt_tick_major, melt_tick_minor = 20, 5
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_obs, melt_obs, melt_mod, melt_mod_bndlow, melt_mod_bndhigh = (
plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
melt_fp, melt_fn,
ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
melt_min=melt_min, melt_max=melt_max,
melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor))
hd_obs_all.append(hd_obs)
melt_obs_all.append(melt_obs)
melt_mod_all.append(melt_mod)
melt_mod_bndlow_all.append(melt_mod_bndlow)
melt_mod_bndhigh_all.append(melt_mod_bndhigh)
reg_all.append(['15'])
rgiid_all.append(['15.07886'])
# ===== 24K (15.11758) ====
if '15.11758' in glaciers:
print('\nmelt comparison with Wei et al (2010) and Yang et al (2017)')
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/15.11758_yang2017-melt.csv')
mb_df2 = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/15.11758_wei2010-melt.csv')
measured_hd_list = [mb_df.hd_m.values, mb_df2.hd_m.values]
measured_melt_list = [mb_df.melt_mmwed.values, mb_df2.melt_mmwed.values]
glac_name = "24K (15.11758)"
fig_fn = '15.11758_hd_melt_Wei2010_Yang2017.png'
# ds_names = ['Yang 2017\n(6/01/16$\u2009$-$\u2009$9/30/16)', 'Wei et al 2010\n(7/19/08$\u2009$-$\u2009$9/04/08)']
ds_names = ['6/01/16$\u2009$-$\u2009$9/30/16', '7/19/08$\u2009$-$\u2009$9/04/08']
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '2975N-9575E-debris_melt_curve.nc'
yearfracs_list = [[2016 + 153/366, 2016 + 274/366], [2008 + 201/366, 2008 + 248/366]]
hd_min, hd_max = 0, np.ceil(np.max([x.max() for x in measured_hd_list])/0.1)*0.1 + 0.05
hd_tick_major, hd_tick_minor = 0.2, 0.05
melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 10) * 10,0) + 5
melt_tick_major, melt_tick_minor = 20, 5
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_obs, melt_obs, melt_mod, melt_mod_bndlow, melt_mod_bndhigh = (
plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
melt_fp, melt_fn,
ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
melt_min=melt_min, melt_max=melt_max,
melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor))
hd_obs_all.append(hd_obs)
melt_obs_all.append(melt_obs)
melt_mod_all.append(melt_mod)
melt_mod_bndlow_all.append(melt_mod_bndlow)
melt_mod_bndhigh_all.append(melt_mod_bndhigh)
reg_all.append(['15'])
rgiid_all.append(['15.11758'])
# ===== Fox (18.02375) ====
if '18.02375' in glaciers:
print('\nmelt comparison with Brook and Paine (2012)')
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/18.02375_brook2012-melt.csv')
measured_hd_list = [mb_df.hd_m.values]
measured_melt_list = [mb_df.melt_mmwed.values]
glac_name = 'Fox (18.02375)'
fig_fn = '18.02375_hd_melt_Brook2012.png'
ds_names = ['11/23/07$\u2009$-$\u2009$12/03/07']
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '4350S-17025E-debris_melt_curve.nc'
yearfracs_list = [[2007 + 327/365, 2007 + 337/365]]
hd_min, hd_max = 0, np.ceil(np.max([x.max() for x in measured_hd_list])/0.1)*0.1 + 0.05
hd_tick_major, hd_tick_minor = 0.2, 0.05
melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 10) * 10,0) + 5
melt_tick_major, melt_tick_minor = 20, 5
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_obs, melt_obs, melt_mod, melt_mod_bndlow, melt_mod_bndhigh = (
plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
melt_fp, melt_fn,
ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
melt_min=melt_min, melt_max=melt_max,
melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor))
hd_obs_all.append(hd_obs)
melt_obs_all.append(melt_obs)
melt_mod_all.append(melt_mod)
melt_mod_bndlow_all.append(melt_mod_bndlow)
melt_mod_bndhigh_all.append(melt_mod_bndhigh)
reg_all.append(['18'])
rgiid_all.append(['18.02375'])
# ===== Franz Josef (18.02397) ====
if '18.02397' in glaciers:
print('\nmelt comparison with Brook et al (2013)')
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/18.02397_brook2013-melt.csv')
measured_hd_list = [mb_df.hd_m.values]
measured_melt_list = [mb_df.melt_mmwed.values]
glac_name = 'Franz Josef (18.02397)'
fig_fn = '18.02397_hd_melt_Brook2013.png'
# ds_names = ['Brook 2013\n(2/07/12$\u2009$-$\u2009$2/16/12)']
ds_names = ['2/07/12$\u2009$-$\u2009$2/16/12']
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '4350S-17025E-debris_melt_curve.nc'
yearfracs_list = [[2012 + 38/366, 2012 + 47/366]]
hd_min, hd_max = 0, np.ceil(np.max([x.max() for x in measured_hd_list])/0.1)*0.1 + 0.05
hd_tick_major, hd_tick_minor = 0.2, 0.05
melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 10) * 10,0) + 5
melt_tick_major, melt_tick_minor = 40, 5
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_obs, melt_obs, melt_mod, melt_mod_bndlow, melt_mod_bndhigh = (
plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
melt_fp, melt_fn,
ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
melt_min=melt_min, melt_max=melt_max,
melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor))
hd_obs_all.append(hd_obs)
melt_obs_all.append(melt_obs)
melt_mod_all.append(melt_mod)
melt_mod_bndlow_all.append(melt_mod_bndlow)
melt_mod_bndhigh_all.append(melt_mod_bndhigh)
reg_all.append(['18'])
rgiid_all.append(['18.02397'])
#%% ----- ROOT MEAN SQUARE ERROR PLOT!!! -----
melt_obs_all_values, melt_mod_all_values = [], []
melt_df_all = None
melt_df_cns = ['reg', 'hd', 'melt_obs', 'melt_mod', 'melt_mod_bndlow', 'melt_mod_bndhigh']
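# Assemble one long dataframe with a row per observation: region, debris thickness, observed melt,
# and modeled melt with its lower/upper bounds.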
for nlist in np.arange(len(melt_obs_all)):
melt_df = pd.DataFrame(np.zeros((len(melt_obs_all[nlist]), len(melt_df_cns))), columns=melt_df_cns)
melt_df['reg'] = reg_all[nlist][0]
melt_df['hd'] = hd_obs_all[nlist]
melt_df['melt_obs'] = melt_obs_all[nlist]
melt_df['melt_mod'] = melt_mod_all[nlist]
melt_df['melt_mod_bndlow'] = melt_mod_bndlow_all[nlist]
melt_df['melt_mod_bndhigh'] = melt_mod_bndhigh_all[nlist]
if melt_df_all is None:
melt_df_all = melt_df
else:
melt_df_all = pd.concat([melt_df_all, melt_df], axis=0)
# Correlation
slope, intercept, r_value, p_value, std_err = linregress(melt_df_all.melt_obs.values, melt_df_all.melt_mod.values)
print('melt compare: r = ' + str(np.round(r_value,2)), '(p = ' + str(np.round(p_value,3)) +
', slope = ' + str(np.round(slope,2)) + ', intercept = ' + str(np.round(intercept,2)) + ')')
# Root mean square error
# All
rmse = (np.sum((melt_df_all.melt_obs.values - melt_df_all.melt_mod.values)**2) / melt_df_all.shape[0])**0.5
print('RMSE analysis:', rmse)
#%%
# subset
rmse_hd_list = [(0,0.05), (0.05,0.1), (0.1,0.2), (0.2, 1)]
for rmse_hd in rmse_hd_list:
melt_df_all_subset = melt_df_all[(melt_df_all['hd'] >= rmse_hd[0]) & (melt_df_all['hd'] < rmse_hd[1])]
rmse = (np.sum((melt_df_all_subset['melt_obs'].values - melt_df_all_subset['melt_mod'])**2) /
melt_df_all_subset.shape[0])**0.5
print(' hd:', rmse_hd, '(n=' + str(melt_df_all_subset.shape[0]) + ')', 'RMSE:', np.round(rmse,2))
# Correlation
slope, intercept, r_value, p_value, std_err = linregress(melt_df_all_subset['melt_obs'].values,
melt_df_all_subset['melt_mod'].values)
print(' r = ' + str(np.round(r_value,2)), '(p = ' + str(np.round(p_value,3)) +
', slope = ' + str(np.round(slope,2)) + ', intercept = ' + str(np.round(intercept,2)) + ')')
#%%
fig, ax = plt.subplots(1, 1, squeeze=False, gridspec_kw = {'wspace':0, 'hspace':0})
ax[0,0].scatter(melt_df_all.melt_obs.values, melt_df_all.melt_mod.values,
color='k', marker='o', linewidth=0.5, facecolor='none', s=30, zorder=1, clip_on=True)
ax[0,0].plot([0,200],[0,200], color='k', linewidth=0.5)
ax[0,0].set_xlim(0,125)
ax[0,0].set_ylim(0,125)
fig.set_size_inches(3.45,3.45)
fig_fullfn = melt_compare_fp + 'melt_compare-wellmeasured_lowres.png'
fig.savefig(fig_fullfn, bbox_inches='tight', dpi=150)
print('\n\nThe outliers where we underestimate melt are likely due to aspect and slope\n\n')
print('To-do list:')
print(' - color by debris thickness')
print(' - consider removing error bars')
print(' - add individual glacier names to facilitate main plot of curve comparisons')
#%%
marker_list = ['P', 'X', 'o', '<', 'v', '>', '^', 'd', 'h', 'p', 'D', '*', 'H', '8']
marker_dict = {'01':'P', '02':'X', '07':'h', '11':'o', '12':'^', '13':'<', '14':'v', '15':'>', '18':'*'}
markers_per_roi = False
fig, ax = plt.subplots(1, 1, squeeze=False, gridspec_kw = {'wspace':0, 'hspace':0})
reg_str_list = []
count_reg = -1
for nroi, reg_str in enumerate(melt_df_all.reg.unique()):
# for nroi, reg_str in enumerate(['01']):
if reg_str not in reg_str_list:
label_str = reg_str
reg_str_list.append(reg_str)
count_reg += 1
else:
label_str = None
if not markers_per_roi:
label_str = None
melt_df_subset = melt_df_all[melt_df_all['reg'] == reg_str].copy()
melt_df_subset['err'] = ((np.abs(melt_df_subset.melt_mod_bndhigh.values - melt_df_subset.melt_mod.values) +
np.abs(melt_df_subset.melt_mod_bndlow.values - melt_df_subset.melt_mod.values)) / 2)
if markers_per_roi:
marker = marker_dict[reg_str]
else:
marker = 'o'
# Size thresholds
s_plot = 20
lw = 0.3
lw_err = 0.1
colors = 'k'
zorders = [3,4,5]
# Color symbols by debris thickness
hd_values = melt_df_subset.hd.values
hd_colors = list(melt_df_subset.hd.values)
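# Color each point by its debris-thickness class (<=0.05, 0.05-0.1, 0.1-0.2, 0.2-0.4, >0.4 m),
# from blue (thin) to red (thick); the same classes appear in the legend below.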
for nd, hd in enumerate(hd_colors):
if hd <= 0.05:
hd_colors[nd] = '#2c7bb6'
elif hd <= 0.1:
hd_colors[nd] = '#abd9e9'
elif hd <= 0.2:
hd_colors[nd] = '#ffffbf'
elif hd <= 0.4:
hd_colors[nd] = '#fdae61'
else:
hd_colors[nd] = '#d7191c'
ax[0,0].scatter(melt_df_subset['melt_obs'], melt_df_subset['melt_mod'], marker=marker, edgecolor=hd_colors,
linewidth=lw, facecolor='none', s=30, zorder=3, label=label_str, clip_on=True)
# ax[0,0].errorbar(melt_df_subset['melt_obs'].values, melt_df_subset['melt_mod'].values,
# xerr=melt_df_subset['err'].values, yerr=None,
# capsize=1, capthick=0.15, elinewidth=lw_err, linewidth=0, color='grey', alpha=1, zorder=2,
# label=None)
# Labels
ax[0,0].set_xlabel('Observed Melt (mm w.e. d$^{-1}$)', size=12)
ax[0,0].set_ylabel('Modeled Melt (mm w.e. d$^{-1}$)', size=12)
ax[0,0].set_xlim(0,125)
ax[0,0].set_ylim(0,125)
ax[0,0].plot([0,200],[0,200], color='k', linewidth=0.5, zorder=1)
ax[0,0].xaxis.set_major_locator(plt.MultipleLocator(25))
ax[0,0].xaxis.set_minor_locator(plt.MultipleLocator(5))
ax[0,0].yaxis.set_major_locator(plt.MultipleLocator(25))
ax[0,0].yaxis.set_minor_locator(plt.MultipleLocator(5))
# Tick parameters
ax[0,0].tick_params(axis='both', which='major', labelsize=12, direction='inout')
ax[0,0].tick_params(axis='both', which='minor', labelsize=10, direction='in')
# Legend
# leg = ax[0,0].legend(loc='upper left', ncol=1, fontsize=10, frameon=False, handlelength=1,
# handletextpad=0.15, columnspacing=0.25, borderpad=0.25, labelspacing=0.5,
# bbox_to_anchor=(1.035, 1.0), title=' ')
nlabel = 0
none_count = 5
# Hack to get proper columns
if markers_per_roi:
obs_labels = [None, '< 0.05', '0.05-0.10', '0.10-0.20', '0.20-0.40', '>0.40']
else:
obs_labels = ['< 0.05', '0.05-0.10', '0.10-0.20', '0.20-0.40', '>0.40']
colors = ['#2c7bb6', '#abd9e9', '#ffffbf', '#fdae61', '#d7191c']
for obs_label in obs_labels:
if obs_label is None:
none_count += 1
ax[0,0].scatter([-5],[-5], color='k', marker='s', linewidth=1,
edgecolor='white', facecolor='white', s=1, zorder=3, label=' '*none_count)
else:
ax[0,0].scatter([-5],[-5], color=colors[nlabel], marker='s', linewidth=lw,
facecolor=colors[nlabel], s=30, zorder=3, label=obs_label)
nlabel += 1
if markers_per_roi:
leg = ax[0,0].legend(loc='upper left', ncol=3, fontsize=10, frameon=True, handlelength=1,
handletextpad=0.15, columnspacing=0.25, borderpad=0.25, labelspacing=0.5,
bbox_to_anchor=(0.0,1.01), title='Region $h_{d}$ ', framealpha=1)
else:
leg = ax[0,0].legend(loc='upper left', ncol=1, fontsize=10, frameon=True, handlelength=1,
handletextpad=0.25, columnspacing=0, borderpad=0.2, labelspacing=0.2,
bbox_to_anchor=(0.0,1.01), title='$h_{d}$ (m)', framealpha=1)
# for nmarker in np.arange(0,count_reg+1):
# leg.legendHandles[nmarker]._sizes = [30]
# leg.legendHandles[nmarker]._linewidths = [0.5]
# leg.legendHandles[nmarker].set_edgecolor('k')
# ax[0,0].text(0.17, 0.98, 'Region', size=10, horizontalalignment='center', verticalalignment='top',
# transform=ax[0,0].transAxes, zorder=4)
# ax[0,0].text(1.5, 0.95, '$n_{obs}$', size=10, horizontalalignment='center', verticalalignment='top',
# transform=ax[0,0].transAxes)
# # Create a Rectangle patchss
# rect = FancyBboxPatch((4.35,2.35),2.1,1.45,linewidth=1, edgecolor='lightgrey', facecolor='none', clip_on=False,
# boxstyle='round, pad=0.1')
# ax[0,0].add_patch(rect)
# ax[0,0].axvline(x=5.45, ymin=0.565, ymax=0.97, clip_on=False, color='lightgrey', linewidth=1)
fig.set_size_inches(3.45,3.45)
fig_fullfn = melt_compare_fp + 'melt_compare.png'
fig.savefig(fig_fullfn, bbox_inches='tight', dpi=300)
#%%
if option_hd_comparison:
# All glaciers
# glaciers = ['1.15645', '2.14297', '7.01044', '7.01107', '11.00106', '11.01604', '11.02472', '11.02810', '11.03005',
# '12.01132', '13.43165', '13.43174', '13.43232', '13.43207', '14.06794', '14.16042', '15.03473',
# '15.03733', '15.03743', '15.04045', '15.07122', '15.07886', '15.11758', '17.13720', '18.02397']
# Good glaciers for publication quality figure
glaciers = ['1.15645', '2.14297', '11.00106', '11.01604', '11.02472', '11.02810', '11.03005',
'11.01450', '11.01509', '11.01827', '11.02749', '11.02771', '11.02796',
'13.43165', '13.43174', '13.43232', '13.43207', '14.06794', '14.15536', '14.16042',
'15.03733', '15.03473','15.04045', '15.03743', '15.07122', '15.07886', '15.11758', '17.13720',
'18.02397']
# roughly estimated from maps
# glaciers = ['13.43165', '13.43174', '13.43207']
# glaciers = ['14.15536']
# glaciers = ['11.03005']
process_files = True
regional_hd_comparison = True
bin_width = 50
n_obs_min = 5
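# Observations are aggregated into 50 m elevation bins (bin_width); only bins with more than
# n_obs_min observations are reported in the comparison below.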
total_obs_count_all = 0
total_obs_count = 0
if process_files:
# #%%
# glaciers_subset = [
# '18.02397']
# hd_compare_all_subset = hd_compare_all[hd_compare_all['glacno'] == 2397]
# print('\nn_obs:', int(np.round(hd_compare_all_subset.obs_count.sum())),
# '\ndensity:', np.round(hd_compare_all_subset.obs_count.sum() / hd_compare_all_subset.dc_bin_area_km2.sum(),1))
# #%%
hd_datasets_fp = hd_obs_fp + 'datasets/'
hd_datasets_pt_fp = hd_obs_fp + 'datasets/hd_pt_data/'
hd_ds_dict = {'1.15645': [(hd_datasets_fp + '1.15645_kennicott_anderson_2019-hd.csv', 'Anderson 2019')],
'2.14297': [(hd_datasets_fp + '2.14297_moore2019-melt.csv', 'Moore 2019')],
'7.01044': [(hd_datasets_fp + '7.01044_lukas2005-hd.csv', 'Lukas 2005')],
'7.01107': [(hd_datasets_fp + '7.01107_lukas2005-hd.csv', 'Lukas 2005')],
'11.00106': [(hd_datasets_fp + '11.00106_kellerer2008-hd.csv', 'Kellerer 2008')],
'11.01450': [(hd_datasets_fp + '11.01450_grosseraletsch_anderson2019-hd.csv', 'Anderson 2020')],
'11.01509': [(hd_datasets_fp + '11.01509_oberaar_anderson2019-hd.csv', 'Anderson 2020')],
'11.01604': [(hd_datasets_fp + '11.01604_delGobbo2017-hd.csv', 'Del Gobbo 2017')],
'11.01827': [(hd_datasets_fp + '11.01827_oberaletsch_anderson2019-hd.csv', 'Anderson 2020')],
'11.02472': [(hd_datasets_fp + '11.02472_bocchiola2015-melt.csv', 'Bocchiola 2015')],
'11.02749': [(hd_datasets_fp + '11.02749_cheilon_anderson2019-hd.csv', 'Anderson 2020')],
'11.02771': [(hd_datasets_fp + '11.02771_piece_anderson2019-hd.csv', 'Anderson 2020')],
'11.02796': [(hd_datasets_fp + '11.02796_brenay_anderson2019-hd.csv', 'Anderson 2020')],
'11.02810': [(hd_datasets_fp + '11.02810_reid2012-hd.csv', 'Reid 2012')],
'11.03005': [(hd_datasets_fp + '11.03005_foster2012-hd.csv', 'Foster 2012'),
(hd_datasets_fp + '11.03005_mihalcea2008-hd.csv', 'Mihalcea 2008'),
(hd_datasets_fp + '11.03005_miage_anderson2019-hd.csv', 'Anderson 2020')],
'12.01132': [(hd_datasets_fp + '12.01132_popovnin2002_layers-hd.csv', 'Popovnin 2002')],
'13.43165': [(hd_datasets_fp + '13.43165_wang2011-hd.csv', 'Wang 2011')],
'13.43174': [(hd_datasets_fp + '13.43174_wang2011-hd.csv', 'Wang 2011')],
'13.43207': [(hd_datasets_fp + '13.43207_wang2011-hd.csv', 'Wang 2011')],
'13.43232': [(hd_datasets_fp + '13.43232_zhen2019-hd.csv', 'Zhen 2019'),
(hd_datasets_fp + '13.43232_haidong2006-hd.csv', 'Haidong 2006')],
'14.06794': [(hd_datasets_fp + '14.06794_mihalcea2006-melt.csv', 'Mihalcea 2006'),
(hd_datasets_fp + '14.06794_minora2015-hd.csv', 'Minora 2015')],
'14.15536': [(hd_datasets_fp + '14.15536_banerjee2018-hd.csv', 'Banerjee 2018')],
'14.16042': [(hd_datasets_fp + '14.16042_patel2016-hd.csv', 'Patel 2016')],
'15.03473': [(hd_datasets_fp + '15.03473_nicholson2012-hd.csv', 'Nicholson 2012'),
# (hd_datasets_fp + '15.03473_nicholson2017-hd.csv', 'Nicholson 2017'),
(hd_datasets_fp + '15.03473_nicholson2018_gokyo-hd.csv', 'Nicholson 2018'),
(hd_datasets_fp + '15.03473_nicholson2018_margin-hd.csv', 'Nicholson 2018')],
'15.03733': [(hd_datasets_fp + '15.03733_gibson-hd.csv', 'Gibson 2014')],
'15.03743': [(hd_datasets_fp + '15.03743_rounce2014-hd.csv', 'Rounce 2014')],
'15.04045': [(hd_datasets_fp + '15.04045_mccarthy2017-hd.csv', 'McCarthy 2017')],
'15.07122': [(hd_datasets_fp + '15.07122_shah2019-hd.csv', 'Shah 2019')],
'15.07886': [(hd_datasets_fp + '15.07886_zhang2011-hd.csv', 'Zhang 2011')],
'15.11758': [(hd_datasets_fp + '15.11758_yang2010-hd.csv', 'Yang 2010')],
'17.13720': [(hd_datasets_fp + '17.13720_ayala2016-hd.csv', 'Ayala 2016')],
'18.02397': [(hd_datasets_fp + '18.02397_brook2013-hd.csv', 'Brook 2013'),
(hd_datasets_fp + '18.02397_brook2013-hd_map_binned.csv', 'Brook 2013')],
}
for glacier in glaciers:
print('\n')
for hd_ds_info in hd_ds_dict[glacier]:
# Load observations
hd_ds_fn, ds_name = hd_ds_info[0], hd_ds_info[1]
hd_obs = pd.read_csv(hd_ds_fn)
# remove clean ice values because only comparing to debris-covered areas
hd_obs = hd_obs[hd_obs['hd_m'] > 0]
hd_obs.reset_index(inplace=True, drop=True)
# track the total observation count so the number discarded by later filtering can be reported
if 'n_obs' in hd_obs.columns:
total_obs_count_all = total_obs_count_all + hd_obs['n_obs'].sum()
else:
total_obs_count_all = total_obs_count_all + hd_obs.shape[0]
# if the observations were matched to the debris-cover maps ('hd_ts_cal' provided), drop points that
# fall outside the maps, i.e., "off-glacier" points
if 'hd_ts_cal' in hd_obs.columns:
hd_obs = hd_obs.dropna(subset=['hd_ts_cal'])
hd_obs.reset_index(inplace=True, drop=True)
glac_str = hd_ds_fn.split('/')[-1].split('_')[0]
reg = int(glac_str.split('.')[0])
glacno = int(glac_str.split('.')[1])
# Load modeled debris thickness
try:
hdts_fp = debris_prms.mb_binned_fp_wdebris_hdts
hdts_fn = glac_str + '_mb_bins_hdts.csv'
hdts_df = pd.read_csv(hdts_fp + hdts_fn)
except:
hdts_fp = debris_prms.mb_binned_fp_wdebris_hdts + '../_wdebris_hdts_extrap/'
if reg < 10:
hdts_fn = (glac_str.split('.')[0].zfill(2) + '.' + glac_str.split('.')[1] +
'_mb_bins_hdts_extrap.csv')
else:
hdts_fn = glac_str + '_mb_bins_hdts_extrap.csv'
hdts_df = pd.read_csv(hdts_fp + hdts_fn)
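# Fallback above: if the glacier-specific binned file is unavailable, use the regionally
# extrapolated product (_mb_bins_hdts_extrap) instead.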
hdts_df.loc[:,:] = hdts_df.values.astype(np.float64)
# ===== PROCESS BINNED DATA =====
if not hd_obs['elev'].isnull().any() and 'hd_m_std' not in hd_obs.columns:
# Bins
zmin = hd_obs.elev.min()
zmax = hd_obs.elev.max()
zbincenter_min = hdts_df.loc[0,'bin_center_elev_m']
zbincenter_max = hdts_df.loc[hdts_df.shape[0]-1,'bin_center_elev_m']
# Find minimum bin
while zbincenter_min - bin_width / 2 + bin_width < zmin:
zbincenter_min += bin_width
# Find maximum bin
while zbincenter_max - bin_width /2 > zmax:
zbincenter_max -= bin_width
# Compute relevant statistics for each bin
hd_compare_all_array = None
for nbin, zbincenter in enumerate(np.arange(zbincenter_min, zbincenter_max+bin_width/2, bin_width)):
zbincenter_min = zbincenter - bin_width/2
zbincenter_max = zbincenter + bin_width/2
elev_idx_obs = np.where((hd_obs['elev'].values >= zbincenter_min) &
(hd_obs['elev'].values < zbincenter_max))[0]
obs_count = 0
if len(elev_idx_obs) > 0:
# Observations
hd_obs_subset = hd_obs.loc[elev_idx_obs,'hd_m']
hd_bin_med = np.median(hd_obs_subset)
hd_bin_mad = np.median(abs(hd_obs_subset - np.median(hd_obs_subset)))
obs_count += hd_obs_subset.shape[0]
# Model
hdts_df_elevs = hdts_df['bin_center_elev_m'].values
hdts_df_idxs = np.where((hdts_df_elevs >= zbincenter_min) &
(hdts_df_elevs < zbincenter_max))[0]
hdts_list = []
for hdts_df_idx in hdts_df_idxs:
bin_hdts_center = hdts_df.loc[hdts_df_idx,'hd_ts_med_m']
bin_hdts_spread = hdts_df.loc[hdts_df_idx,'hd_ts_mad_m']
bin_hdts_count = int(hdts_df.loc[hdts_df_idx,'dc_bin_count_valid'])
# Randomly create thicknesses based on center and spread, but ensure mean is similar
hdts_list_single = np.random.normal(loc=bin_hdts_center, scale=bin_hdts_spread,
size=(bin_hdts_count))
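# Redraw until the sample mean and standard deviation are within 0.005 m and 0.01 m,
# respectively, of the bin's reported center and spread.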
while (abs(np.mean(hdts_list_single) - bin_hdts_center) > 0.005 or
abs(np.std(hdts_list_single) - bin_hdts_spread) > 0.01):
hdts_list_single = np.random.normal(loc=bin_hdts_center, scale=bin_hdts_spread,
size=(bin_hdts_count))
hdts_list.extend(hdts_list_single)
hdts_array = np.array(hdts_list)
hdts_df_med = np.median(hdts_array)
hdts_df_mad = median_abs_deviation(hdts_array)
# Ensure the glacier has data as well
if not np.isnan(hdts_df_med):
bin_dcarea_km2 = hdts_df.loc[hdts_df_idxs,'dc_bin_area_valid_km2'].sum()
total_obs_count = total_obs_count + obs_count
if obs_count > n_obs_min:
print(glac_str, int(zbincenter), obs_count,
' ', np.round(hd_bin_med,2), '+/-', np.round(hd_bin_mad,2), 'vs',
' ', np.round(hdts_df_med,2), '+/-', np.round(hdts_df_mad,2))
bin_data = np.array([reg, glacno, zbincenter, obs_count, hd_bin_med, hd_bin_mad,
hdts_df_med, hdts_df_mad, bin_dcarea_km2]).reshape(1,9)
if hd_compare_all_array is None:
hd_compare_all_array = bin_data
else:
hd_compare_all_array = np.concatenate((hd_compare_all_array, bin_data))
# ===== PROCESS SPECIFIC SITE/BIN =====
elif not hd_obs['elev'].isnull().any() and 'hd_m_std' in hd_obs.columns:
zbincenter_min = hdts_df.loc[0,'bin_center_elev_m']
zbincenter_max = hdts_df.loc[hdts_df.shape[0]-1,'bin_center_elev_m']
hd_compare_all_array = None
for nbin, zbincenter in enumerate(hd_obs.elev.values):
zmin = zbincenter - bin_width / 2
zmax = zbincenter + bin_width / 2
# Debris thickness observations
hd_bin_med = hd_obs.loc[nbin,'hd_m_med']
hd_bin_mad = hd_obs.loc[nbin,'hd_m_mad']
obs_count = hd_obs.loc[nbin,'n_obs']
                    # if the bin falls within the modeled debris-thickness elevation range, compute the relevant statistics
if zbincenter >= zbincenter_min and zbincenter <= zbincenter_max:
# Model
hdts_df_elevs = hdts_df['bin_center_elev_m'].values
hdts_df_idxs = np.where((hdts_df_elevs >= zmin) &
(hdts_df_elevs < zmax))[0]
hdts_list = []
for hdts_df_idx in hdts_df_idxs:
bin_hdts_center = hdts_df.loc[hdts_df_idx,'hd_ts_med_m']
bin_hdts_spread = hdts_df.loc[hdts_df_idx,'hd_ts_mad_m']
bin_hdts_count = int(hdts_df.loc[hdts_df_idx,'dc_bin_count_valid'])
if bin_hdts_count > 0:
                                # Randomly draw thicknesses from the bin's center and spread, resampling until the sample mean and spread match the bin values
hdts_list_single = np.random.normal(loc=bin_hdts_center, scale=bin_hdts_spread,
size=(bin_hdts_count))
while (abs(np.mean(hdts_list_single) - bin_hdts_center) > 0.005 or
abs(np.std(hdts_list_single) - bin_hdts_spread) > 0.01):
hdts_list_single = np.random.normal(loc=bin_hdts_center, scale=bin_hdts_spread,
size=(bin_hdts_count))
hdts_list.extend(hdts_list_single)
# Ensure the glacier has data as well
if len(hdts_list) > 0:
hdts_array = np.array(hdts_list)
hdts_df_med = np.median(hdts_array)
hdts_df_mad = median_abs_deviation(hdts_array)
bin_dcarea_km2 = hdts_df.loc[hdts_df_idxs,'dc_bin_area_valid_km2'].sum()
if obs_count > n_obs_min:
print(glac_str, int(zbincenter), obs_count,
' ', np.round(hd_bin_med,2), '+/-', np.round(hd_bin_mad,2), 'vs',
' ', np.round(hdts_df_med,2), '+/-', np.round(hdts_df_mad,2))
bin_data = np.array([reg, glacno, zbincenter, obs_count, hd_bin_med, hd_bin_mad,
hdts_df_med, hdts_df_mad, bin_dcarea_km2]).reshape(1,9)
if hd_compare_all_array is None:
hd_compare_all_array = bin_data
else:
hd_compare_all_array = np.concatenate((hd_compare_all_array, bin_data))
# ===== PROCESS ENTIRE GLACIER =====
else:
# Observations
if hd_obs.shape[0] > 1:
hd_bin_med = np.median(hd_obs['hd_m'])
hd_bin_mad = median_abs_deviation(hd_obs['hd_m'].values)
obs_count = len(hd_obs['hd_m'].values)
else:
hd_bin_med = hd_obs.loc[0,'hd_m_med']
hd_bin_mad = hd_obs.loc[0,'hd_m_mad']
obs_count = hd_obs.loc[0,'n_obs']
# Modeled debris thickness
hdts_idxs = np.where(hdts_df['dc_bin_count_valid'].values > 0)[0]
hdts_list = []
for hdts_idx in hdts_idxs:
bin_hd_center = hdts_df.loc[hdts_idx,'hd_ts_med_m']
bin_hd_spread = hdts_df.loc[hdts_idx,'hd_ts_mad_m']
bin_hd_count = int(hdts_df.loc[hdts_idx,'dc_bin_count_valid'])
                    # Randomly draw thicknesses from the bin's center and spread, resampling until the sample mean and spread match the bin values
hdts_list_single = np.random.normal(loc=bin_hd_center, scale=bin_hd_spread, size=(bin_hd_count))
while (abs(np.mean(hdts_list_single) - bin_hd_center) > 0.005 or
abs(np.std(hdts_list_single) - bin_hd_spread) > 0.01):
hdts_list_single = np.random.normal(loc=bin_hd_center, scale=bin_hd_spread,
size=(bin_hd_count))
hdts_list.extend(hdts_list_single)
                bin_dcarea_km2 = hdts_df.loc[hdts_idxs,'dc_bin_area_valid_km2'].sum()
hdts_array = np.array(hdts_list)
hdts_df_med = np.median(hdts_array)
hdts_df_mad = median_abs_deviation(hdts_array)
zbincenter = ((hdts_df['dc_bin_count_valid'] * hdts_df['bin_center_elev_m']).sum() /
hdts_df['dc_bin_count_valid'].sum())
# Ensure the glacier has data as well
if not np.isnan(hdts_df_med):
if obs_count > n_obs_min:
print(glac_str, int(zbincenter), obs_count,
' ', np.round(hd_bin_med,2), '+/-', np.round(hd_bin_mad,2), 'vs',
' ', np.round(hdts_df_med,2), '+/-', np.round(hdts_df_mad,2))
bin_data = np.array([reg, glacno, zbincenter, obs_count, hd_bin_med, hd_bin_mad,
hdts_df_med, hdts_df_mad, bin_dcarea_km2]).reshape(1,9)
hd_compare_all_array = bin_data
# EXPORT DATASET
# hd_compare_all_cns = ['region', 'glacno', 'zbin', 'obs_count', 'hd_obs_med', 'hd_obs_mad',
# 'hd_ts_med_m', 'hd_ts_mad_m']
hd_compare_all_cns = ['region', 'glacno', 'zbin', 'obs_count', 'hd_obs_med', 'hd_obs_mad',
'hd_ts_med_m', 'hd_ts_mad_m', 'dc_bin_area_km2']
hd_compare_all = | pd.DataFrame(hd_compare_all_array, columns=hd_compare_all_cns) | pandas.DataFrame |
"""
Lotka's Law
===============================================================================
>>> from techminer2 import *
>>> directory = "data/"
>>> file_name = "sphinx/images/lotka.png"
>>> lotka_law(directory=directory).savefig(file_name)
.. image:: images/lotka.png
:width: 700px
:align: center
>>> lotka_law(directory=directory, plot=False)
Num Authors % ... Acum Num Documents % Acum Num Documents
0 1 0.16 % ... 5 2.04%
1 2 0.31 % ... 11 4.49%
2 35 5.48 % ... 49 20.0%
3 601 94.05 % ... 245 100.0%
<BLANKLINE>
[4 rows x 9 columns]
"""
import matplotlib.pyplot as plt
import pandas as pd
from ._read_records import read_filtered_records
from .column_indicators import column_indicators
def _lotka_core_authors(directory="./"):
"""
Returns a dataframe with the core analysis.
Parameters
----------
    directory: str
        path to the directory that contains the records.
Returns
-------
pandas.DataFrame
Dataframe with the core authors of the records
"""
documents = read_filtered_records(directory)
documents = documents.copy()
z = column_indicators(column="authors", sep="; ", directory=directory)[
"num_documents"
]
authors_dict = {
author: num_docs for author, num_docs in zip(z.index, z) if not | pd.isna(author) | pandas.isna |
import requests
import pandas as pd
output_csv = 'data/chapter_info.csv'
url = 'https://harrypotter.fandom.com/wiki/List_of_chapters_in_the_Harry_Potter_novels'
book_names = ["Harry Potter and the Philosopher's Stone",
'Harry Potter and the Chamber of Secrets',
'Harry Potter and the Prisoner of Azkaban',
'Harry Potter and the Goblet of Fire',
'Harry Potter and the Order of the Phoenix',
'Harry Potter and the Half-Blood Prince',
'Harry Potter and the Deathly Hallows']
response = requests.get(url)
html = response.text
col_names = ['series_chapter_num', 'book_chapter_num', 'chapter_picture',
'chapter_title', 'chapter_start_date']
drop_cols = ['chapter_picture']
col_order = ['book_num', 'series_chapter_num', 'book_chapter_num',
'book_title', 'chapter_title', 'chapter_start_date']
chapter_dfs = pd.read_html(html, header=0)
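# pd.read_html returns one DataFrame per HTML table found on the page; the loop below
# assumes those tables appear in publication order so they can be paired with book_names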
clean_chapter_dfs = []
for i, chapter_df in enumerate(chapter_dfs):
chapter_df.columns = col_names
chapter_df = chapter_df.drop(columns=drop_cols)
chapter_df['book_num'] = i + 1
chapter_df['book_title'] = book_names[i]
chapter_df = chapter_df[col_order]
clean_chapter_dfs.append(chapter_df)
full_chapter_df = | pd.concat(clean_chapter_dfs) | pandas.concat |
"""
send a gRPC command that has streaming results
capture the results in the db as StoredResponse objects
"""
import copy
import uuid
from random import random
import math
import numpy as np
from django.conf import settings
from django.http import JsonResponse
from tworaven_apps.utils.static_keys import KEY_SUCCESS, KEY_DATA
from tworaven_apps.utils.json_helper import json_loads
from tworaven_apps.utils.proto_util import message_to_json
from tworaven_apps.utils.basic_response import (ok_resp, err_resp)
from tworaven_apps.utils.view_helper import get_json_error
from tworaven_apps.ta2_interfaces.ta2_connection import TA2Connection
from tworaven_apps.ta2_interfaces.models import \
(StoredRequest, StoredResponse)
from tworaven_apps.ta2_interfaces.stored_data_util import StoredRequestUtil
from tworaven_apps.ta2_interfaces.websocket_message import WebsocketMessage
from tworavensproject.celery import celery_app
from tworaven_apps.utils.random_info import get_timestamp_string
from tworaven_apps.utils.file_util import create_directory
import grpc
import core_pb2
from google.protobuf.json_format import \
(Parse, ParseError)
import shutil
from os import path
import os
import json
from d3m.container.dataset import Dataset
from sklearn.model_selection import train_test_split
import pandas as pd
from pandas.api.types import is_numeric_dtype
#
# Import Tasks to SearchSolutions/GetSearchSolutionsResults,
# FitSolution/GetFitSolutionResults,
# ScoreSolution/GetScoreSolutionResults
#
from tworaven_apps.ta2_interfaces.ta2_search_solutions_helper import \
SearchSolutionsHelper
from tworaven_apps.ta2_interfaces.ta2_fit_solution_helper import FitSolutionHelper
from tworaven_apps.ta2_interfaces.ta2_score_solution_helper import ScoreSolutionHelper
from tworaven_apps.user_workspaces.models import UserWorkspace
from tworaven_solver import approx_seconds, get_date
import csv
from dateutil import parser
from collections import deque
@celery_app.task(ignore_result=True)
def stream_and_store_results(raven_json_str, stored_request_id,
grpc_req_obj_name, grpc_call_name, **kwargs):
"""Make the grpc call which has a streaming response
grpc_req_obj_name: "core_pb2.GetSearchSolutionsResultsRequest", etc
grpc_call_name: "GetSearchSolutionsResults", etc
"""
core_stub, err_msg = TA2Connection.get_grpc_stub()
if err_msg:
StoredRequestUtil.set_error_status(stored_request_id, err_msg)
return
# optional: used to stream messages back to client via channels
#
websocket_id = kwargs.get('websocket_id', None)
#
grpc_req_obj = eval(grpc_req_obj_name)
grpc_rpc_call_function = eval('core_stub.%s' % grpc_call_name)
# --------------------------------
# convert the JSON string to a gRPC request
# Yes: done for the 2nd time
# --------------------------------
try:
req = Parse(raven_json_str,
grpc_req_obj())
except ParseError as err_obj:
err_msg = 'Failed to convert JSON to gRPC: %s' % (err_obj)
StoredRequestUtil.set_error_status(stored_request_id, err_msg)
return
# --------------------------------
# Send the gRPC request
# --------------------------------
msg_cnt = 0
try:
# -----------------------------------------
# Iterate through the streaming responses
# -----------------------------------------
for reply in grpc_rpc_call_function(\
req, timeout=settings.TA2_GRPC_LONG_TIMEOUT):
msg_cnt += 1
stored_resp = None # to hold a StoredResponse object
# -----------------------------------------
# parse the response
# -----------------------------------------
msg_json_str = message_to_json(reply)
msg_json_info = json_loads(msg_json_str)
# -----------------------------------------
# does it look ok?
# -----------------------------------------
if not msg_json_info.success:
                print('PROBLEM: failed to parse streaming response (TODO: log this properly)')
                user_msg = 'failed to parse response: %s' % \
                           msg_json_info.err_msg
ws_msg = WebsocketMessage.get_fail_message(\
grpc_call_name, user_msg, msg_cnt=msg_cnt)
ws_msg.send_message(websocket_id)
continue
# -----------------------------------------
# Looks good, save the response
# -----------------------------------------
stored_resp_info = StoredResponse.add_response(\
stored_request_id,
response=msg_json_info.result_obj)
# -----------------------------------------
# Make sure the response was saved (probably won't happen)
# -----------------------------------------
if not stored_resp_info.success:
# Not good but probably won't happen
# send a message to the user...
#
user_msg = 'failed to store response: %s' % \
msg_json_info.err_msg
ws_msg = WebsocketMessage.get_fail_message(\
grpc_call_name, user_msg, msg_cnt=msg_cnt)
ws_msg.send_message(websocket_id)
# Wait for the next response...
continue
# -----------------------------------------------
# send responses back to any open WebSockets
# ---------------------------------------------
if websocket_id:
stored_resp = stored_resp_info.result_obj
ws_msg = WebsocketMessage.get_success_message(\
grpc_call_name,
'it worked',
msg_cnt=msg_cnt,
data=stored_resp.as_dict())
print('ws_msg: %s' % ws_msg)
#print('ws_msg', ws_msg.as_dict())
ws_msg.send_message(websocket_id)
StoredResponse.mark_as_read(stored_resp)
# -----------------------------------------------
print('msg received #%d' % msg_cnt)
except grpc.RpcError as err_obj:
StoredRequestUtil.set_error_status(\
stored_request_id,
str(err_obj))
return
#except Exception as err_obj:
# StoredRequestUtil.set_error_status(\
# stored_request_id,
# str(err_obj))
# return
StoredRequestUtil.set_finished_ok_status(stored_request_id)
def rewrite_dataset_schema(problem, dataset_schema, all_variables, dataset_id, update_roles=False):
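    """Trim the D3M datasetDoc to the variables referenced by `problem`.

    Keeps only the index/predictor/target (and ordering) columns, re-indexes the
    remaining column schemas, optionally rewrites their roles, and stamps the new
    dataset_id. Returns (keep_variables, dataset_schema); the schema is modified in place.
    """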
dataset_schema['about']['datasetID'] = dataset_id
resource_schema = next(i for i in dataset_schema['dataResources'] if i['resType'] == 'table')
# rewrite datasetDoc in output datasets if new problem metadata is supplied
keep_variables = list({
*problem.get('indexes', ['d3mIndex']),
*problem['predictors'],
*problem['targets']
})
ordering_column = problem.get('forecastingHorizon', {}).get("column")
if ordering_column and ordering_column not in keep_variables:
keep_variables.append(ordering_column)
# preserve column order, and only keep variables that already existed
keep_variables = sorted(list(i for i in keep_variables if i in all_variables), key=lambda x: all_variables.index(x))
passthrough_roles = [
'multiIndex', 'key', 'interval', 'boundingPolygon',
'edgeSource', "directedEdgeSource", "undirectedEdgeSource", "multiEdgeSource", "simpleEdgeSource",
"edgeTarget", "directedEdgeTarget", "undirectedEdgeTarget", "multiEdgeTarget", "simpleEdgeTarget"
]
map_roles = {
"indexes": 'index',
'predictors': 'attribute',
'targets': 'suggestedTarget',
'crossSection': 'suggestedGroupingKey',
'location': 'locationIndicator',
'boundary': 'boundaryIndicator',
'weights': 'instanceWeight',
'privileged': 'suggestedPriviligedData'
}
def do_update_roles(prev_roles, col_name):
roles = [i for i in prev_roles if i in passthrough_roles]
for role_name in map_roles:
if col_name in problem[role_name]:
roles.append(map_roles[role_name])
# if col_name == ordering_column and 'timeIndicator' not in col_schema['role']:
# col_schema['role'].append('timeIndicator')
return roles
def update_col_schema(i, col_name):
col_schema = next((col_schema for col_schema in resource_schema['columns'] if col_schema['colName'] == col_name), None)
if col_schema is None:
return
col_schema['colIndex'] = i
if update_roles:
col_schema['role'] = do_update_roles(col_schema['role'], col_name)
return col_schema
# modify in place
updated_schemas = []
for i, col_name in enumerate(keep_variables):
updated_schema = update_col_schema(i, col_name)
if updated_schema is None:
continue
updated_schemas.append(updated_schema)
resource_schema['columns'] = updated_schemas
# these are no longer necessarily valid after rewriting
resource_schema.pop("columnsCount", None)
dataset_schema.pop('metadata', None)
return keep_variables, dataset_schema
@celery_app.task()
def split_dataset(configuration, workspace):
# parse input data
split_options = configuration.get('split_options', {})
problem = configuration.get('problem')
dataset_schema = json.load(open(configuration['dataset_schema'], 'r'))
dataset_id = configuration['dataset_id']
# actual variable names in the data
all_variables = pd.read_csv(configuration['dataset_path'], nrows=1).columns.tolist()
# dataset schema will change because predictors and targets does not span all variables
# dataset id needs to be coordinated or else the TA2 will reject eventual produce calls on other datasets
keep_variables, dataset_schema = rewrite_dataset_schema(
problem=problem,
dataset_schema=dataset_schema,
all_variables=all_variables,
dataset_id=dataset_id,
update_roles=configuration.get('update_roles'))
# find the learningData resource, which is by convention the first table
resource_schema = next(i for i in dataset_schema['dataResources'] if i['resType'] == 'table')
cross_section_date_limits = None
inferred_freq = None
dtypes = {}
# WARNING: dates are assumed to be monotonically increasing
# this is to make it possible to support arbitrarily large datasets
if problem.get('taskType') == 'FORECASTING' and problem['forecastingHorizon']['column']:
time_column = problem['forecastingHorizon']['column']
time_format = problem.get('time_format', {}).get(time_column)
if time_format:
dtypes[time_column] = str
for cross_section in problem.get('crossSection', []):
dtypes[cross_section] = str
cross_section_date_limits = {}
time_buffer = []
candidate_frequencies = set()
# create generator that provides a data chunk on every iteration
data_file_generator = pd.read_csv(
configuration['dataset_path'],
chunksize=10 ** 5,
usecols=keep_variables,
dtype=dtypes)
infer_count = 0
for dataframe_chunk in data_file_generator:
if time_format:
dataframe_chunk[time_column] = dataframe_chunk[time_column].apply(
lambda x: get_date(x, time_format=time_format))
dataframe_chunk = dataframe_chunk.sort_values(by=[time_column])
for _, row in dataframe_chunk.iterrows():
# with open(configuration['dataset_path'], 'r') as infile:
# reader = csv.DictReader(infile)
# for row in reader:
date = row[time_column]
if not date:
continue
# limit the number of times frequency is inferred
if infer_count < 1000:
buffer_is_empty = len(time_buffer) == 0
date_is_newer = len(time_buffer) > 0 and time_buffer[-1] < date
if buffer_is_empty or date_is_newer:
time_buffer.append(date)
if len(time_buffer) > 3:
del time_buffer[0]
# at minimum three time points are needed to infer a date offset frequency
if len(time_buffer) == 3:
infer_count += 1
if time_format:
try:
candidate_frequency = pd.infer_freq(time_buffer)
if candidate_frequency:
candidate_frequencies.add(candidate_frequency)
except Exception:
# pandas._libs.tslibs.np_datetime.OutOfBoundsDatetime
pass
else:
candidate_frequencies.add(abs(time_buffer[1] - time_buffer[0]))
# collect the highest date within each cross section
section = tuple(row[col] for col in problem.get('crossSection', []))
cross_section_date_limits.setdefault(section, date)
cross_section_date_limits[section] = max(cross_section_date_limits[section], date)
# if data has a trio of evenly spaced records
if candidate_frequencies:
if time_format:
# sort inferred frequency by approximate time durations, select shortest
inferred_freq = sorted([(i, approx_seconds(i)) for i in candidate_frequencies], key=lambda x: x[1])[0][0]
inferred_freq = pd.tseries.frequencies.to_offset(inferred_freq)
else:
inferred_freq = min(candidate_frequencies)
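    # illustrative example (not from the data): three consecutive month-start dates
    # (e.g. 2020-01-01, 2020-02-01, 2020-03-01) make pd.infer_freq return 'MS' above,
    # which to_offset turns into a month-begin offset used later for the forecast horizon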
# create new directory structure for the data in a role
def get_dataset_paths(role):
dest_dir_info = create_destination_directory(workspace, name=role)
if not dest_dir_info[KEY_SUCCESS]:
return JsonResponse(get_json_error(dest_dir_info.err_msg))
dest_directory = dest_dir_info[KEY_DATA]
# copy all files, remove the csv_path (we'll be overwriting it)
csv_path = path.join(dest_directory, resource_schema['resPath'])
shutil.rmtree(dest_directory)
shutil.copytree(workspace.d3m_config.training_data_root, dest_directory)
os.remove(csv_path)
role_dataset_schema_path = path.join(dest_directory, 'datasetDoc.json')
role_dataset_schema = copy.deepcopy(dataset_schema)
with open(role_dataset_schema_path, 'w') as dataset_schema_file:
json.dump(role_dataset_schema, dataset_schema_file)
return role_dataset_schema_path, role_dataset_schema, csv_path
all_datasetDoc_path, all_datasetDoc, all_datasetCsv = get_dataset_paths('all')
dataset_schemas = {'all': all_datasetDoc}
dataset_schema_paths = {'all': all_datasetDoc_path}
dataset_paths = {'all': all_datasetCsv}
# indicates if data was successfully stratified (prone to failure on highly imbalanced data)
dataset_stratified = {}
if split_options.get('outOfSampleSplit'):
train_datasetDoc_path, train_datasetDoc, train_datasetCsv = get_dataset_paths('train')
test_datasetDoc_path, test_datasetDoc, test_datasetCsv = get_dataset_paths('test')
dataset_schemas = {
**dataset_schemas,
'train': train_datasetDoc,
'test': test_datasetDoc
}
dataset_schema_paths = {
**dataset_schema_paths,
'train': train_datasetDoc_path,
'test': test_datasetDoc_path
}
dataset_paths = {
**dataset_paths,
'train': train_datasetCsv,
'test': test_datasetCsv,
}
dataset_stratified = {
'train': split_options.get('stratified'),
'test': split_options.get('stratified')
}
DEFAULT_TRAIN_TEST_RATIO = .7
train_test_ratio = split_options.get('trainTestRatio', DEFAULT_TRAIN_TEST_RATIO)
if not (0 < train_test_ratio <= 1):
raise ValueError("train-test ratio must be between 0 and 1")
random_seed = split_options.get('randomSeed', 0)
# traverse the entire file for a row count. Informs the sampling ratios when chunking
with open(configuration['dataset_path'], 'r') as infile:
_header_line = next(infile)
row_count = sum(1 for _ in infile)
# write out blank csv files for each split
for split_name in ['train', 'test', 'all']:
pd.DataFrame(data=[], columns=keep_variables).to_csv(dataset_paths[split_name], index=False, quoting=csv.QUOTE_NONNUMERIC)
        # by default the splits generator yields None forever, so the zip below is limited only by the data chunks
splits_file_generator = iter(lambda: None, 1)
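        # iter(callable, sentinel) keeps calling the lambda until it returns the sentinel (1),
        # which never happens, so every iteration of the zip below receives None here by default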
if split_options.get('splitsDir') and split_options.get('splitsFile'):
splits_file_path = f"{split_options['splitsDir']}/{split_options['splitsFile']}"
splits_file_generator = pd.read_csv(splits_file_path, chunksize=10 ** 5)
data_file_generator = pd.read_csv(
configuration['dataset_path'],
chunksize=10 ** 5,
usecols=keep_variables,
dtype=dtypes)
row_count_chunked = 0
# TODO: adjust chunksize based on number of columns
for dataframe, dataframe_split in zip(
data_file_generator,
splits_file_generator):
# rows with NaN values become object rows, which may contain multiple types. The NaN values become empty strings
# this converts '' to np.nan in non-nominal columns, so that nan may be dropped
# nominals = problem.get('categorical', [])
# for column in [col for col in problem['targets'] if col not in nominals]:
# dataframe[column].replace('', np.nan, inplace=True)
# drop null values in target variables
if 'SEMISUPERVISED' not in problem['taskType'] and problem['taskType'] != 'CLUSTERING':
nominals = problem.get('categorical', [])
for column in [col for col in problem['targets'] if col not in nominals]:
dataframe[column].replace('', np.nan, inplace=True)
dataframe.dropna(inplace=True, subset=problem['targets'])
dataframe.reset_index(drop=True, inplace=True)
# max count of each split ['all', 'test', 'train']
max_count = int(split_options.get('maxRecordCount', 5e4))
chunk_count = len(dataframe)
sample_count = int(max_count / row_count * chunk_count)
if split_options.get('outOfSampleSplit'):
                if dataframe_split is not None:
train_indices = set(dataframe_split[dataframe_split['type'] == 'TRAIN']['d3mIndex'].tolist())
test_indices = set(dataframe_split[dataframe_split['type'] == 'TEST']['d3mIndex'].tolist())
splits = {
'train': dataframe[dataframe['d3mIndex'].astype(int).isin(train_indices)],
'test': dataframe[dataframe['d3mIndex'].astype(int).isin(test_indices)],
'stratified': False
}
# split dataset along temporal variable
elif problem['taskType'] == 'FORECASTING':
horizon = problem.get('forecastingHorizon', {}).get('value', 10)
if not horizon:
horizon = 10
if cross_section_date_limits and inferred_freq:
time_column = problem['forecastingHorizon']['column']
time_format = problem.get('time_format', {}).get(time_column)
cross_section_max_count = int(max_count / len(cross_section_date_limits))
horizon = min(cross_section_max_count, horizon)
def in_test(row):
section = tuple(row[col] for col in problem.get('crossSection', []))
date = row[time_column]
if time_format:
date = get_date(date, time_format)
if not date:
return False
max_date = cross_section_date_limits[section]
# print('interval:', max_date - inferred_freq * horizon, max_date)
return max_date - inferred_freq * horizon <= date <= max_date
def in_train(row):
section = tuple(row[col] for col in problem.get('crossSection', []))
date = row[time_column]
if time_format:
date = get_date(date, time_format)
if not date:
return False
max_date = cross_section_date_limits[section] - inferred_freq * horizon
# TODO: lower bound isn't being set, due to risk of time underflow
return date < max_date
splits = {
'train': dataframe.loc[dataframe.apply(in_train, axis=1)],
'test': dataframe.loc[dataframe.apply(in_test, axis=1)],
'stratified': False
}
else:
train_idx_min = row_count - max_count - horizon
test_idx_min = row_count - horizon
def clamp(idx):
return min(len(dataframe), max(0, int(idx)))
splits = {
'train': dataframe.iloc[
clamp(train_idx_min - row_count_chunked)
:clamp(test_idx_min - row_count_chunked)],
'test': dataframe.iloc[
clamp(test_idx_min - row_count_chunked)
:clamp(len(dataframe))],
'stratified': False
}
else:
shuffle = split_options.get('shuffle', True)
stratified = split_options.get('stratified')
                def run_split():
                    try:
                        # stratify on the target column(s) when requested; fall back to an
                        # unstratified split if stratification fails for this chunk
                        dataframe_train, dataframe_test = train_test_split(
                            dataframe,
                            train_size=train_test_ratio,
                            shuffle=shuffle,
                            stratify=dataframe[problem['targets']] if stratified else None,
                            random_state=random_seed)
                        return {'train': dataframe_train, 'test': dataframe_test, 'stratified': stratified}
                    except (TypeError, ValueError):
                        dataframe_train, dataframe_test = train_test_split(
                            dataframe,
                            shuffle=shuffle,
                            train_size=train_test_ratio,
                            random_state=random_seed)
                        return {'train': dataframe_train, 'test': dataframe_test, 'stratified': False}
splits = run_split()
for split_name in ['train', 'test']:
if problem['taskType'] != 'FORECASTING':
if sample_count < len(splits[split_name]):
splits[split_name] = splits[split_name].sample(sample_count)
splits[split_name].to_csv(
dataset_paths[split_name],
mode='a', header=False, index=False,
quoting=csv.QUOTE_NONNUMERIC)
dataset_stratified[split_name] = dataset_stratified[split_name] and splits['stratified']
row_count_chunked += len(dataframe)
if problem['taskType'] != 'FORECASTING':
if sample_count < chunk_count:
dataframe = dataframe.sample(sample_count)
dataframe.to_csv(dataset_paths['all'], mode='a', header=False, index=False, quoting=csv.QUOTE_NONNUMERIC)
def get_mode(x):
mode = | pd.Series.mode(x) | pandas.Series.mode |
import numpy as np
import pandas as pd
from bld.project_paths import project_paths_join as ppj
# Read the dataset.
adults2005 = pd.read_stata(ppj("IN_DATA", "vp.dta"))
adults2009 = pd.read_stata(ppj("IN_DATA", "zp.dta"))
adults2013 = pd.read_stata(ppj("IN_DATA", "bdp.dta"))
# Extract the columns of Big Five variables we need for the research.
big_adults_2005 = adults2005.loc[:, 'vp12501':'vp12515']
big_adults_2009 = adults2009.loc[:, 'zp12001':'zp12015']
big_adults_2013 = adults2013.loc[:, 'bdp15101':'bdp15115']
# Rename to meaningful names. (Big Five)
for big_five in [big_adults_2005, big_adults_2009, big_adults_2013]:
big_five.columns = ['work_carefully', 'communicative', 'abrasive',
'new_idea', 'often_worry',
'forgiving_nature', 'lazy', 'outgoing',
'esthetics', 'often_nervous', 'work_efficiently',
'reserved', 'considerate', 'lively_imagination',
'be_relaxed']
# Extract the columns of basic identifier variables we need for the research.
ids2005 = adults2005.loc[:, ['hhnr', 'persnr', 'welle', 'vp14701',
'vp14702', 'vp135']]
ids2009 = adults2009.loc[:, ['hhnr', 'persnr', 'welle', 'zp12901',
'zp12902', 'zp137']]
ids2013 = adults2013.loc[:, ['hhnr', 'persnr', 'welle', 'bdp13401',
'bdp13403', 'bdp143']]
# Rename identifiers to match the other data sets.
ids2005.columns = ['cid', 'pid_parents', 'syear', 'sex_parent_2005',
'birth_year_parent_2005', 'german_nationality_2005']
ids2009.columns = ['cid', 'pid_parents', 'syear', 'sex_parent_2009',
'birth_year_parent_2009', 'german_nationality_2009']
ids2013.columns = ['cid', 'pid_parents', 'syear', 'sex_parent_2013',
'birth_year_parent_2013', 'german_nationality_2013']
# Merge the ids with the Big Five or LoC variables.
data_adults_2005 = pd.concat([ids2005, big_adults_2005],
axis=1)
data_adults_2009 = pd.concat([ids2009, big_adults_2009], axis=1)
data_adults_2013 = pd.concat([ids2013, big_adults_2013], axis=1)
# Create a dataframe that merges all the waves.
data_adults_whole = pd.concat([data_adults_2005, data_adults_2009,
data_adults_2013,
], sort=False)
data_adults = data_adults_whole.reset_index(drop=True)
# Replace missing-value codes (negative numbers) with np.nan.
dict_n = {'[-1] keine Angabe': np.nan}
data_adults_nan = data_adults.replace(dict_n)
# Replace the string response labels we use with numeric codes.
dict_adults_f = {
'[7] Trifft voll zu': 7,
'[1] Trifft ueberhaupt nicht zu': 1,
'[7] 7 stimme voll zu, (Skala 1-7)': 7,
'[6] 6 auf Skala 1-7': 6,
'[5] 5 auf Skala 1-7': 5,
'[4] 4 auf Skala 1-7': 4,
'[3] 3 auf Skala 1-7': 3,
'[2] 2 auf Skala 1-7': 2,
'[1] 1 stimme ueberhaupt nicht zu, (Skala 1-7': 1,
'[1] Ja': 1,
'[2] Nein': 0,
    '[-5] In Fragebogenversion nicht enthalten': np.nan,
'[7] 7 Stimme voll zu, (Skala 1-7)': 7,
'[1] 1 Stimme ueberhaupt nicht zu, (Skala 1-7)': 1}
data_adults_replace = data_adults_nan.replace(dict_adults_f)
# Reverse the scale for 'negative' items.
# Create a list of 'negative' items and a dict of values to swap, then
# replace the values in a new DataFrame and update the original.
dict_adults_r = {1: 7, 7: 1, 2: 6, 6: 2, 3: 5, 5: 3}
negative = ['little_control', 'lazy', 'abrasive', 'reserved', 'be_relaxed']
# keep only the reverse-coded items that are actually present in this dataset
negative = [item for item in negative if item in data_adults_replace.columns]
reverse = data_adults_replace.loc[:, negative].replace(dict_adults_r)
data_adults_replace.update(reverse)
# Replace whitespace-only entries with NaN.
data_adults_clean = data_adults_replace.replace(r'\s+', np.nan, regex=True)
# Create lists of the items corresponding to each Big Five trait.
openness_ls = ['lively_imagination', 'new_idea', 'esthetics']
conscientiousness_ls = ['lazy', 'work_efficiently', 'work_carefully']
extraversion_ls = ['reserved', 'communicative', 'outgoing']
agreeableness_ls = ['forgiving_nature', 'considerate', 'abrasive']
neuroticism_ls = ['often_worry', 'often_nervous', 'be_relaxed']
# Create the lists of trait columns we want to create and their corresponding items.
trait_ls = [neuroticism_ls, agreeableness_ls,
extraversion_ls, conscientiousness_ls, openness_ls]
trait = ['neuroticism', 'agreeableness',
'extraversion', 'conscientiousness', 'openness']
# Compute each trait measure as the row-wise mean of its items.
for x, y in zip(trait, trait_ls):
    data_adults_clean[x] = data_adults_clean[y].mean(axis=1)
# Create a function for standardising the data (z-scores: zero mean, unit variance).
def standardise(x):
    return (x - x.mean()) / x.std()
# Standardise all the measures and store them as a DataFrame.
trait = data_adults_clean[trait].pipe(standardise)
# Merge the measures with the ids.
measures_adults = | pd.concat([data_adults_replace, trait], axis=1) | pandas.concat |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# This file contains dummy data for the model unit tests
import numpy as np
import pandas as pd
AIR_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 380.6292037661305,
1: 383.26004701147235,
2: 385.8905370924373,
3: 388.52067431512216,
4: 391.1504589893095,
5: 393.7798914284503,
6: 396.4089719496461,
7: 399.0377008736321,
8: 401.66607852475926,
9: 404.2941052309762,
10: 406.9217813238114,
11: 409.54910713835505,
12: 412.1760830132403,
13: 414.80270929062544,
14: 417.42898631617453,
15: 420.0549144390392,
16: 422.68049401183924,
17: 425.3057253906438,
18: 427.93060893495215,
19: 430.555145007674,
20: 433.1793339751107,
21: 435.8031762069345,
22: 438.42667207616984,
23: 441.0498219591729,
24: 443.6726262356114,
25: 446.2950852884452,
26: 448.91719950390507,
27: 451.53896927147304,
28: 454.1603949838614,
29: 456.78147703699216,
},
"fcst_upper": {
0: 565.2596851227581,
1: 567.9432096935082,
2: 570.6270874286351,
3: 573.3113180220422,
4: 575.9959011639468,
5: 578.680836540898,
6: 581.3661238357942,
7: 584.0517627279,
8: 586.7377528928648,
9: 589.4240940027398,
10: 592.1107857259966,
11: 594.797827727545,
12: 597.4852196687516,
13: 600.1729612074585,
14: 602.8610519980012,
15: 605.5494916912286,
16: 608.2382799345206,
17: 610.9274163718079,
18: 613.6169006435915,
19: 616.3067323869615,
20: 618.9969112356168,
21: 621.6874368198849,
22: 624.3783087667415,
23: 627.0695266998305,
24: 629.7610902394838,
25: 632.4529990027421,
26: 635.145252603374,
27: 637.8378506518982,
28: 640.5307927556019,
29: 643.2240785185628,
},
}
)
AIR_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 351.01805478037915,
1: 353.64044896268456,
2: 356.2623766991775,
3: 358.883838394139,
4: 361.50483445671773,
5: 364.12536530090745,
6: 366.74543134552374,
7: 369.3650330141812,
8: 371.98417073526997,
9: 374.6028449419319,
10: 377.2210560720369,
11: 379.83880456815905,
12: 382.45609087755207,
13: 385.07291545212513,
14: 387.68927874841813,
15: 390.3051812275768,
16: 392.92062335532785,
17: 395.5356056019535,
18: 398.15012844226646,
19: 400.764192355584,
20: 403.37779782570226,
21: 405.99094534087044,
22: 408.60363539376465,
23: 411.2158684814615,
24: 413.82764510541136,
25: 416.4389657714128,
26: 419.04983098958445,
27: 421.66024127433906,
28: 424.2701971443558,
29: 426.8796991225531,
},
"fcst_upper": {
0: 594.8708341085095,
1: 597.562807742296,
2: 600.255247821895,
3: 602.9481539430253,
4: 605.6415256965386,
5: 608.3353626684409,
6: 611.0296644399166,
7: 613.724430587351,
8: 616.4196606823541,
9: 619.1153542917842,
10: 621.8115109777711,
11: 624.508130297741,
12: 627.2052118044398,
13: 629.9027550459588,
14: 632.6007595657577,
15: 635.299224902691,
16: 637.998150591032,
17: 640.6975361604982,
18: 643.3973811362772,
19: 646.0976850390515,
20: 648.7984473850253,
21: 651.4996676859489,
22: 654.2013454491467,
23: 656.903480177542,
24: 659.6060713696838,
25: 662.3091185197744,
26: 665.0126211176946,
27: 667.716578649032,
28: 670.4209905951075,
29: 673.1258564330019,
},
}
)
PEYTON_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 7.055970485245664,
1: 7.056266316358524,
2: 7.056561800026597,
3: 7.056856936297079,
4: 7.057151725217398,
5: 7.05744616683524,
6: 7.057740261198534,
7: 7.058034008355445,
8: 7.058327408354395,
9: 7.058620461244044,
10: 7.0589131670733005,
11: 7.059205525891312,
12: 7.059497537747475,
13: 7.059789202691431,
14: 7.0600805207730595,
15: 7.060371492042489,
16: 7.060662116550093,
17: 7.060952394346479,
18: 7.06124232548251,
19: 7.0615319100092835,
20: 7.061821147978145,
21: 7.062110039440677,
22: 7.062398584448709,
23: 7.062686783054313,
24: 7.0629746353098,
25: 7.063262141267724,
26: 7.063549300980883,
27: 7.063836114502315,
28: 7.0641225818852975,
29: 7.064408703183352,
},
"fcst_upper": {
0: 9.903278969069254,
1: 9.903703030365794,
2: 9.90412743910712,
3: 9.904552195246042,
4: 9.904977298735123,
5: 9.90540274952668,
6: 9.90582854757279,
7: 9.906254692825279,
8: 9.90668118523573,
9: 9.90710802475548,
10: 9.907535211335626,
11: 9.907962744927016,
12: 9.908390625480251,
13: 9.9088188529457,
14: 9.90924742727347,
15: 9.909676348413441,
16: 9.91010561631524,
17: 9.910535230928254,
18: 9.910965192201623,
19: 9.91139550008425,
20: 9.91182615452479,
21: 9.912257155471659,
22: 9.912688502873028,
23: 9.913120196676825,
24: 9.91355223683074,
25: 9.913984623282214,
26: 9.914417355978456,
27: 9.914850434866427,
28: 9.915283859892844,
29: 9.91571763100419,
},
}
)
PEYTON_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 6.605000045325637,
1: 6.605275566724015,
2: 6.605550630617649,
3: 6.605825237068679,
4: 6.606099386139563,
5: 6.60637307789309,
6: 6.606646312392368,
7: 6.606919089700827,
8: 6.607191409882221,
9: 6.607463273000626,
10: 6.607734679120443,
11: 6.608005628306389,
12: 6.608276120623508,
13: 6.608546156137163,
14: 6.608815734913038,
15: 6.609084857017139,
16: 6.609353522515795,
17: 6.609621731475649,
18: 6.609889483963668,
19: 6.610156780047143,
20: 6.61042361979368,
21: 6.610690003271204,
22: 6.610955930547961,
23: 6.611221401692519,
24: 6.611486416773756,
25: 6.611750975860878,
26: 6.612015079023405,
27: 6.612278726331177,
28: 6.612541917854348,
29: 6.612804653663393,
},
"fcst_upper": {
0: 10.354249408989281,
1: 10.354693780000304,
2: 10.355138608516068,
3: 10.355583894474442,
4: 10.356029637812957,
5: 10.35647583846883,
6: 10.356922496378955,
7: 10.357369611479896,
8: 10.357817183707903,
9: 10.358265212998898,
10: 10.358713699288483,
11: 10.359162642511938,
12: 10.359612042604219,
13: 10.360061899499968,
14: 10.360512213133493,
15: 10.36096298343879,
16: 10.361414210349539,
17: 10.361865893799084,
18: 10.362318033720465,
19: 10.36277063004639,
20: 10.363223682709256,
21: 10.363677191641132,
22: 10.364131156773775,
23: 10.364585578038621,
24: 10.365040455366783,
25: 10.365495788689062,
26: 10.365951577935935,
27: 10.366407823037564,
28: 10.366864523923793,
29: 10.36732168052415,
},
}
)
PEYTON_FCST_LINEAR_INVALID_ZERO = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2012-05-02 00:00:00"),
1: pd.Timestamp("2012-05-03 00:00:00"),
2: pd.Timestamp("2012-05-04 00:00:00"),
3: pd.Timestamp("2012-05-05 00:00:00"),
4: pd.Timestamp("2012-05-06 00:00:00"),
5: pd.Timestamp("2012-05-07 00:00:00"),
6: pd.Timestamp("2012-05-08 00:00:00"),
7: pd.Timestamp("2012-05-09 00:00:00"),
8: pd.Timestamp("2012-05-10 00:00:00"),
9: pd.Timestamp("2012-05-11 00:00:00"),
10: pd.Timestamp("2012-05-12 00:00:00"),
11: pd.Timestamp("2012-05-13 00:00:00"),
12: pd.Timestamp("2012-05-14 00:00:00"),
13: pd.Timestamp("2012-05-15 00:00:00"),
14: pd.Timestamp("2012-05-16 00:00:00"),
15: pd.Timestamp("2012-05-17 00:00:00"),
16: pd.Timestamp("2012-05-18 00:00:00"),
17: pd.Timestamp("2012-05-19 00:00:00"),
18: pd.Timestamp("2012-05-20 00:00:00"),
19: pd.Timestamp("2012-05-21 00:00:00"),
20: pd.Timestamp("2012-05-22 00:00:00"),
21: pd.Timestamp("2012-05-23 00:00:00"),
22: pd.Timestamp("2012-05-24 00:00:00"),
23: pd.Timestamp("2012-05-25 00:00:00"),
24: pd.Timestamp("2012-05-26 00:00:00"),
25: pd.Timestamp("2012-05-27 00:00:00"),
26: pd.Timestamp("2012-05-28 00:00:00"),
27: pd.Timestamp("2012-05-29 00:00:00"),
28: pd.Timestamp("2012-05-30 00:00:00"),
29: pd.Timestamp("2012-05-31 00:00:00"),
30: pd.Timestamp("2012-06-01 00:00:00"),
31: pd.Timestamp("2012-06-02 00:00:00"),
32: pd.Timestamp("2012-06-03 00:00:00"),
33: pd.Timestamp("2012-06-04 00:00:00"),
34: pd.Timestamp("2012-06-05 00:00:00"),
35: pd.Timestamp("2012-06-06 00:00:00"),
36: pd.Timestamp("2012-06-07 00:00:00"),
37: pd.Timestamp("2012-06-08 00:00:00"),
38: pd.Timestamp("2012-06-09 00:00:00"),
39: pd.Timestamp("2012-06-10 00:00:00"),
40: pd.Timestamp("2012-06-11 00:00:00"),
41: pd.Timestamp("2012-06-12 00:00:00"),
42: pd.Timestamp("2012-06-13 00:00:00"),
43: pd.Timestamp("2012-06-14 00:00:00"),
44: pd.Timestamp("2012-06-15 00:00:00"),
45: pd.Timestamp("2012-06-16 00:00:00"),
46: pd.Timestamp("2012-06-17 00:00:00"),
47: pd.Timestamp("2012-06-18 00:00:00"),
48: pd.Timestamp("2012-06-19 00:00:00"),
49: pd.Timestamp("2012-06-20 00:00:00"),
50: pd.Timestamp("2012-06-21 00:00:00"),
51: pd.Timestamp("2012-06-22 00:00:00"),
52: pd.Timestamp("2012-06-23 00:00:00"),
53: pd.Timestamp("2012-06-24 00:00:00"),
54: pd.Timestamp("2012-06-25 00:00:00"),
55: pd.Timestamp("2012-06-26 00:00:00"),
56: pd.Timestamp("2012-06-27 00:00:00"),
57: pd.Timestamp("2012-06-28 00:00:00"),
58: pd.Timestamp("2012-06-29 00:00:00"),
59: pd.Timestamp("2012-06-30 00:00:00"),
60: pd.Timestamp("2012-07-01 00:00:00"),
61: pd.Timestamp("2012-07-02 00:00:00"),
62: pd.Timestamp("2012-07-03 00:00:00"),
63: pd.Timestamp("2012-07-04 00:00:00"),
64: pd.Timestamp("2012-07-05 00:00:00"),
65: pd.Timestamp("2012-07-06 00:00:00"),
66: pd.Timestamp("2012-07-07 00:00:00"),
67: pd.Timestamp("2012-07-08 00:00:00"),
68: pd.Timestamp("2012-07-09 00:00:00"),
69: pd.Timestamp("2012-07-10 00:00:00"),
70: pd.Timestamp("2012-07-11 00:00:00"),
71: pd.Timestamp("2012-07-12 00:00:00"),
72: pd.Timestamp("2012-07-13 00:00:00"),
73: pd.Timestamp("2012-07-14 00:00:00"),
74: pd.Timestamp("2012-07-15 00:00:00"),
75: pd.Timestamp("2012-07-16 00:00:00"),
76: pd.Timestamp("2012-07-17 00:00:00"),
77: pd.Timestamp("2012-07-18 00:00:00"),
78: pd.Timestamp("2012-07-19 00:00:00"),
79: pd.Timestamp("2012-07-20 00:00:00"),
80: pd.Timestamp("2012-07-21 00:00:00"),
81: pd.Timestamp("2012-07-22 00:00:00"),
82: pd.Timestamp("2012-07-23 00:00:00"),
83: pd.Timestamp("2012-07-24 00:00:00"),
84: pd.Timestamp("2012-07-25 00:00:00"),
85: pd.Timestamp("2012-07-26 00:00:00"),
86: pd.Timestamp("2012-07-27 00:00:00"),
87: pd.Timestamp("2012-07-28 00:00:00"),
88: pd.Timestamp("2012-07-29 00:00:00"),
89: pd.Timestamp("2012-07-30 00:00:00"),
90: pd.Timestamp("2012-07-31 00:00:00"),
91: pd.Timestamp("2012-08-01 00:00:00"),
92: pd.Timestamp("2012-08-02 00:00:00"),
93: pd.Timestamp("2012-08-03 00:00:00"),
94: pd.Timestamp("2012-08-04 00:00:00"),
95: pd.Timestamp("2012-08-05 00:00:00"),
96: pd.Timestamp("2012-08-06 00:00:00"),
97: pd.Timestamp("2012-08-07 00:00:00"),
98: pd.Timestamp("2012-08-08 00:00:00"),
99: pd.Timestamp("2012-08-09 00:00:00"),
100: pd.Timestamp("2012-08-10 00:00:00"),
101: pd.Timestamp("2012-08-11 00:00:00"),
102: pd.Timestamp("2012-08-12 00:00:00"),
103: pd.Timestamp("2012-08-13 00:00:00"),
104: pd.Timestamp("2012-08-14 00:00:00"),
105: pd.Timestamp("2012-08-15 00:00:00"),
106: pd.Timestamp("2012-08-16 00:00:00"),
107: pd.Timestamp("2012-08-17 00:00:00"),
108: pd.Timestamp("2012-08-18 00:00:00"),
109: pd.Timestamp("2012-08-19 00:00:00"),
110: pd.Timestamp("2012-08-20 00:00:00"),
111: pd.Timestamp("2012-08-21 00:00:00"),
112: pd.Timestamp("2012-08-22 00:00:00"),
113: pd.Timestamp("2012-08-23 00:00:00"),
114: pd.Timestamp("2012-08-24 00:00:00"),
115: pd.Timestamp("2012-08-25 00:00:00"),
116: pd.Timestamp("2012-08-26 00:00:00"),
117: pd.Timestamp("2012-08-27 00:00:00"),
118: pd.Timestamp("2012-08-28 00:00:00"),
119: pd.Timestamp("2012-08-29 00:00:00"),
120: pd.Timestamp("2012-08-30 00:00:00"),
121: pd.Timestamp("2012-08-31 00:00:00"),
122: pd.Timestamp("2012-09-01 00:00:00"),
123: pd.Timestamp("2012-09-02 00:00:00"),
124: pd.Timestamp("2012-09-03 00:00:00"),
125: pd.Timestamp("2012-09-04 00:00:00"),
126: pd.Timestamp("2012-09-05 00:00:00"),
127: pd.Timestamp("2012-09-06 00:00:00"),
128: pd.Timestamp("2012-09-07 00:00:00"),
129: pd.Timestamp("2012-09-08 00:00:00"),
130: pd.Timestamp("2012-09-09 00:00:00"),
131: pd.Timestamp("2012-09-10 00:00:00"),
132: pd.Timestamp("2012-09-11 00:00:00"),
133: pd.Timestamp("2012-09-12 00:00:00"),
134: pd.Timestamp("2012-09-13 00:00:00"),
135: pd.Timestamp("2012-09-14 00:00:00"),
136: pd.Timestamp("2012-09-15 00:00:00"),
137: pd.Timestamp("2012-09-16 00:00:00"),
138: pd.Timestamp("2012-09-17 00:00:00"),
139: pd.Timestamp("2012-09-18 00:00:00"),
140: pd.Timestamp("2012-09-19 00:00:00"),
141: pd.Timestamp("2012-09-20 00:00:00"),
142: pd.Timestamp("2012-09-21 00:00:00"),
143: pd.Timestamp("2012-09-22 00:00:00"),
144: pd.Timestamp("2012-09-23 00:00:00"),
145: pd.Timestamp("2012-09-24 00:00:00"),
146: pd.Timestamp("2012-09-25 00:00:00"),
147: pd.Timestamp("2012-09-26 00:00:00"),
148: pd.Timestamp("2012-09-27 00:00:00"),
149: pd.Timestamp("2012-09-28 00:00:00"),
150: pd.Timestamp("2012-09-29 00:00:00"),
151: pd.Timestamp("2012-09-30 00:00:00"),
152: pd.Timestamp("2012-10-01 00:00:00"),
153: pd.Timestamp("2012-10-02 00:00:00"),
154: pd.Timestamp("2012-10-03 00:00:00"),
155: pd.Timestamp("2012-10-04 00:00:00"),
156: pd.Timestamp("2012-10-05 00:00:00"),
157: pd.Timestamp("2012-10-06 00:00:00"),
158: pd.Timestamp("2012-10-07 00:00:00"),
159: pd.Timestamp("2012-10-08 00:00:00"),
160: pd.Timestamp("2012-10-09 00:00:00"),
161: pd.Timestamp("2012-10-10 00:00:00"),
162: pd.Timestamp("2012-10-11 00:00:00"),
163: pd.Timestamp("2012-10-12 00:00:00"),
164: pd.Timestamp("2012-10-13 00:00:00"),
165: pd.Timestamp("2012-10-14 00:00:00"),
166: pd.Timestamp("2012-10-15 00:00:00"),
167: pd.Timestamp("2012-10-16 00:00:00"),
168: pd.Timestamp("2012-10-17 00:00:00"),
169: pd.Timestamp("2012-10-18 00:00:00"),
170: pd.Timestamp("2012-10-19 00:00:00"),
171: pd.Timestamp("2012-10-20 00:00:00"),
172: pd.Timestamp("2012-10-21 00:00:00"),
173: pd.Timestamp("2012-10-22 00:00:00"),
174: pd.Timestamp("2012-10-23 00:00:00"),
175: pd.Timestamp("2012-10-24 00:00:00"),
176: pd.Timestamp("2012-10-25 00:00:00"),
177: pd.Timestamp("2012-10-26 00:00:00"),
178: pd.Timestamp("2012-10-27 00:00:00"),
179: pd.Timestamp("2012-10-28 00:00:00"),
180: pd.Timestamp("2012-10-29 00:00:00"),
181: pd.Timestamp("2012-10-30 00:00:00"),
182: pd.Timestamp("2012-10-31 00:00:00"),
183: pd.Timestamp("2012-11-01 00:00:00"),
184: pd.Timestamp("2012-11-02 00:00:00"),
185: pd.Timestamp("2012-11-03 00:00:00"),
186: pd.Timestamp("2012-11-04 00:00:00"),
187: pd.Timestamp("2012-11-05 00:00:00"),
188: pd.Timestamp("2012-11-06 00:00:00"),
189: pd.Timestamp("2012-11-07 00:00:00"),
190: pd.Timestamp("2012-11-08 00:00:00"),
191: pd.Timestamp("2012-11-09 00:00:00"),
192: pd.Timestamp("2012-11-10 00:00:00"),
193: pd.Timestamp("2012-11-11 00:00:00"),
194: pd.Timestamp("2012-11-12 00:00:00"),
195: pd.Timestamp("2012-11-13 00:00:00"),
196: pd.Timestamp("2012-11-14 00:00:00"),
197: pd.Timestamp("2012-11-15 00:00:00"),
198: pd.Timestamp("2012-11-16 00:00:00"),
199: pd.Timestamp("2012-11-17 00:00:00"),
200: pd.Timestamp("2012-11-18 00:00:00"),
201: pd.Timestamp("2012-11-19 00:00:00"),
202: pd.Timestamp("2012-11-20 00:00:00"),
203: pd.Timestamp("2012-11-21 00:00:00"),
204: pd.Timestamp("2012-11-22 00:00:00"),
205: pd.Timestamp("2012-11-23 00:00:00"),
206: pd.Timestamp("2012-11-24 00:00:00"),
207: pd.Timestamp("2012-11-25 00:00:00"),
208: pd.Timestamp("2012-11-26 00:00:00"),
209: pd.Timestamp("2012-11-27 00:00:00"),
210: pd.Timestamp("2012-11-28 00:00:00"),
211: pd.Timestamp("2012-11-29 00:00:00"),
212: pd.Timestamp("2012-11-30 00:00:00"),
213: pd.Timestamp("2012-12-01 00:00:00"),
214: pd.Timestamp("2012-12-02 00:00:00"),
215: pd.Timestamp("2012-12-03 00:00:00"),
216: pd.Timestamp("2012-12-04 00:00:00"),
217: pd.Timestamp("2012-12-05 00:00:00"),
218: pd.Timestamp("2012-12-06 00:00:00"),
219: pd.Timestamp("2012-12-07 00:00:00"),
220: pd.Timestamp("2012-12-08 00:00:00"),
221: pd.Timestamp("2012-12-09 00:00:00"),
222: pd.Timestamp("2012-12-10 00:00:00"),
223: pd.Timestamp("2012-12-11 00:00:00"),
224: pd.Timestamp("2012-12-12 00:00:00"),
225: pd.Timestamp("2012-12-13 00:00:00"),
226: pd.Timestamp("2012-12-14 00:00:00"),
227: pd.Timestamp("2012-12-15 00:00:00"),
228: pd.Timestamp("2012-12-16 00:00:00"),
229: pd.Timestamp("2012-12-17 00:00:00"),
230: pd.Timestamp("2012-12-18 00:00:00"),
231: pd.Timestamp("2012-12-19 00:00:00"),
232: pd.Timestamp("2012-12-20 00:00:00"),
233: pd.Timestamp("2012-12-21 00:00:00"),
234: pd.Timestamp("2012-12-22 00:00:00"),
235: pd.Timestamp("2012-12-23 00:00:00"),
236: pd.Timestamp("2012-12-24 00:00:00"),
237: pd.Timestamp("2012-12-25 00:00:00"),
238: pd.Timestamp("2012-12-26 00:00:00"),
239: pd.Timestamp("2012-12-27 00:00:00"),
240: pd.Timestamp("2012-12-28 00:00:00"),
241: pd.Timestamp("2012-12-29 00:00:00"),
242: pd.Timestamp("2012-12-30 00:00:00"),
243: pd.Timestamp("2012-12-31 00:00:00"),
244: pd.Timestamp("2013-01-01 00:00:00"),
245: pd.Timestamp("2013-01-02 00:00:00"),
246: pd.Timestamp("2013-01-03 00:00:00"),
247: pd.Timestamp("2013-01-04 00:00:00"),
248: pd.Timestamp("2013-01-05 00:00:00"),
249: pd.Timestamp("2013-01-06 00:00:00"),
250: pd.Timestamp("2013-01-07 00:00:00"),
251: pd.Timestamp("2013-01-08 00:00:00"),
252: pd.Timestamp("2013-01-09 00:00:00"),
253: pd.Timestamp("2013-01-10 00:00:00"),
254: pd.Timestamp("2013-01-11 00:00:00"),
255: pd.Timestamp("2013-01-12 00:00:00"),
256: pd.Timestamp("2013-01-13 00:00:00"),
257: pd.Timestamp("2013-01-14 00:00:00"),
258: pd.Timestamp("2013-01-15 00:00:00"),
259: pd.Timestamp("2013-01-16 00:00:00"),
260: pd.Timestamp("2013-01-17 00:00:00"),
261: pd.Timestamp("2013-01-18 00:00:00"),
262: pd.Timestamp("2013-01-19 00:00:00"),
263: pd.Timestamp("2013-01-20 00:00:00"),
264: pd.Timestamp("2013-01-21 00:00:00"),
265: pd.Timestamp("2013-01-22 00:00:00"),
266: pd.Timestamp("2013-01-23 00:00:00"),
267: pd.Timestamp("2013-01-24 00:00:00"),
268: pd.Timestamp("2013-01-25 00:00:00"),
269: pd.Timestamp("2013-01-26 00:00:00"),
270: pd.Timestamp("2013-01-27 00:00:00"),
271: pd.Timestamp("2013-01-28 00:00:00"),
272: pd.Timestamp("2013-01-29 00:00:00"),
273: pd.Timestamp("2013-01-30 00:00:00"),
274: pd.Timestamp("2013-01-31 00:00:00"),
275: pd.Timestamp("2013-02-01 00:00:00"),
276: pd.Timestamp("2013-02-02 00:00:00"),
277: pd.Timestamp("2013-02-03 00:00:00"),
278: pd.Timestamp("2013-02-04 00:00:00"),
279: pd.Timestamp("2013-02-05 00:00:00"),
280: pd.Timestamp("2013-02-06 00:00:00"),
281: pd.Timestamp("2013-02-07 00:00:00"),
282: pd.Timestamp("2013-02-08 00:00:00"),
283: pd.Timestamp("2013-02-09 00:00:00"),
284: pd.Timestamp("2013-02-10 00:00:00"),
285: pd.Timestamp("2013-02-11 00:00:00"),
286: pd.Timestamp("2013-02-12 00:00:00"),
287: pd.Timestamp("2013-02-13 00:00:00"),
288: pd.Timestamp("2013-02-14 00:00:00"),
289: pd.Timestamp("2013-02-15 00:00:00"),
290: pd.Timestamp("2013-02-16 00:00:00"),
291: pd.Timestamp("2013-02-17 00:00:00"),
292: pd.Timestamp("2013-02-18 00:00:00"),
293: pd.Timestamp("2013-02-19 00:00:00"),
294: pd.Timestamp("2013-02-20 00:00:00"),
295: pd.Timestamp("2013-02-21 00:00:00"),
296: pd.Timestamp("2013-02-22 00:00:00"),
297: pd.Timestamp("2013-02-23 00:00:00"),
298: pd.Timestamp("2013-02-24 00:00:00"),
299: pd.Timestamp("2013-02-25 00:00:00"),
300: pd.Timestamp("2013-02-26 00:00:00"),
301: pd.Timestamp("2013-02-27 00:00:00"),
302: pd.Timestamp("2013-02-28 00:00:00"),
303: pd.Timestamp("2013-03-01 00:00:00"),
304: pd.Timestamp("2013-03-02 00:00:00"),
305: pd.Timestamp("2013-03-03 00:00:00"),
306: pd.Timestamp("2013-03-04 00:00:00"),
307: pd.Timestamp("2013-03-05 00:00:00"),
308: pd.Timestamp("2013-03-06 00:00:00"),
309: pd.Timestamp("2013-03-07 00:00:00"),
310: pd.Timestamp("2013-03-08 00:00:00"),
311: pd.Timestamp("2013-03-09 00:00:00"),
312: pd.Timestamp("2013-03-10 00:00:00"),
313: pd.Timestamp("2013-03-11 00:00:00"),
314: pd.Timestamp("2013-03-12 00:00:00"),
315: pd.Timestamp("2013-03-13 00:00:00"),
316: pd.Timestamp("2013-03-14 00:00:00"),
317: pd.Timestamp("2013-03-15 00:00:00"),
318: pd.Timestamp("2013-03-16 00:00:00"),
319: pd.Timestamp("2013-03-17 00:00:00"),
320: pd.Timestamp("2013-03-18 00:00:00"),
321: pd.Timestamp("2013-03-19 00:00:00"),
322: pd.Timestamp("2013-03-20 00:00:00"),
323: pd.Timestamp("2013-03-21 00:00:00"),
324: pd.Timestamp("2013-03-22 00:00:00"),
325: pd.Timestamp("2013-03-23 00:00:00"),
326: pd.Timestamp("2013-03-24 00:00:00"),
327: pd.Timestamp("2013-03-25 00:00:00"),
328: pd.Timestamp("2013-03-26 00:00:00"),
329: pd.Timestamp("2013-03-27 00:00:00"),
330: pd.Timestamp("2013-03-28 00:00:00"),
331: pd.Timestamp("2013-03-29 00:00:00"),
332: pd.Timestamp("2013-03-30 00:00:00"),
333: pd.Timestamp("2013-03-31 00:00:00"),
334: pd.Timestamp("2013-04-01 00:00:00"),
335: pd.Timestamp("2013-04-02 00:00:00"),
336: pd.Timestamp("2013-04-03 00:00:00"),
337: pd.Timestamp("2013-04-04 00:00:00"),
338: pd.Timestamp("2013-04-05 00:00:00"),
339: pd.Timestamp("2013-04-06 00:00:00"),
340: pd.Timestamp("2013-04-07 00:00:00"),
341: pd.Timestamp("2013-04-08 00:00:00"),
342: pd.Timestamp("2013-04-09 00:00:00"),
343: pd.Timestamp("2013-04-10 00:00:00"),
344: pd.Timestamp("2013-04-11 00:00:00"),
345: pd.Timestamp("2013-04-12 00:00:00"),
346: pd.Timestamp("2013-04-13 00:00:00"),
347: pd.Timestamp("2013-04-14 00:00:00"),
348: pd.Timestamp("2013-04-15 00:00:00"),
349: pd.Timestamp("2013-04-16 00:00:00"),
350: pd.Timestamp("2013-04-17 00:00:00"),
351: pd.Timestamp("2013-04-18 00:00:00"),
352: pd.Timestamp("2013-04-19 00:00:00"),
353: pd.Timestamp("2013-04-20 00:00:00"),
354: pd.Timestamp("2013-04-21 00:00:00"),
355: pd.Timestamp("2013-04-22 00:00:00"),
356: pd.Timestamp("2013-04-23 00:00:00"),
357: pd.Timestamp("2013-04-24 00:00:00"),
358: pd.Timestamp("2013-04-25 00:00:00"),
359: pd.Timestamp("2013-04-26 00:00:00"),
360: pd.Timestamp("2013-04-27 00:00:00"),
361: pd.Timestamp("2013-04-28 00:00:00"),
362: pd.Timestamp("2013-04-29 00:00:00"),
363: pd.Timestamp("2013-04-30 00:00:00"),
364: pd.Timestamp("2013-05-01 00:00:00"),
365: pd.Timestamp("2013-05-02 00:00:00"),
366: pd.Timestamp("2013-05-03 00:00:00"),
367: pd.Timestamp("2013-05-04 00:00:00"),
368: pd.Timestamp("2013-05-05 00:00:00"),
369: pd.Timestamp("2013-05-06 00:00:00"),
370: pd.Timestamp("2013-05-07 00:00:00"),
371: pd.Timestamp("2013-05-08 00:00:00"),
372: pd.Timestamp("2013-05-09 00:00:00"),
373: pd.Timestamp("2013-05-10 00:00:00"),
374: pd.Timestamp("2013-05-11 00:00:00"),
375: pd.Timestamp("2013-05-12 00:00:00"),
376: pd.Timestamp("2013-05-13 00:00:00"),
377: pd.Timestamp("2013-05-14 00:00:00"),
378: pd.Timestamp("2013-05-15 00:00:00"),
379: pd.Timestamp("2013-05-16 00:00:00"),
380: pd.Timestamp("2013-05-17 00:00:00"),
381: pd.Timestamp("2013-05-18 00:00:00"),
382: pd.Timestamp("2013-05-19 00:00:00"),
383: pd.Timestamp("2013-05-20 00:00:00"),
384: pd.Timestamp("2013-05-21 00:00:00"),
385: pd.Timestamp("2013-05-22 00:00:00"),
386: pd.Timestamp("2013-05-23 00:00:00"),
387: pd.Timestamp("2013-05-24 00:00:00"),
388: pd.Timestamp("2013-05-25 00:00:00"),
389: pd.Timestamp("2013-05-26 00:00:00"),
390: pd.Timestamp("2013-05-27 00:00:00"),
391: pd.Timestamp("2013-05-28 00:00:00"),
392: pd.Timestamp("2013-05-29 00:00:00"),
393: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.348604308646497,
1: 8.348964254851197,
2: 8.349324201055898,
3: 8.349684147260598,
4: 8.350044093465298,
5: 8.350404039669998,
6: 8.3507639858747,
7: 8.3511239320794,
8: 8.3514838782841,
9: 8.351843824488801,
10: 8.352203770693501,
11: 8.352563716898201,
12: 8.352923663102903,
13: 8.353283609307603,
14: 8.353643555512303,
15: 8.354003501717003,
16: 8.354363447921704,
17: 8.354723394126404,
18: 8.355083340331104,
19: 8.355443286535806,
20: 8.355803232740506,
21: 8.356163178945206,
22: 8.356523125149906,
23: 8.356883071354607,
24: 8.357243017559307,
25: 8.357602963764007,
26: 8.357962909968709,
27: 8.358322856173409,
28: 8.358682802378109,
29: 8.35904274858281,
30: 8.35940269478751,
31: 8.35976264099221,
32: 8.36012258719691,
33: 8.360482533401612,
34: 8.360842479606312,
35: 8.361202425811012,
36: 8.361562372015714,
37: 8.361922318220413,
38: 8.362282264425113,
39: 8.362642210629813,
40: 8.363002156834515,
41: 8.363362103039215,
42: 8.363722049243915,
43: 8.364081995448617,
44: 8.364441941653316,
45: 8.364801887858016,
46: 8.365161834062716,
47: 8.365521780267418,
48: 8.365881726472118,
49: 8.366241672676818,
50: 8.36660161888152,
51: 8.36696156508622,
52: 8.36732151129092,
53: 8.367681457495621,
54: 8.368041403700321,
55: 8.368401349905021,
56: 8.36876129610972,
57: 8.369121242314423,
58: 8.369481188519122,
59: 8.369841134723822,
60: 8.370201080928524,
61: 8.370561027133224,
62: 8.370920973337924,
63: 8.371280919542624,
64: 8.371640865747326,
65: 8.372000811952026,
66: 8.372360758156725,
67: 8.372720704361427,
68: 8.373080650566127,
69: 8.373440596770827,
70: 8.373800542975529,
71: 8.374160489180229,
72: 8.374520435384929,
73: 8.374880381589628,
74: 8.37524032779433,
75: 8.37560027399903,
76: 8.37596022020373,
77: 8.376320166408432,
78: 8.376680112613132,
79: 8.377040058817832,
80: 8.377400005022531,
81: 8.377759951227233,
82: 8.378119897431933,
83: 8.378479843636633,
84: 8.378839789841335,
85: 8.379199736046035,
86: 8.379559682250735,
87: 8.379919628455436,
88: 8.380279574660136,
89: 8.380639520864836,
90: 8.380999467069536,
91: 8.381359413274238,
92: 8.381719359478938,
93: 8.382079305683638,
94: 8.38243925188834,
95: 8.38279919809304,
96: 8.38315914429774,
97: 8.383519090502439,
98: 8.38387903670714,
99: 8.38423898291184,
100: 8.38459892911654,
101: 8.384958875321242,
102: 8.385318821525942,
103: 8.385678767730642,
104: 8.386038713935344,
105: 8.386398660140044,
106: 8.386758606344744,
107: 8.387118552549444,
108: 8.387478498754145,
109: 8.387838444958845,
110: 8.388198391163545,
111: 8.388558337368247,
112: 8.388918283572947,
113: 8.389278229777647,
114: 8.389638175982347,
115: 8.389998122187048,
116: 8.390358068391748,
117: 8.390718014596448,
118: 8.39107796080115,
119: 8.39143790700585,
120: 8.39179785321055,
121: 8.392157799415251,
122: 8.392517745619951,
123: 8.392877691824651,
124: 8.393237638029351,
125: 8.393597584234053,
126: 8.393957530438753,
127: 8.394317476643453,
128: 8.394677422848154,
129: 8.395037369052854,
130: 8.395397315257554,
131: 8.395757261462254,
132: 8.396117207666956,
133: 8.396477153871656,
134: 8.396837100076356,
135: 8.397197046281057,
136: 8.397556992485757,
137: 8.397916938690457,
138: 8.398276884895157,
139: 8.398636831099859,
140: 8.398996777304559,
141: 8.399356723509259,
142: 8.39971666971396,
143: 8.40007661591866,
144: 8.40043656212336,
145: 8.400796508328062,
146: 8.401156454532762,
147: 8.401516400737462,
148: 8.401876346942162,
149: 8.402236293146863,
150: 8.402596239351563,
151: 8.402956185556263,
152: 8.403316131760965,
153: 8.403676077965665,
154: 8.404036024170365,
155: 8.404395970375065,
156: 8.404755916579767,
157: 8.405115862784466,
158: 8.405475808989166,
159: 8.405835755193868,
160: 8.406195701398568,
161: 8.406555647603268,
162: 8.40691559380797,
163: 8.40727554001267,
164: 8.40763548621737,
165: 8.40799543242207,
166: 8.408355378626771,
167: 8.408715324831471,
168: 8.409075271036171,
169: 8.409435217240873,
170: 8.409795163445573,
171: 8.410155109650272,
172: 8.410515055854972,
173: 8.410875002059674,
174: 8.411234948264374,
175: 8.411594894469074,
176: 8.411954840673776,
177: 8.412314786878476,
178: 8.412674733083175,
179: 8.413034679287877,
180: 8.413394625492577,
181: 8.413754571697277,
182: 8.414114517901977,
183: 8.414474464106679,
184: 8.414834410311379,
185: 8.415194356516078,
186: 8.41555430272078,
187: 8.41591424892548,
188: 8.41627419513018,
189: 8.41663414133488,
190: 8.416994087539582,
191: 8.417354033744282,
192: 8.417713979948982,
193: 8.418073926153683,
194: 8.418433872358383,
195: 8.418793818563083,
196: 8.419153764767785,
197: 8.419513710972485,
198: 8.419873657177185,
199: 8.420233603381885,
200: 8.420593549586586,
201: 8.420953495791286,
202: 8.421313441995986,
203: 8.421673388200688,
204: 8.422033334405388,
205: 8.422393280610088,
206: 8.422753226814788,
207: 8.42311317301949,
208: 8.42347311922419,
209: 8.423833065428889,
210: 8.42419301163359,
211: 8.42455295783829,
212: 8.42491290404299,
213: 8.42527285024769,
214: 8.425632796452392,
215: 8.425992742657092,
216: 8.426352688861792,
217: 8.426712635066494,
218: 8.427072581271194,
219: 8.427432527475894,
220: 8.427792473680595,
221: 8.428152419885295,
222: 8.428512366089995,
223: 8.428872312294695,
224: 8.429232258499397,
225: 8.429592204704097,
226: 8.429952150908797,
227: 8.430312097113498,
228: 8.430672043318198,
229: 8.431031989522898,
230: 8.431391935727598,
231: 8.4317518819323,
232: 8.432111828137,
233: 8.4324717743417,
234: 8.432831720546401,
235: 8.433191666751101,
236: 8.433551612955801,
237: 8.433911559160503,
238: 8.434271505365203,
239: 8.434631451569903,
240: 8.434991397774603,
241: 8.435351343979304,
242: 8.435711290184004,
243: 8.436071236388704,
244: 8.436431182593406,
245: 8.436791128798106,
246: 8.437151075002806,
247: 8.437511021207506,
248: 8.437870967412207,
249: 8.438230913616907,
250: 8.438590859821607,
251: 8.438950806026309,
252: 8.439310752231009,
253: 8.439670698435709,
254: 8.44003064464041,
255: 8.44039059084511,
256: 8.44075053704981,
257: 8.44111048325451,
258: 8.441470429459212,
259: 8.441830375663912,
260: 8.442190321868612,
261: 8.442550268073314,
262: 8.442910214278013,
263: 8.443270160482713,
264: 8.443630106687413,
265: 8.443990052892115,
266: 8.444349999096815,
267: 8.444709945301515,
268: 8.445069891506217,
269: 8.445429837710916,
270: 8.445789783915616,
271: 8.446149730120318,
272: 8.446509676325018,
273: 8.446869622529718,
274: 8.447229568734418,
275: 8.44758951493912,
276: 8.44794946114382,
277: 8.44830940734852,
278: 8.448669353553221,
279: 8.449029299757921,
280: 8.449389245962621,
281: 8.449749192167321,
282: 8.450109138372023,
283: 8.450469084576723,
284: 8.450829030781422,
285: 8.451188976986124,
286: 8.451548923190824,
287: 8.451908869395524,
288: 8.452268815600226,
289: 8.452628761804926,
290: 8.452988708009626,
291: 8.453348654214325,
292: 8.453708600419027,
293: 8.454068546623727,
294: 8.454428492828427,
295: 8.454788439033129,
296: 8.455148385237829,
297: 8.455508331442529,
298: 8.455868277647228,
299: 8.45622822385193,
300: 8.45658817005663,
301: 8.45694811626133,
302: 8.457308062466032,
303: 8.457668008670732,
304: 8.458027954875432,
305: 8.458387901080131,
306: 8.458747847284833,
307: 8.459107793489533,
308: 8.459467739694233,
309: 8.459827685898935,
310: 8.460187632103635,
311: 8.460547578308335,
312: 8.460907524513036,
313: 8.461267470717736,
314: 8.461627416922436,
315: 8.461987363127136,
316: 8.462347309331838,
317: 8.462707255536538,
318: 8.463067201741238,
319: 8.46342714794594,
320: 8.46378709415064,
321: 8.46414704035534,
322: 8.464506986560039,
323: 8.46486693276474,
324: 8.46522687896944,
325: 8.46558682517414,
326: 8.465946771378842,
327: 8.466306717583542,
328: 8.466666663788242,
329: 8.467026609992944,
330: 8.467386556197644,
331: 8.467746502402344,
332: 8.468106448607044,
333: 8.468466394811745,
334: 8.468826341016445,
335: 8.469186287221145,
336: 8.469546233425847,
337: 8.469906179630547,
338: 8.470266125835247,
339: 8.470626072039947,
340: 8.470986018244648,
341: 8.471345964449348,
342: 8.471705910654048,
343: 8.47206585685875,
344: 8.47242580306345,
345: 8.47278574926815,
346: 8.473145695472851,
347: 8.473505641677551,
348: 8.473865587882251,
349: 8.474225534086951,
350: 8.474585480291653,
351: 8.474945426496353,
352: 8.475305372701053,
353: 8.475665318905754,
354: 8.476025265110454,
355: 8.476385211315154,
356: 8.476745157519854,
357: 8.477105103724556,
358: 8.477465049929256,
359: 8.477824996133956,
360: 8.478184942338657,
361: 8.478544888543357,
362: 8.478904834748057,
363: 8.479264780952759,
364: 8.479624727157459,
365: 8.479984673362159,
366: 8.480344619566859,
367: 8.48070456577156,
368: 8.48106451197626,
369: 8.48142445818096,
370: 8.481784404385662,
371: 8.482144350590362,
372: 8.482504296795062,
373: 8.482864242999762,
374: 8.483224189204464,
375: 8.483584135409163,
376: 8.483944081613863,
377: 8.484304027818565,
378: 8.484663974023265,
379: 8.485023920227965,
380: 8.485383866432667,
381: 8.485743812637367,
382: 8.486103758842066,
383: 8.486463705046766,
384: 8.486823651251468,
385: 8.487183597456168,
386: 8.487543543660868,
387: 8.48790348986557,
388: 8.48826343607027,
389: 8.48862338227497,
390: 8.48898332847967,
391: 8.489343274684371,
392: 8.489703220889071,
393: 8.490063167093771,
},
"fcst_lower": {
0: -np.inf,
1: -np.inf,
2: -np.inf,
3: -np.inf,
4: -np.inf,
5: -np.inf,
6: -np.inf,
7: -np.inf,
8: -np.inf,
9: -np.inf,
10: -np.inf,
11: -np.inf,
12: -np.inf,
13: -np.inf,
14: -np.inf,
15: -np.inf,
16: -np.inf,
17: -np.inf,
18: -np.inf,
19: -np.inf,
20: -np.inf,
21: -np.inf,
22: -np.inf,
23: -np.inf,
24: -np.inf,
25: -np.inf,
26: -np.inf,
27: -np.inf,
28: -np.inf,
29: -np.inf,
30: -np.inf,
31: -np.inf,
32: -np.inf,
33: -np.inf,
34: -np.inf,
35: -np.inf,
36: -np.inf,
37: -np.inf,
38: -np.inf,
39: -np.inf,
40: -np.inf,
41: -np.inf,
42: -np.inf,
43: -np.inf,
44: -np.inf,
45: -np.inf,
46: -np.inf,
47: -np.inf,
48: -np.inf,
49: -np.inf,
50: -np.inf,
51: -np.inf,
52: -np.inf,
53: -np.inf,
54: -np.inf,
55: -np.inf,
56: -np.inf,
57: -np.inf,
58: -np.inf,
59: -np.inf,
60: -np.inf,
61: -np.inf,
62: -np.inf,
63: -np.inf,
64: -np.inf,
65: -np.inf,
66: -np.inf,
67: -np.inf,
68: -np.inf,
69: -np.inf,
70: -np.inf,
71: -np.inf,
72: -np.inf,
73: -np.inf,
74: -np.inf,
75: -np.inf,
76: -np.inf,
77: -np.inf,
78: -np.inf,
79: -np.inf,
80: -np.inf,
81: -np.inf,
82: -np.inf,
83: -np.inf,
84: -np.inf,
85: -np.inf,
86: -np.inf,
87: -np.inf,
88: -np.inf,
89: -np.inf,
90: -np.inf,
91: -np.inf,
92: -np.inf,
93: -np.inf,
94: -np.inf,
95: -np.inf,
96: -np.inf,
97: -np.inf,
98: -np.inf,
99: -np.inf,
100: -np.inf,
101: -np.inf,
102: -np.inf,
103: -np.inf,
104: -np.inf,
105: -np.inf,
106: -np.inf,
107: -np.inf,
108: -np.inf,
109: -np.inf,
110: -np.inf,
111: -np.inf,
112: -np.inf,
113: -np.inf,
114: -np.inf,
115: -np.inf,
116: -np.inf,
117: -np.inf,
118: -np.inf,
119: -np.inf,
120: -np.inf,
121: -np.inf,
122: -np.inf,
123: -np.inf,
124: -np.inf,
125: -np.inf,
126: -np.inf,
127: -np.inf,
128: -np.inf,
129: -np.inf,
130: -np.inf,
131: -np.inf,
132: -np.inf,
133: -np.inf,
134: -np.inf,
135: -np.inf,
136: -np.inf,
137: -np.inf,
138: -np.inf,
139: -np.inf,
140: -np.inf,
141: -np.inf,
142: -np.inf,
143: -np.inf,
144: -np.inf,
145: -np.inf,
146: -np.inf,
147: -np.inf,
148: -np.inf,
149: -np.inf,
150: -np.inf,
151: -np.inf,
152: -np.inf,
153: -np.inf,
154: -np.inf,
155: -np.inf,
156: -np.inf,
157: -np.inf,
158: -np.inf,
159: -np.inf,
160: -np.inf,
161: -np.inf,
162: -np.inf,
163: -np.inf,
164: -np.inf,
165: -np.inf,
166: -np.inf,
167: -np.inf,
168: -np.inf,
169: -np.inf,
170: -np.inf,
171: -np.inf,
172: -np.inf,
173: -np.inf,
174: -np.inf,
175: -np.inf,
176: -np.inf,
177: -np.inf,
178: -np.inf,
179: -np.inf,
180: -np.inf,
181: -np.inf,
182: -np.inf,
183: -np.inf,
184: -np.inf,
185: -np.inf,
186: -np.inf,
187: -np.inf,
188: -np.inf,
189: -np.inf,
190: -np.inf,
191: -np.inf,
192: -np.inf,
193: -np.inf,
194: -np.inf,
195: -np.inf,
196: -np.inf,
197: -np.inf,
198: -np.inf,
199: -np.inf,
200: -np.inf,
201: -np.inf,
202: -np.inf,
203: -np.inf,
204: -np.inf,
205: -np.inf,
206: -np.inf,
207: -np.inf,
208: -np.inf,
209: -np.inf,
210: -np.inf,
211: -np.inf,
212: -np.inf,
213: -np.inf,
214: -np.inf,
215: -np.inf,
216: -np.inf,
217: -np.inf,
218: -np.inf,
219: -np.inf,
220: -np.inf,
221: -np.inf,
222: -np.inf,
223: -np.inf,
224: -np.inf,
225: -np.inf,
226: -np.inf,
227: -np.inf,
228: -np.inf,
229: -np.inf,
230: -np.inf,
231: -np.inf,
232: -np.inf,
233: -np.inf,
234: -np.inf,
235: -np.inf,
236: -np.inf,
237: -np.inf,
238: -np.inf,
239: -np.inf,
240: -np.inf,
241: -np.inf,
242: -np.inf,
243: -np.inf,
244: -np.inf,
245: -np.inf,
246: -np.inf,
247: -np.inf,
248: -np.inf,
249: -np.inf,
250: -np.inf,
251: -np.inf,
252: -np.inf,
253: -np.inf,
254: -np.inf,
255: -np.inf,
256: -np.inf,
257: -np.inf,
258: -np.inf,
259: -np.inf,
260: -np.inf,
261: -np.inf,
262: -np.inf,
263: -np.inf,
264: -np.inf,
265: -np.inf,
266: -np.inf,
267: -np.inf,
268: -np.inf,
269: -np.inf,
270: -np.inf,
271: -np.inf,
272: -np.inf,
273: -np.inf,
274: -np.inf,
275: -np.inf,
276: -np.inf,
277: -np.inf,
278: -np.inf,
279: -np.inf,
280: -np.inf,
281: -np.inf,
282: -np.inf,
283: -np.inf,
284: -np.inf,
285: -np.inf,
286: -np.inf,
287: -np.inf,
288: -np.inf,
289: -np.inf,
290: -np.inf,
291: -np.inf,
292: -np.inf,
293: -np.inf,
294: -np.inf,
295: -np.inf,
296: -np.inf,
297: -np.inf,
298: -np.inf,
299: -np.inf,
300: -np.inf,
301: -np.inf,
302: -np.inf,
303: -np.inf,
304: -np.inf,
305: -np.inf,
306: -np.inf,
307: -np.inf,
308: -np.inf,
309: -np.inf,
310: -np.inf,
311: -np.inf,
312: -np.inf,
313: -np.inf,
314: -np.inf,
315: -np.inf,
316: -np.inf,
317: -np.inf,
318: -np.inf,
319: -np.inf,
320: -np.inf,
321: -np.inf,
322: -np.inf,
323: -np.inf,
324: -np.inf,
325: -np.inf,
326: -np.inf,
327: -np.inf,
328: -np.inf,
329: -np.inf,
330: -np.inf,
331: -np.inf,
332: -np.inf,
333: -np.inf,
334: -np.inf,
335: -np.inf,
336: -np.inf,
337: -np.inf,
338: -np.inf,
339: -np.inf,
340: -np.inf,
341: -np.inf,
342: -np.inf,
343: -np.inf,
344: -np.inf,
345: -np.inf,
346: -np.inf,
347: -np.inf,
348: -np.inf,
349: -np.inf,
350: -np.inf,
351: -np.inf,
352: -np.inf,
353: -np.inf,
354: -np.inf,
355: -np.inf,
356: -np.inf,
357: -np.inf,
358: -np.inf,
359: -np.inf,
360: -np.inf,
361: -np.inf,
362: -np.inf,
363: -np.inf,
364: -np.inf,
365: -np.inf,
366: -np.inf,
367: -np.inf,
368: -np.inf,
369: -np.inf,
370: -np.inf,
371: -np.inf,
372: -np.inf,
373: -np.inf,
374: -np.inf,
375: -np.inf,
376: -np.inf,
377: -np.inf,
378: -np.inf,
379: -np.inf,
380: -np.inf,
381: -np.inf,
382: -np.inf,
383: -np.inf,
384: -np.inf,
385: -np.inf,
386: -np.inf,
387: -np.inf,
388: -np.inf,
389: -np.inf,
390: -np.inf,
391: -np.inf,
392: -np.inf,
393: -np.inf,
},
"fcst_upper": {
0: np.inf,
1: np.inf,
2: np.inf,
3: np.inf,
4: np.inf,
5: np.inf,
6: np.inf,
7: np.inf,
8: np.inf,
9: np.inf,
10: np.inf,
11: np.inf,
12: np.inf,
13: np.inf,
14: np.inf,
15: np.inf,
16: np.inf,
17: np.inf,
18: np.inf,
19: np.inf,
20: np.inf,
21: np.inf,
22: np.inf,
23: np.inf,
24: np.inf,
25: np.inf,
26: np.inf,
27: np.inf,
28: np.inf,
29: np.inf,
30: np.inf,
31: np.inf,
32: np.inf,
33: np.inf,
34: np.inf,
35: np.inf,
36: np.inf,
37: np.inf,
38: np.inf,
39: np.inf,
40: np.inf,
41: np.inf,
42: np.inf,
43: np.inf,
44: np.inf,
45: np.inf,
46: np.inf,
47: np.inf,
48: np.inf,
49: np.inf,
50: np.inf,
51: np.inf,
52: np.inf,
53: np.inf,
54: np.inf,
55: np.inf,
56: np.inf,
57: np.inf,
58: np.inf,
59: np.inf,
60: np.inf,
61: np.inf,
62: np.inf,
63: np.inf,
64: np.inf,
65: np.inf,
66: np.inf,
67: np.inf,
68: np.inf,
69: np.inf,
70: np.inf,
71: np.inf,
72: np.inf,
73: np.inf,
74: np.inf,
75: np.inf,
76: np.inf,
77: np.inf,
78: np.inf,
79: np.inf,
80: np.inf,
81: np.inf,
82: np.inf,
83: np.inf,
84: np.inf,
85: np.inf,
86: np.inf,
87: np.inf,
88: np.inf,
89: np.inf,
90: np.inf,
91: np.inf,
92: np.inf,
93: np.inf,
94: np.inf,
95: np.inf,
96: np.inf,
97: np.inf,
98: np.inf,
99: np.inf,
100: np.inf,
101: np.inf,
102: np.inf,
103: np.inf,
104: np.inf,
105: np.inf,
106: np.inf,
107: np.inf,
108: np.inf,
109: np.inf,
110: np.inf,
111: np.inf,
112: np.inf,
113: np.inf,
114: np.inf,
115: np.inf,
116: np.inf,
117: np.inf,
118: np.inf,
119: np.inf,
120: np.inf,
121: np.inf,
122: np.inf,
123: np.inf,
124: np.inf,
125: np.inf,
126: np.inf,
127: np.inf,
128: np.inf,
129: np.inf,
130: np.inf,
131: np.inf,
132: np.inf,
133: np.inf,
134: np.inf,
135: np.inf,
136: np.inf,
137: np.inf,
138: np.inf,
139: np.inf,
140: np.inf,
141: np.inf,
142: np.inf,
143: np.inf,
144: np.inf,
145: np.inf,
146: np.inf,
147: np.inf,
148: np.inf,
149: np.inf,
150: np.inf,
151: np.inf,
152: np.inf,
153: np.inf,
154: np.inf,
155: np.inf,
156: np.inf,
157: np.inf,
158: np.inf,
159: np.inf,
160: np.inf,
161: np.inf,
162: np.inf,
163: np.inf,
164: np.inf,
165: np.inf,
166: np.inf,
167: np.inf,
168: np.inf,
169: np.inf,
170: np.inf,
171: np.inf,
172: np.inf,
173: np.inf,
174: np.inf,
175: np.inf,
176: np.inf,
177: np.inf,
178: np.inf,
179: np.inf,
180: np.inf,
181: np.inf,
182: np.inf,
183: np.inf,
184: np.inf,
185: np.inf,
186: np.inf,
187: np.inf,
188: np.inf,
189: np.inf,
190: np.inf,
191: np.inf,
192: np.inf,
193: np.inf,
194: np.inf,
195: np.inf,
196: np.inf,
197: np.inf,
198: np.inf,
199: np.inf,
200: np.inf,
201: np.inf,
202: np.inf,
203: np.inf,
204: np.inf,
205: np.inf,
206: np.inf,
207: np.inf,
208: np.inf,
209: np.inf,
210: np.inf,
211: np.inf,
212: np.inf,
213: np.inf,
214: np.inf,
215: np.inf,
216: np.inf,
217: np.inf,
218: np.inf,
219: np.inf,
220: np.inf,
221: np.inf,
222: np.inf,
223: np.inf,
224: np.inf,
225: np.inf,
226: np.inf,
227: np.inf,
228: np.inf,
229: np.inf,
230: np.inf,
231: np.inf,
232: np.inf,
233: np.inf,
234: np.inf,
235: np.inf,
236: np.inf,
237: np.inf,
238: np.inf,
239: np.inf,
240: np.inf,
241: np.inf,
242: np.inf,
243: np.inf,
244: np.inf,
245: np.inf,
246: np.inf,
247: np.inf,
248: np.inf,
249: np.inf,
250: np.inf,
251: np.inf,
252: np.inf,
253: np.inf,
254: np.inf,
255: np.inf,
256: np.inf,
257: np.inf,
258: np.inf,
259: np.inf,
260: np.inf,
261: np.inf,
262: np.inf,
263: np.inf,
264: np.inf,
265: np.inf,
266: np.inf,
267: np.inf,
268: np.inf,
269: np.inf,
270: np.inf,
271: np.inf,
272: np.inf,
273: np.inf,
274: np.inf,
275: np.inf,
276: np.inf,
277: np.inf,
278: np.inf,
279: np.inf,
280: np.inf,
281: np.inf,
282: np.inf,
283: np.inf,
284: np.inf,
285: np.inf,
286: np.inf,
287: np.inf,
288: np.inf,
289: np.inf,
290: np.inf,
291: np.inf,
292: np.inf,
293: np.inf,
294: np.inf,
295: np.inf,
296: np.inf,
297: np.inf,
298: np.inf,
299: np.inf,
300: np.inf,
301: np.inf,
302: np.inf,
303: np.inf,
304: np.inf,
305: np.inf,
306: np.inf,
307: np.inf,
308: np.inf,
309: np.inf,
310: np.inf,
311: np.inf,
312: np.inf,
313: np.inf,
314: np.inf,
315: np.inf,
316: np.inf,
317: np.inf,
318: np.inf,
319: np.inf,
320: np.inf,
321: np.inf,
322: np.inf,
323: np.inf,
324: np.inf,
325: np.inf,
326: np.inf,
327: np.inf,
328: np.inf,
329: np.inf,
330: np.inf,
331: np.inf,
332: np.inf,
333: np.inf,
334: np.inf,
335: np.inf,
336: np.inf,
337: np.inf,
338: np.inf,
339: np.inf,
340: np.inf,
341: np.inf,
342: np.inf,
343: np.inf,
344: np.inf,
345: np.inf,
346: np.inf,
347: np.inf,
348: np.inf,
349: np.inf,
350: np.inf,
351: np.inf,
352: np.inf,
353: np.inf,
354: np.inf,
355: np.inf,
356: np.inf,
357: np.inf,
358: np.inf,
359: np.inf,
360: np.inf,
361: np.inf,
362: np.inf,
363: np.inf,
364: np.inf,
365: np.inf,
366: np.inf,
367: np.inf,
368: np.inf,
369: np.inf,
370: np.inf,
371: np.inf,
372: np.inf,
373: np.inf,
374: np.inf,
375: np.inf,
376: np.inf,
377: np.inf,
378: np.inf,
379: np.inf,
380: np.inf,
381: np.inf,
382: np.inf,
383: np.inf,
384: np.inf,
385: np.inf,
386: np.inf,
387: np.inf,
388: np.inf,
389: np.inf,
390: np.inf,
391: np.inf,
392: np.inf,
393: np.inf,
},
}
)
PEYTON_FCST_LINEAR_INVALID_NEG_ONE = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2012-05-02 00:00:00"),
1: pd.Timestamp("2012-05-03 00:00:00"),
2: pd.Timestamp("2012-05-04 00:00:00"),
3: pd.Timestamp("2012-05-05 00:00:00"),
4: pd.Timestamp("2012-05-06 00:00:00"),
5: pd.Timestamp("2012-05-07 00:00:00"),
6: pd.Timestamp("2012-05-08 00:00:00"),
7: pd.Timestamp("2012-05-09 00:00:00"),
8: pd.Timestamp("2012-05-10 00:00:00"),
9: pd.Timestamp("2012-05-11 00:00:00"),
10: pd.Timestamp("2012-05-12 00:00:00"),
11: pd.Timestamp("2012-05-13 00:00:00"),
12: pd.Timestamp("2012-05-14 00:00:00"),
13: pd.Timestamp("2012-05-15 00:00:00"),
14: pd.Timestamp("2012-05-16 00:00:00"),
15: pd.Timestamp("2012-05-17 00:00:00"),
16: pd.Timestamp("2012-05-18 00:00:00"),
17: pd.Timestamp("2012-05-19 00:00:00"),
18: pd.Timestamp("2012-05-20 00:00:00"),
19: pd.Timestamp("2012-05-21 00:00:00"),
20: pd.Timestamp("2012-05-22 00:00:00"),
21: pd.Timestamp("2012-05-23 00:00:00"),
22: pd.Timestamp("2012-05-24 00:00:00"),
23: pd.Timestamp("2012-05-25 00:00:00"),
24: pd.Timestamp("2012-05-26 00:00:00"),
25: pd.Timestamp("2012-05-27 00:00:00"),
26: pd.Timestamp("2012-05-28 00:00:00"),
27: pd.Timestamp("2012-05-29 00:00:00"),
28: pd.Timestamp("2012-05-30 00:00:00"),
29: pd.Timestamp("2012-05-31 00:00:00"),
30: pd.Timestamp("2012-06-01 00:00:00"),
31: pd.Timestamp("2012-06-02 00:00:00"),
32: pd.Timestamp("2012-06-03 00:00:00"),
33: pd.Timestamp("2012-06-04 00:00:00"),
34: pd.Timestamp("2012-06-05 00:00:00"),
35: pd.Timestamp("2012-06-06 00:00:00"),
36: pd.Timestamp("2012-06-07 00:00:00"),
37: pd.Timestamp("2012-06-08 00:00:00"),
38: pd.Timestamp("2012-06-09 00:00:00"),
39: pd.Timestamp("2012-06-10 00:00:00"),
40: pd.Timestamp("2012-06-11 00:00:00"),
41: pd.Timestamp("2012-06-12 00:00:00"),
42: pd.Timestamp("2012-06-13 00:00:00"),
43: pd.Timestamp("2012-06-14 00:00:00"),
44: pd.Timestamp("2012-06-15 00:00:00"),
45: pd.Timestamp("2012-06-16 00:00:00"),
46: pd.Timestamp("2012-06-17 00:00:00"),
47: pd.Timestamp("2012-06-18 00:00:00"),
48: pd.Timestamp("2012-06-19 00:00:00"),
49: pd.Timestamp("2012-06-20 00:00:00"),
50: pd.Timestamp("2012-06-21 00:00:00"),
51: pd.Timestamp("2012-06-22 00:00:00"),
52: pd.Timestamp("2012-06-23 00:00:00"),
53: pd.Timestamp("2012-06-24 00:00:00"),
54: pd.Timestamp("2012-06-25 00:00:00"),
55: pd.Timestamp("2012-06-26 00:00:00"),
56: pd.Timestamp("2012-06-27 00:00:00"),
57: pd.Timestamp("2012-06-28 00:00:00"),
58: pd.Timestamp("2012-06-29 00:00:00"),
59: pd.Timestamp("2012-06-30 00:00:00"),
60: pd.Timestamp("2012-07-01 00:00:00"),
61: pd.Timestamp("2012-07-02 00:00:00"),
62: pd.Timestamp("2012-07-03 00:00:00"),
63: pd.Timestamp("2012-07-04 00:00:00"),
64: pd.Timestamp("2012-07-05 00:00:00"),
65: pd.Timestamp("2012-07-06 00:00:00"),
66: pd.Timestamp("2012-07-07 00:00:00"),
67: pd.Timestamp("2012-07-08 00:00:00"),
68: pd.Timestamp("2012-07-09 00:00:00"),
69: pd.Timestamp("2012-07-10 00:00:00"),
70: pd.Timestamp("2012-07-11 00:00:00"),
71: pd.Timestamp("2012-07-12 00:00:00"),
72: pd.Timestamp("2012-07-13 00:00:00"),
73: pd.Timestamp("2012-07-14 00:00:00"),
74: pd.Timestamp("2012-07-15 00:00:00"),
75: pd.Timestamp("2012-07-16 00:00:00"),
76: pd.Timestamp("2012-07-17 00:00:00"),
77: pd.Timestamp("2012-07-18 00:00:00"),
78: pd.Timestamp("2012-07-19 00:00:00"),
79: pd.Timestamp("2012-07-20 00:00:00"),
80: pd.Timestamp("2012-07-21 00:00:00"),
81: pd.Timestamp("2012-07-22 00:00:00"),
82: pd.Timestamp("2012-07-23 00:00:00"),
83: pd.Timestamp("2012-07-24 00:00:00"),
84: pd.Timestamp("2012-07-25 00:00:00"),
85: pd.Timestamp("2012-07-26 00:00:00"),
86: pd.Timestamp("2012-07-27 00:00:00"),
87: pd.Timestamp("2012-07-28 00:00:00"),
88: pd.Timestamp("2012-07-29 00:00:00"),
89: pd.Timestamp("2012-07-30 00:00:00"),
90: pd.Timestamp("2012-07-31 00:00:00"),
91: pd.Timestamp("2012-08-01 00:00:00"),
92: pd.Timestamp("2012-08-02 00:00:00"),
93: pd.Timestamp("2012-08-03 00:00:00"),
94: pd.Timestamp("2012-08-04 00:00:00"),
95: pd.Timestamp("2012-08-05 00:00:00"),
96: pd.Timestamp("2012-08-06 00:00:00"),
97: pd.Timestamp("2012-08-07 00:00:00"),
98: pd.Timestamp("2012-08-08 00:00:00"),
99: pd.Timestamp("2012-08-09 00:00:00"),
100: pd.Timestamp("2012-08-10 00:00:00"),
101: pd.Timestamp("2012-08-11 00:00:00"),
102: pd.Timestamp("2012-08-12 00:00:00"),
103: pd.Timestamp("2012-08-13 00:00:00"),
104: pd.Timestamp("2012-08-14 00:00:00"),
105: pd.Timestamp("2012-08-15 00:00:00"),
106: pd.Timestamp("2012-08-16 00:00:00"),
107: pd.Timestamp("2012-08-17 00:00:00"),
108: pd.Timestamp("2012-08-18 00:00:00"),
109: pd.Timestamp("2012-08-19 00:00:00"),
110: pd.Timestamp("2012-08-20 00:00:00"),
111: pd.Timestamp("2012-08-21 00:00:00"),
112: pd.Timestamp("2012-08-22 00:00:00"),
113: pd.Timestamp("2012-08-23 00:00:00"),
114: pd.Timestamp("2012-08-24 00:00:00"),
115: pd.Timestamp("2012-08-25 00:00:00"),
116: pd.Timestamp("2012-08-26 00:00:00"),
117: pd.Timestamp("2012-08-27 00:00:00"),
118: pd.Timestamp("2012-08-28 00:00:00"),
119: pd.Timestamp("2012-08-29 00:00:00"),
120: pd.Timestamp("2012-08-30 00:00:00"),
121: pd.Timestamp("2012-08-31 00:00:00"),
122: pd.Timestamp("2012-09-01 00:00:00"),
123: pd.Timestamp("2012-09-02 00:00:00"),
124: pd.Timestamp("2012-09-03 00:00:00"),
125: pd.Timestamp("2012-09-04 00:00:00"),
126: pd.Timestamp("2012-09-05 00:00:00"),
127: pd.Timestamp("2012-09-06 00:00:00"),
128: pd.Timestamp("2012-09-07 00:00:00"),
129: pd.Timestamp("2012-09-08 00:00:00"),
130: pd.Timestamp("2012-09-09 00:00:00"),
131: pd.Timestamp("2012-09-10 00:00:00"),
132: pd.Timestamp("2012-09-11 00:00:00"),
133: pd.Timestamp("2012-09-12 00:00:00"),
134: pd.Timestamp("2012-09-13 00:00:00"),
135: pd.Timestamp("2012-09-14 00:00:00"),
136: pd.Timestamp("2012-09-15 00:00:00"),
137: pd.Timestamp("2012-09-16 00:00:00"),
138: pd.Timestamp("2012-09-17 00:00:00"),
139: pd.Timestamp("2012-09-18 00:00:00"),
140: pd.Timestamp("2012-09-19 00:00:00"),
141: pd.Timestamp("2012-09-20 00:00:00"),
142: pd.Timestamp("2012-09-21 00:00:00"),
143: pd.Timestamp("2012-09-22 00:00:00"),
144: pd.Timestamp("2012-09-23 00:00:00"),
145: pd.Timestamp("2012-09-24 00:00:00"),
146: pd.Timestamp("2012-09-25 00:00:00"),
147: pd.Timestamp("2012-09-26 00:00:00"),
148: pd.Timestamp("2012-09-27 00:00:00"),
149: pd.Timestamp("2012-09-28 00:00:00"),
150: pd.Timestamp("2012-09-29 00:00:00"),
151: pd.Timestamp("2012-09-30 00:00:00"),
152: pd.Timestamp("2012-10-01 00:00:00"),
153: pd.Timestamp("2012-10-02 00:00:00"),
154: pd.Timestamp("2012-10-03 00:00:00"),
155: pd.Timestamp("2012-10-04 00:00:00"),
156: pd.Timestamp("2012-10-05 00:00:00"),
157: pd.Timestamp("2012-10-06 00:00:00"),
158: pd.Timestamp("2012-10-07 00:00:00"),
159: pd.Timestamp("2012-10-08 00:00:00"),
160: pd.Timestamp("2012-10-09 00:00:00"),
161: pd.Timestamp("2012-10-10 00:00:00"),
162: pd.Timestamp("2012-10-11 00:00:00"),
163: pd.Timestamp("2012-10-12 00:00:00"),
164: pd.Timestamp("2012-10-13 00:00:00"),
165: pd.Timestamp("2012-10-14 00:00:00"),
166: pd.Timestamp("2012-10-15 00:00:00"),
167: pd.Timestamp("2012-10-16 00:00:00"),
168: pd.Timestamp("2012-10-17 00:00:00"),
169: pd.Timestamp("2012-10-18 00:00:00"),
170: pd.Timestamp("2012-10-19 00:00:00"),
171: pd.Timestamp("2012-10-20 00:00:00"),
172: pd.Timestamp("2012-10-21 00:00:00"),
173: pd.Timestamp("2012-10-22 00:00:00"),
174: pd.Timestamp("2012-10-23 00:00:00"),
175: pd.Timestamp("2012-10-24 00:00:00"),
176: pd.Timestamp("2012-10-25 00:00:00"),
177: pd.Timestamp("2012-10-26 00:00:00"),
178: pd.Timestamp("2012-10-27 00:00:00"),
179: pd.Timestamp("2012-10-28 00:00:00"),
180: pd.Timestamp("2012-10-29 00:00:00"),
181: pd.Timestamp("2012-10-30 00:00:00"),
182: pd.Timestamp("2012-10-31 00:00:00"),
183: pd.Timestamp("2012-11-01 00:00:00"),
184: pd.Timestamp("2012-11-02 00:00:00"),
185: pd.Timestamp("2012-11-03 00:00:00"),
186: pd.Timestamp("2012-11-04 00:00:00"),
187: pd.Timestamp("2012-11-05 00:00:00"),
188: pd.Timestamp("2012-11-06 00:00:00"),
189: pd.Timestamp("2012-11-07 00:00:00"),
190: pd.Timestamp("2012-11-08 00:00:00"),
191: pd.Timestamp("2012-11-09 00:00:00"),
192: pd.Timestamp("2012-11-10 00:00:00"),
193: pd.Timestamp("2012-11-11 00:00:00"),
194: pd.Timestamp("2012-11-12 00:00:00"),
195: pd.Timestamp("2012-11-13 00:00:00"),
196: pd.Timestamp("2012-11-14 00:00:00"),
197: pd.Timestamp("2012-11-15 00:00:00"),
198: pd.Timestamp("2012-11-16 00:00:00"),
199: pd.Timestamp("2012-11-17 00:00:00"),
200: pd.Timestamp("2012-11-18 00:00:00"),
201: pd.Timestamp("2012-11-19 00:00:00"),
202: pd.Timestamp("2012-11-20 00:00:00"),
203: pd.Timestamp("2012-11-21 00:00:00"),
204: pd.Timestamp("2012-11-22 00:00:00"),
205: pd.Timestamp("2012-11-23 00:00:00"),
206: pd.Timestamp("2012-11-24 00:00:00"),
207: pd.Timestamp("2012-11-25 00:00:00"),
208: pd.Timestamp("2012-11-26 00:00:00"),
209: pd.Timestamp("2012-11-27 00:00:00"),
210: pd.Timestamp("2012-11-28 00:00:00"),
211: pd.Timestamp("2012-11-29 00:00:00"),
212: pd.Timestamp("2012-11-30 00:00:00"),
213: pd.Timestamp("2012-12-01 00:00:00"),
214: pd.Timestamp("2012-12-02 00:00:00"),
215: pd.Timestamp("2012-12-03 00:00:00"),
216: pd.Timestamp("2012-12-04 00:00:00"),
217: pd.Timestamp("2012-12-05 00:00:00"),
218: pd.Timestamp("2012-12-06 00:00:00"),
219: pd.Timestamp("2012-12-07 00:00:00"),
220: pd.Timestamp("2012-12-08 00:00:00"),
221: pd.Timestamp("2012-12-09 00:00:00"),
222: pd.Timestamp("2012-12-10 00:00:00"),
223: pd.Timestamp("2012-12-11 00:00:00"),
224: | pd.Timestamp("2012-12-12 00:00:00") | pandas.Timestamp |
import pandas as pd
import numpy as np
import json
import csv
import matplotlib.pyplot as plt
import seaborn as sns
from tqdm import tqdm_notebook as tqdm
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.tokenize import sent_tokenize
from nltk.stem import WordNetLemmatizer
import nltk
nltk.download('averaged_perceptron_tagger')
import spacy
import math
import string
import sys
import random
from collections import Counter
from itertools import chain
stop_words = set(stopwords.words('english'))
lemmatizer = WordNetLemmatizer()
ge=pd.read_csv('./testgoodwordse.csv')
gn=pd.read_csv('./testgoodwordsn.csv')
gc=pd.read_csv('./testgoodwordsc.csv')
be=pd.read_csv('./testbadwordse.csv')
bn= | pd.read_csv('./testbadwordsn.csv') | pandas.read_csv |
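# Illustrative sketch, not part of the original script: a minimal cleaning helper built
# from the objects loaded above (word_tokenize, stop_words, lemmatizer). Lowercasing and
# punctuation stripping are assumptions, and nltk's 'punkt' tokenizer data is assumed to
# be installed.
def preprocess(text):
    """Tokenize a raw string, drop stop words and punctuation, and lemmatize the rest."""
    tokens = word_tokenize(text.lower())
    kept = [t for t in tokens if t not in stop_words and t not in string.punctuation]
    return [lemmatizer.lemmatize(t) for t in kept]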
#!/usr/bin/env python
# coding: utf-8
# In[3]:
import pandas as pd
from tqdm import tqdm_notebook
prefix = '../data/domlin_fever/'
# In[4]:
train_labels = pd.read_table(prefix + 'train-domlin.label', header=None)
train_hypothesis = pd.read_table(prefix + 'train-domlin.hypothesis', header=None)
train_premise = pd.read_table(prefix + 'train-domlin.premise', header=None)
dev_labels = pd.read_table(prefix + 'dev-domlin.label', header=None)
dev_hypothesis = pd.read_table(prefix + 'dev-domlin.hypothesis', header=None)
dev_premise = pd.read_table(prefix + 'dev-domlin.premise', header=None)
test_hypothesis = pd.read_table(prefix + 'test-domlin.hypothesis', header=None)
test_premise = pd.read_table(prefix + 'test-domlin.premise', header=None)
# In[18]:
text_a_train = train_hypothesis[0].str.replace(r'\n', ' ', regex=True)
text_a_train = text_a_train.str.replace('-LRB-', '(')
text_a_train = text_a_train.str.replace('-RRB-', ')')
text_a_train = text_a_train.str.replace('-COLON-', ':')
text_b_train = train_premise[0].str.replace(r'\n', ' ', regex=True)
text_b_train = text_b_train.str.replace('-LRB-', '(')
text_b_train = text_b_train.str.replace('-RRB-', ')')
text_b_train = text_b_train.str.replace('-COLON-', ':')
train_df = pd.DataFrame({
'id':range(len(train_labels)),
'label':train_labels[0],
'alpha':['a']*train_labels.shape[0],
'text_a': text_a_train,
'text_b': text_b_train
})
train_df.head()
print(train_df.label.value_counts())
count_class_1, count_class_0, count_class_2 = train_df.label.value_counts()
df_class_0 = train_df[train_df['label'] == 0]
df_class_1 = train_df[train_df['label'] == 1]
df_class_2 = train_df[train_df['label'] == 2]
df_class_1_under = df_class_1.sample(count_class_2, replace=True)
df_class_0_under = df_class_0.sample(count_class_2, replace=True)
train_df_new = | pd.concat([df_class_0_under, df_class_1_under, df_class_2], axis=0) | pandas.concat |
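# Illustrative check, not part of the original notebook: after resampling, every class
# should appear count_class_2 times. Shuffling before writing out a training file is a
# common follow-up; the variable name and random_state below are assumptions.
print(train_df_new.label.value_counts())
train_df_shuffled = train_df_new.sample(frac=1, random_state=42).reset_index(drop=True)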
#
# Copyright 2020 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Testing out the datacompy functionality
"""
import io
import logging
import sys
from datetime import datetime
from decimal import Decimal
from unittest import mock
import numpy as np
import pandas as pd
import pytest
from pandas.util.testing import assert_series_equal
from pytest import raises
import datacompy
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
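# Editorial note (an assumption about the pandas version in use): pandas.util.testing was
# deprecated in pandas 1.0 and the same helper now lives in pandas.testing. A tolerant
# import would look like:
#
#   try:
#       from pandas.testing import assert_series_equal
#   except ImportError:
#       from pandas.util.testing import assert_series_equal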
def test_numeric_columns_equal_abs():
data = """a|b|expected
1|1|True
2|2.1|True
3|4|False
4|NULL|False
NULL|4|False
NULL|NULL|True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(df.a, df.b, abs_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_numeric_columns_equal_rel():
data = """a|b|expected
1|1|True
2|2.1|True
3|4|False
4|NULL|False
NULL|4|False
NULL|NULL|True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_string_columns_equal():
data = """a|b|expected
Hi|Hi|True
Yo|Yo|True
Hey|Hey |False
résumé|resume|False
résumé|résumé|True
💩|💩|True
💩|🤔|False
| |True
| |False
datacompy|DataComPy|False
something||False
|something|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_string_columns_equal_with_ignore_spaces():
data = """a|b|expected
Hi|Hi|True
Yo|Yo|True
Hey|Hey |True
résumé|resume|False
résumé|résumé|True
💩|💩|True
💩|🤔|False
| |True
| |True
datacompy|DataComPy|False
something||False
|something|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2, ignore_spaces=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_string_columns_equal_with_ignore_spaces_and_case():
data = """a|b|expected
Hi|Hi|True
Yo|Yo|True
Hey|Hey |True
résumé|resume|False
résumé|résumé|True
💩|💩|True
💩|🤔|False
| |True
| |True
datacompy|DataComPy|True
something||False
|something|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(
df.a, df.b, rel_tol=0.2, ignore_spaces=True, ignore_case=True
)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_date_columns_equal():
data = """a|b|expected
2017-01-01|2017-01-01|True
2017-01-02|2017-01-02|True
2017-10-01|2017-10-10|False
2017-01-01||False
|2017-01-01|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
# First compare just the strings
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# Then compare converted to datetime objects
df["a"] = pd.to_datetime(df["a"])
df["b"] = pd.to_datetime(df["b"])
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# and reverse
actual_out_rev = datacompy.columns_equal(df.b, df.a, rel_tol=0.2)
assert_series_equal(expect_out, actual_out_rev, check_names=False)
def test_date_columns_equal_with_ignore_spaces():
data = """a|b|expected
2017-01-01|2017-01-01 |True
2017-01-02 |2017-01-02|True
2017-10-01 |2017-10-10 |False
2017-01-01||False
|2017-01-01|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
# First compare just the strings
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2, ignore_spaces=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# Then compare converted to datetime objects
df["a"] = pd.to_datetime(df["a"])
df["b"] = pd.to_datetime(df["b"])
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2, ignore_spaces=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# and reverse
actual_out_rev = datacompy.columns_equal(df.b, df.a, rel_tol=0.2, ignore_spaces=True)
assert_series_equal(expect_out, actual_out_rev, check_names=False)
def test_date_columns_equal_with_ignore_spaces_and_case():
data = """a|b|expected
2017-01-01|2017-01-01 |True
2017-01-02 |2017-01-02|True
2017-10-01 |2017-10-10 |False
2017-01-01||False
|2017-01-01|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
# First compare just the strings
actual_out = datacompy.columns_equal(
df.a, df.b, rel_tol=0.2, ignore_spaces=True, ignore_case=True
)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# Then compare converted to datetime objects
df["a"] = pd.to_datetime(df["a"])
df["b"] = pd.to_datetime(df["b"])
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2, ignore_spaces=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# and reverse
actual_out_rev = datacompy.columns_equal(df.b, df.a, rel_tol=0.2, ignore_spaces=True)
assert_series_equal(expect_out, actual_out_rev, check_names=False)
def test_date_columns_unequal():
"""I want datetime fields to match with dates stored as strings
"""
df = pd.DataFrame([{"a": "2017-01-01", "b": "2017-01-02"}, {"a": "2017-01-01"}])
df["a_dt"] = pd.to_datetime(df["a"])
df["b_dt"] = pd.to_datetime(df["b"])
assert datacompy.columns_equal(df.a, df.a_dt).all()
assert datacompy.columns_equal(df.b, df.b_dt).all()
assert datacompy.columns_equal(df.a_dt, df.a).all()
assert datacompy.columns_equal(df.b_dt, df.b).all()
assert not datacompy.columns_equal(df.b_dt, df.a).any()
assert not datacompy.columns_equal(df.a_dt, df.b).any()
assert not datacompy.columns_equal(df.a, df.b_dt).any()
assert not datacompy.columns_equal(df.b, df.a_dt).any()
def test_bad_date_columns():
"""If strings can't be coerced into dates then it should be false for the
whole column.
"""
df = pd.DataFrame(
[{"a": "2017-01-01", "b": "2017-01-01"}, {"a": "2017-01-01", "b": "217-01-01"}]
)
df["a_dt"] = pd.to_datetime(df["a"])
assert not datacompy.columns_equal(df.a_dt, df.b).any()
def test_rounded_date_columns():
"""If strings can't be coerced into dates then it should be false for the
whole column.
"""
df = pd.DataFrame(
[
{"a": "2017-01-01", "b": "2017-01-01 00:00:00.000000", "exp": True},
{"a": "2017-01-01", "b": "2017-01-01 00:00:00.123456", "exp": False},
{"a": "2017-01-01", "b": "2017-01-01 00:00:01.000000", "exp": False},
{"a": "2017-01-01", "b": "2017-01-01 00:00:00", "exp": True},
]
)
df["a_dt"] = pd.to_datetime(df["a"])
actual = datacompy.columns_equal(df.a_dt, df.b)
expected = df["exp"]
assert_series_equal(actual, expected, check_names=False)
def test_decimal_float_columns_equal():
df = pd.DataFrame(
[
{"a": Decimal("1"), "b": 1, "expected": True},
{"a": Decimal("1.3"), "b": 1.3, "expected": True},
{"a": Decimal("1.000003"), "b": 1.000003, "expected": True},
{"a": Decimal("1.000000004"), "b": 1.000000003, "expected": False},
{"a": Decimal("1.3"), "b": 1.2, "expected": False},
{"a": np.nan, "b": np.nan, "expected": True},
{"a": np.nan, "b": 1, "expected": False},
{"a": Decimal("1"), "b": np.nan, "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b)
expect_out = df["expected"]
| assert_series_equal(expect_out, actual_out, check_names=False) | pandas.util.testing.assert_series_equal |
import pandas as pd
import seaborn as sb
from sklearn.linear_model import LinearRegression
from statsmodels.graphics.tsaplots import plot_acf
from statsmodels.graphics.tsaplots import plot_pacf
from statsmodels.tsa.seasonal import seasonal_decompose
import numpy as np
import pmdarima
import matplotlib.pyplot as plt
import seaborn as sb
import matplotlib.colors
import math
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from sklearn.metrics import mean_squared_error
import sklearn.metrics as metrics
from sklearn.cluster import KMeans
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import silhouette_score
import tsfel
#MAIN
size=(7,5)
plt.style.use('seaborn')
sb.set_style("darkgrid")
color_list = ['blue','red','green']
#cmap = matplotlib.colors.LinearSegmentedColormap.from_list(cluster_values, color_list)
#IMPORT
df=pd.read_csv('Data kategorier.csv',skiprows=2,index_col=['READ_TIME_Date'])
df.index = pd.to_datetime(df.index)
# 1) Weekday vs Weekend for apartments and houses
Houses=df.iloc[:,5:8]
Houses.columns=['sum','antal','average']
Houses['Type']='House'
Apartments=df.iloc[:,92:95]
Apartments.columns=['sum','antal','average']
Apartments['Type']='Apartment'
data=pd.concat([Houses,Apartments])
data['dayn']=data.index.weekday
data['Day']=data.dayn.apply(lambda x: 'weekend' if x>=5 else 'weekday')
plt.figure(figsize=size)
ax=sb.lineplot(data.index.hour,data.average,hue=data.Type,style=data.Day)
ax.set(xlabel='Hour', ylabel='Average household consumption [kW]')
plt.xlabel('Hour')
plt.ylabel('Average household consumption [kW]')
plt.show()
# 2) Age over months
A1 = df.loc[:,df.columns.isin(k for k in df.columns if 'Average' in k and 'a1' in k)]
A1['Average']=A1.mean(axis=1)
A1['Age']='18-30'
A2 = df.loc[:,df.columns.isin(k for k in df.columns if 'Average' in k and 'a2' in k)]
A2['Average']=A2.mean(axis=1)
A2['Age']='30-65'
A3 = df.loc[:,df.columns.isin(k for k in df.columns if 'Average' in k and 'a3' in k)]
A3['Average']=A3.mean(axis=1)
A3['Age']='65+'
data=pd.concat([A1.iloc[:,-2:],A2.iloc[:,-2:],A3.iloc[:,-2:]])
data['Month']=data.index.month_name(locale='French')
data['Monthn']=data.index.month
data['Season']=data.Monthn.apply(lambda x: 'Winter' if x in [12,1,2] else('Spring' if x in [3,4,5] else('Summer' if x in [6,7,8] else ('Fall'))))
plt.figure(figsize=size)
ax=sb.lineplot(data.index.hour,data.Average,hue=data.Age,style=data.Season, ci=None)
ax.set(xlabel='Hour', ylabel='Average household consumption [kW]')
plt.show()
# 3) Number of children vs Weekend and age 30-64
C0 = df.loc[:,df.columns.isin(k for k in df.columns if 'Average' in k and '0b' in k and 'a2' in k)]
C0['Average']=C0.mean(axis=1)
C0['Children']='No'
C1 = df.loc[:,df.columns.isin(k for k in df.columns if 'Average' in k and 'mb' in k and 'a2' in k)]
C1['Average']=C1.mean(axis=1)
C1['Children']='Yes'
data=pd.concat([C0.iloc[:,-2:],C1.iloc[:,-2:]])
data['dayn']=data.index.weekday
data['Day']=data.dayn.apply(lambda x: 'weekend' if x>=5 else 'weekday')
plt.figure(figsize=(10,5))
ax = plt.subplot(122)
ax.xaxis.set_major_locator(plt.MaxNLocator(6))
ax = sb.lineplot(data.index.hour,data.Average,hue=data.Children,style=data.Day, ci=None)
ax.set(xlabel='Hour', ylabel='')
plt.axvline(x=18)
ax.title.set_text('Age: 30-64')
# both panels share one figure; it is shown after the 18-30 panel (subplot 121) is added below
#Age 18-30
C0 = df.loc[:,df.columns.isin(k for k in df.columns if 'Average' in k and '0b' in k and 'a1' in k)]
C0['Average']=C0.mean(axis=1)
C0['Children']='No'
C1 = df.loc[:,df.columns.isin(k for k in df.columns if 'Average' in k and 'mb' in k and 'a1' in k)]
C1['Average']=C1.mean(axis=1)
C1['Children']='Yes'
data1=pd.concat([C0.iloc[:,-2:],C1.iloc[:,-2:]])
data1['dayn']=data1.index.weekday
data1['Day']=data1.dayn.apply(lambda x: 'weekend' if x>=5 else 'weekday')
ax1 = plt.subplot(121, sharey = ax)
ax1.xaxis.set_major_locator(plt.MaxNLocator(6))
ax1 = sb.lineplot(data1.index.hour,data1.Average,hue=data1.Children,style=data1.Day, ci=None)
ax1.set(xlabel='Hour', ylabel='Average household consumption [kW]')
plt.axvline(x=18)
ax1.title.set_text('Age: 18-30')
plt.show()
#Number of adults
H1 = df.loc[:,df.columns.isin(k for k in df.columns if 'Average' in k and 'a1' in k)]
H1['Average']=H1.mean(axis=1)
H1['Adults']='One'
H2=df.loc[:,df.columns.isin(k for k in df.columns if 'Average' in k and 'h2' in k)]
H2['Average']=H2.mean(axis=1)
H2['Adults']='Two'
data= | pd.concat([H1,H2]) | pandas.concat |
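# Illustrative continuation, not part of the original script: plot the one- vs two-adult
# comparison in the same style as the household plots above.
plt.figure(figsize=size)
ax = sb.lineplot(data.index.hour, data.Average, hue=data.Adults, ci=None)
ax.set(xlabel='Hour', ylabel='Average household consumption [kW]')
plt.show()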
import numpy as np
import pandas as pd
import fiona
import io
from shapely import geometry
import click
from wit_tooling import query_wit_data
def shape_list(key, values, shapefile):
"""
Get a generator of shapes from the given shapefile
key: the key to match in 'properties' in the shape file
values: a list of property values
shapefile: the name of your shape file
e.g. key='ORIGID', values=[1, 2, 3, 4, 5],
shapefile='/g/data/r78/DEA_Wetlands/shapefiles/MDB_ANAE_Aug2017_modified_2019_SB_3577.shp'
"""
count = len(values)
with fiona.open(shapefile) as allshapes:
for shape in allshapes:
shape_id = shape['properties'].get(key)
if shape_id is None:
continue
if isinstance(shape_id, float):
shape_id = int(shape_id)
if shape_id in values:
yield(shape_id, shape)
count -= 1
if count <= 0:
break
def get_areas(features, pkey='SYSID'):
"""
Calculate the area of a list/generator of shapes
input:
features: a list of shapes indexed by the key
output:
a dataframe of areas indexed by the key
"""
re = pd.DataFrame()
for f in features:
va = pd.DataFrame([[f[0], geometry.shape(f[1]['geometry']).area/1e4]], columns=[pkey, 'area'])
re = re.append(va, sort=False)
return re.set_index(pkey)
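# Illustrative usage, not part of the original module; the shapefile path and SYSID values
# are assumptions. Areas come out in hectares only if the shapefile uses a metre-based CRS
# (e.g. EPSG:3577), since get_areas divides square metres by 1e4.
def example_polygon_areas(shapefile='/path/to/polygons_3577.shp', key='SYSID'):
    """Sketch only: stream the matching shapes and tabulate their areas."""
    features = shape_list(key, [1, 2, 3], shapefile)
    return get_areas(features, pkey=key)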
def dump_wit_data(key, feature_list, output, batch=-1):
"""
dump wit data from the database into a file
input:
key: Name to id the polygon
feature_list: a list or generator of features
output:
a csv file to save all the wit data
"""
count = 0
if batch > 0:
fname = output.split('.')[0]
sub_fname = fname + '_0.csv'
appendix = 0
else:
sub_fname = output
for f_id, f in feature_list:
_, wit_data = query_wit_data(f)
csv_buf = io.StringIO()
wit_df = pd.DataFrame(data=wit_data, columns=['TIME', 'BS', 'NPV', 'PV', 'WET', 'WATER'])
wit_df.insert(0, key, f_id)
wit_df.to_csv(csv_buf, index=False, header=False)
csv_buf.seek(0)
with open(sub_fname, 'a') as f:
f.write(csv_buf.read())
if batch < 0:
continue
count += 1
if count >= batch:
with open(sub_fname, 'a') as f:
f.write(','.join(list(wit_df.columns)))
count = 0
appendix += 1
sub_fname = fname + '_' + str(appendix) + '.csv'
if count < batch or batch < 0:
with open(sub_fname, 'a') as f:
f.write(','.join(list(wit_df.columns)))
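# Illustrative usage, not part of the original module; the paths, key and id range are
# assumptions. This mirrors what the wit-dump command below wires together.
def example_dump(shapefile='/path/to/polygons.shp', key='SYSID'):
    """Sketch only: stream polygons and append their WIT time series to csv in batches."""
    features = shape_list(key, list(range(1, 3001)), shapefile)
    dump_wit_data(key, features, 'wit_data.csv', batch=3000)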
def annual_metrics(wit_data, members=['PV', 'WET', 'WATER', 'BS', 'NPV', ['NPV', 'PV', 'WET'],
['PV', 'WET'], ['WATER', 'WET']], threshold=[25, 75], pkey='SYSID'):
"""
Compute the annual max, min, mean, median and count with given wit data, members and threshold
input:
wit_data: dataframe of WIT
members: the elements which the metrics are computed against, can be a column from wit_data, e.g. 'PV'
or the sum of wit columns, e.g. ['WATER', 'WET']
threshold: a list of thresholds such that (elements >= threshold[i]) is True,
where i = 0, 1...len(threshold)-1
output:
dataframe of metrics
"""
years = wit_data['TIME']
i = 0
wit_df = wit_data.copy(deep=True)
for m in members:
if isinstance(m, list):
wit_df.insert(wit_df.columns.size+i, '+'.join(m), wit_df[m].sum(axis=1))
years = pd.DatetimeIndex(wit_df['TIME']).year.unique()
shape_id_list = wit_df[pkey].unique()
#shane changed 4 to 5 to accommodate median added below
wit_metrics = [pd.DataFrame()] * 5
for y in years:
wit_yearly = wit_df[pd.DatetimeIndex(wit_df['TIME']).year==y].drop(columns=['TIME']).groupby(pkey).max()
wit_yearly.insert(0, 'YEAR', y)
wit_yearly = wit_yearly.rename(columns={n: n+'_max' for n in wit_yearly.columns[1:]})
wit_metrics[0] = wit_metrics[0].append(wit_yearly, sort=False)
for y in years:
wit_yearly = wit_df[pd.DatetimeIndex(wit_df['TIME']).year==y].drop(columns=['TIME']).groupby(pkey).min()
wit_yearly.insert(0, 'YEAR', y)
wit_yearly = wit_yearly.rename(columns={n: n+'_min' for n in wit_yearly.columns[1:]})
wit_metrics[1] = wit_metrics[1].append(wit_yearly, sort=False)
for y in years:
wit_yearly = wit_df[pd.DatetimeIndex(wit_df['TIME']).year==y].drop(columns=['TIME']).groupby(pkey).mean()
wit_yearly.insert(0, 'YEAR', y)
wit_yearly = wit_yearly.rename(columns={n: n+'_mean' for n in wit_yearly.columns[1:]})
wit_metrics[2] = wit_metrics[2].append(wit_yearly, sort=False)
#*********************** START ADDED BY SHANE ***********************
#adding median
for y in years:
wit_yearly = wit_df[pd.DatetimeIndex(wit_df['TIME']).year==y].drop(columns=['TIME']).groupby(pkey).median()
wit_yearly.insert(0, 'YEAR', y)
wit_yearly = wit_yearly.rename(columns={n: n+'_median' for n in wit_yearly.columns[1:]})
wit_metrics[3] = wit_metrics[3].append(wit_yearly, sort=False)
#*********************** END ADDED BY SHANE ***********************
for y in years:
wit_yearly = wit_df[pd.DatetimeIndex(wit_df['TIME']).year==y][[pkey, 'BS']].groupby(pkey).count()
wit_yearly.insert(0, 'YEAR', y)
wit_yearly = wit_yearly.rename(columns={n: 'count' for n in wit_yearly.columns[1:]})
#shane changed index from 3 to 4 to accommodate median added above
wit_metrics[4] = wit_metrics[4].append(wit_yearly, sort=False)
for t in threshold:
wit_df_ts = wit_df.copy(deep=True)
wit_metrics += [pd.DataFrame()]
wit_df_ts.loc[:, wit_df_ts.columns[2:]] = wit_df_ts.loc[:, wit_df_ts.columns[2:]].mask((wit_df_ts[wit_df_ts.columns[2:]] < t/100), np.nan)
for y in years:
wit_yearly = wit_df_ts[pd.DatetimeIndex(wit_df_ts['TIME']).year==y].drop(columns=['TIME']).groupby(pkey).count()
wit_yearly.insert(0, 'YEAR', y)
wit_yearly = wit_yearly.rename(columns={n: n+'_count'+str(t) for n in wit_yearly.columns[1:]})
wit_metrics[-1] = wit_metrics[-1].append(wit_yearly, sort=False)
wit_yearly_metrics = wit_metrics[0]
for i in range(len(wit_metrics)-1):
wit_yearly_metrics = pd.merge(wit_yearly_metrics, wit_metrics[i+1], on=[pkey, 'YEAR'], how='inner')
return wit_yearly_metrics
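# Illustrative usage, not part of the original module; the csv name is an assumption and
# the file is assumed to carry a header row with the columns written by dump_wit_data
# (pkey, TIME, BS, NPV, PV, WET, WATER).
def example_annual_metrics(csv_file='wit_data.csv', key='SYSID'):
    """Sketch only: yearly max/min/mean/median/count plus threshold counts per polygon."""
    wit_data = pd.read_csv(csv_file)
    return annual_metrics(wit_data, pkey=key)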
def get_event_time(wit_ww, threshold, pkey='SYSID'):
"""
Compute inundation event times for a given threshold
input:
wit_ww: wetness (WATER+WET) computed from wit data, indexed by the key
threshold: a value (or a per-polygon dataframe) such that (WATER+WET >= threshold) = inundation
output:
dataframe of inundation event times
"""
if isinstance(threshold, pd.DataFrame):
gid = wit_ww.index.unique()[0]
poly_threshold = threshold.loc[gid].to_numpy()[0]
else:
poly_threshold = threshold
i_start = wit_ww[wit_ww['WW'] >= poly_threshold]['TIME'].min()
if pd.isnull(i_start):
re = pd.DataFrame([[np.nan] * 4], columns=['start_time', 'end_time', 'duration', 'gap'], index=wit_ww.index.unique())
re.index.name = pkey
return re
re_idx = np.searchsorted(wit_ww[(wit_ww['WW'] < poly_threshold)]['TIME'].values,
wit_ww[(wit_ww['WW'] >= poly_threshold)]['TIME'].values)
re_idx, count = np.unique(re_idx, return_counts=True)
start_idx = np.zeros(len(count)+1, dtype='int')
start_idx[1:] = np.cumsum(count)
re_start = wit_ww[(wit_ww['WW'] >= poly_threshold)].iloc[start_idx[:-1]][['TIME']].rename(columns={'TIME': 'start_time'})
re_end = wit_ww[(wit_ww['WW'] >= poly_threshold)].iloc[start_idx[1:] - 1][['TIME']].rename(columns={'TIME': 'end_time'})
re = pd.concat([re_start, re_end], axis=1)
re.insert(2, 'duration',
(re['end_time'] - re['start_time'] + np.timedelta64(1, 'D')).astype('timedelta64[D]'))
re.insert(3, 'gap', np.concatenate([[np.timedelta64(0, 'D')],
(re['start_time'][1:].values - re['end_time'][:-1].values - np.timedelta64(1, 'D')).astype('timedelta64[D]')]))
re.insert(0, pkey, wit_ww.index.unique()[0])
return re.set_index(pkey)
def get_im_stats(grouped_wit, im_time, wit_area):
"""
Get inundation stats given wit data and events
input:
grouped_wit: wit data
im_time: inundation events in time
output:
the stats of inundation events
"""
gid = grouped_wit.index.unique()[0]
if gid not in im_time.indices.keys():
return pd.DataFrame([[np.nan]*5], columns=['start_time', 'max_wet', 'mean_wet', 'max_wet_area', 'mean_wet_area'],
index=[gid])
re_left = np.searchsorted(grouped_wit['TIME'].values.astype('datetime64'),
im_time.get_group(gid)['start_time'].values, side='left')
re_right = np.searchsorted(grouped_wit['TIME'].values.astype('datetime64'),
im_time.get_group(gid)['end_time'].values, side='right')
re = pd.DataFrame()
for a, b in zip(re_left, re_right):
tmp = pd.concat([grouped_wit.iloc[a:a+1]['TIME'].rename('start_time').astype('datetime64'),
pd.Series(grouped_wit.iloc[a:b]['WW'].max(),index=[gid], name='max_wet'),
pd.Series(grouped_wit.iloc[a:b]['WW'].mean(),index=[gid], name='mean_wet')],
axis=1)
tmp.insert(3, 'max_wet_area', tmp['max_wet'].values * wit_area[wit_area.index==gid].values)
tmp.insert(4, 'mean_wet_area', tmp['mean_wet'].values * wit_area[wit_area.index==gid].values)
re = re.append(tmp, sort=False)
re.index.name = grouped_wit.index.name
return re
def event_time(wit_df, threshold=0.01, pkey='SYSID'):
"""
Compute the inundation events with given wit data and threshold
input:
wit_df: wetness computed from wit data
        threshold: a value such that (WATER+WET >= threshold) = inundation
output:
dataframe of events
"""
return wit_df.groupby(pkey).apply(get_event_time, threshold=threshold, pkey=pkey).dropna().droplevel(0)
def event_stats(wit_df, wit_im, wit_area, pkey='SYSID'):
"""
Compute inundation event stats with given wit wetness, events defined by (start_time, end_time)
and polygon areas
input:
wit_df: wetness computed from wit data
wit_im: inundation event
wit_area: polygon areas indexed by the key
output:
dataframe of event stats
"""
grouped_im = wit_im[['start_time', 'end_time']].groupby(pkey)
return wit_df.groupby(pkey).apply(get_im_stats, im_time=grouped_im, wit_area=wit_area).droplevel(0)
def inundation_metrics(wit_data, wit_area, threshold=0.01, pkey='SYSID'):
"""
Compute inundation metrics with given wit data, polygon areas and threshold
input:
wit_data: a dataframe of wit_data
wit_area: polygon areas indexed by the key
        threshold: a value such that (WATER+WET >= threshold) = inundation
output:
dataframe of inundation metrics
"""
wit_df = wit_data.copy(deep=True)
wit_df.insert(2, 'WW', wit_df[['WATER', 'WET']].sum(axis=1))
wit_df = wit_df.drop(columns=wit_df.columns[3:])
wit_df['TIME'] = wit_df['TIME'].astype('datetime64')
wit_df = wit_df.set_index(pkey)
wit_im_time = event_time(wit_df, threshold, pkey)
wit_im_stats = event_stats(wit_df, wit_im_time, wit_area, pkey)
return pd.merge(wit_im_time, wit_im_stats.dropna(), on=[pkey, 'start_time'], how='inner')
def interpolate_wit(grouped_wit, pkey='SYSID'):
    """Interpolate a single polygon's WIT fractions onto a daily time step."""
daily_wit = pd.DataFrame({pkey: grouped_wit[pkey].unique()[0], 'TIME': pd.date_range(grouped_wit['TIME'].astype('datetime64[D]').min(), grouped_wit['TIME'].astype('datetime64[D]').max(), freq='D'),
'BS': np.nan, 'NPV': np.nan, 'PV': np.nan, 'WET': np.nan, 'WATER': np.nan})
_, nidx, oidx = np.intersect1d(daily_wit['TIME'].to_numpy().astype('datetime64[D]'), grouped_wit['TIME'].to_numpy().astype('datetime64[D]'),
return_indices=True)
daily_wit.loc[nidx, ["BS","NPV","PV","WET","WATER"]] = grouped_wit[["BS","NPV","PV","WET","WATER"]].iloc[oidx].to_numpy()
daily_wit = daily_wit.interpolate(axis=0)
return daily_wit
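# Illustrative helper (a sketch, not required by the CLI below): interpolate_wit() works on a
# single polygon's rows, so it is normally applied per group before computing metrics; column
# names follow the WIT layout assumed above ([pkey, TIME, BS, NPV, PV, WET, WATER]).
def example_daily_wit(wit_data, pkey='SYSID'):
    """Return wit_data resampled to a daily time step, one interpolation per polygon."""
    return wit_data.groupby(pkey, group_keys=False).apply(interpolate_wit, pkey=pkey)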
def all_time_median(wit_data, members=[['WATER', 'WET']], pkey='SYSID'):
"""
Compute the all time median
input:
wit_data: dataframe of WIT
members: the elements which the metrics are computed against, can be a column from wit_data, e.g. 'PV'
or the sum of wit columns, e.g. ['WATER', 'WET']
output:
dataframe of median indexed by pkey
"""
wit_df = wit_data.copy(deep=True)
i = 0
for m in members:
if isinstance(m, list):
wit_df.insert(wit_df.columns.size+i, '+'.join(m), wit_df[m].sum(axis=1))
i += 1
return wit_df.groupby(pkey).median()
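# Example usage (illustrative sketch): `members` can mix plain columns and column sums; only
# list entries create a new summed column before the per-polygon median is taken, e.g.
#   medians = all_time_median(wit_data, members=[['WATER', 'WET'], ['PV', 'NPV']])
#   # medians then contains 'WATER+WET' and 'PV+NPV' columns alongside the original ones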
shapefile_path = click.argument('shapefile', type=str, default='/g/data/r78/rjd547/DES-QLD_Project/data/Wet_WGS84_P.shp')
@click.group(help=__doc__)
def main():
pass
@main.command(name='wit-dump', help='dump wit data from database')
@shapefile_path
@click.option('--pkey', type=str, help='the key to match polygon in the shapefile, e.g., SYSID', default=None)
@click.option('--input-file', type=str, help='input csv with pkey', default=None)
@click.option('--output-file', type=str, help='output csv', default=None)
@click.option('--batch', type=int, help='how many polygons per file', default=3000)
def wit_dump(shapefile, pkey, input_file, output_file, batch):
    features = shape_list(pkey, pd.read_csv(input_file, header=None))
#from _typeshed import NoneType
from bs4 import BeautifulSoup
import requests
import lxml,json
import pandas as pd
def export_data(df):
df.to_csv(r'Data\data_extra.csv',encoding='utf-8',mode='a', index=False)
print("done")
df = pd.DataFrame(columns=['Image_url', 'Name', 'Publisher', 'Release', 'Rating', 'Genre', 'URL', 'Metascore', 'Userscore', 'Platform', 'Summary'])
import cv2
import os
import re
import numpy as np
import pandas as pd
import random
"""
This program augments UV data by taking only the sick cells, creating a mosaic of them, and augmenting the result
"""
# Constants
HOME = os.path.expanduser("~")
DATA_DIR = os.path.join(HOME, "Downloads", "AllUVScopePreProcData/")
# Known input data parameters for the UV microscope data
# using lumi disassemble
IMAGE_SHAPE = [520, 696]
TILE_SIZE_X = TILE_SIZE_Y = 120 # each rbc cell is in a 120 x 120 tile
INPUT_IMAGE_FORMAT = "tif"
BACKGROUND_COLOR = 214 # for 405 nm wavelength data, avg background intensity
NUM_TRIALS = 10 # Number of trials to find unoccupied random location subset
RANDOM_SEED = 42
# Change this to where you have the downloaded the dataset
im_dir = DATA_DIR + "{}/Matlab-RBC Instances/285 nm/sl{}/"
xls_file_name = DATA_DIR + "mergedMetadata_285nm_20200217.xls"
df = pd.read_excel(xls_file_name, sheetname=None, ignore_index=True)
from collections import defaultdict
from sklearn import preprocessing
import signal
import influxdb_client
from influxdb_client import InfluxDBClient
from datetime import datetime
from sklearn.preprocessing import KBinsDiscretizer
import argparse
import ntopng_constants as ntopng_c
import numpy as np
import pandas as pd
import pathlib
import logging
import pyfluxc.pyfluxc as flux
from pyts.approximation import SymbolicAggregateApproximation as SAX
import re
import requests
import sys
import time
import importlib
importlib.reload(flux)
# ----- ----- PREPROCESSING ----- ----- #
# ----- ----- ------------- ----- ----- #
MORNING = np.array([1, 0, 0, 0])
AFTERNOON = np.array([0, 1, 0, 0])
EVENING = np.array([0, 0, 1, 0])
NIGHT = np.array([0, 0, 0, 1])
hour2ts = [(MORNING, range(6, 12)),
(AFTERNOON, range(12, 17)),
(EVENING, range(17, 22)),
(NIGHT, list(range(22, 24))+list(range(0, 6)))]
hour2ts = { h: t for t, hrange in hour2ts for h in hrange }
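# hour2ts maps an hour of day to its one-hot time-slot vector, e.g. hour2ts[14] is AFTERNOON
# (array([0, 1, 0, 0])) and hour2ts[23] is NIGHT; Preprocessor.date_as_feature() below relies
# on this lookup.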
class Preprocessor():
def __init__(self, deltas=True, discretize=True, flevel="NF_BLMISC"):
self.flevel = flevel
self.compute_deltas = deltas
self.compute_discrtz = discretize
self.discretizer = KBinsDiscretizer(n_bins=15, encode="ordinal")
@staticmethod
def date_as_feature(df):
time_indx = df.index.get_level_values("_time")
weekend_map = defaultdict(lambda: 0, { 5: 1, 6: 1 })
df["time:is_weekend"] = time_indx.dayofweek.map(weekend_map)
mapped_hours = time_indx.hour.map(hour2ts).values.tolist()
hours_df = pd.DataFrame(mapped_hours, columns=["time:morning", "time:afternoon", "time:evening", "time:night"], index=df.index)
df = pd.concat([df, hours_df], axis=1)
return df
@staticmethod
def fillzero(df):
"""Replace zero traffic holes with rolling window mean
"""
missing_traffic = (df == 0).all(axis=1)
df[missing_traffic].replace(0, np.NaN)
r_mean = df.rolling(min_periods=1, window=3, center=True).sum().shift(-1) / 2
df.loc[missing_traffic] = r_mean[missing_traffic]
return df
def discretize(self, df, fit=False):
tc = [c for c in df.columns if (("ndpi_flows:num_flows" not in c) and ("time:" not in c))]
values = df[tc].values
if fit:
df[tc] = self.discretizer.fit_transform(values)
else:
df[tc] = self.discretizer.transform(values)
return df
def preprocessing(self, df):
smart_features = set(ntopng_c.FEATURE_LEVELS[self.flevel])
available_features = set(df.columns)
available_cols = available_features.intersection(smart_features)
if available_cols != smart_features:
missing_c = smart_features - available_cols
logging.warning(f"Missing columns: {missing_c}")
df = df[available_cols].copy(deep=True)
df[df<0] = 0
df = df.fillna(0)
df = Preprocessor.fillzero(df)
# DPI unit length normalization ..... #
ndpi_num_flows_c = [c for c in df.columns if "ndpi_flows:num_flows" in c]
ndpi = df[ndpi_num_flows_c]
ndpi_sum = ndpi.sum(axis=1)
df.loc[:, ndpi_num_flows_c] = ndpi.divide(ndpi_sum + 1e-3, axis=0)
# Non decreasing delta discretization ..... #
if self.compute_deltas:
# Filter selected non-stationary features
non_decreasing = [c for c in df.columns if c in ntopng_c.NON_DECREASING]
df[non_decreasing] = df[non_decreasing].groupby(level=["device_category", "host"], group_keys=False).apply(lambda group: group.diff())
df = df.fillna(0)
# Feature discretization ..... #
# Note: we avoided using pandas qcut/cut due to the decoupling between fit and transform
# offered by scikit. In the future {KBinsDiscretizer} will be fitted once a week or so
# with weekly data and used multiple times while the model is running
# if self.compute_discrtz:
# df = self.discretize(df, fit)
# Date/hour as a feature ..... #
df = Preprocessor.date_as_feature(df)
return df
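# Example usage (illustrative sketch; `raw_df` is a placeholder for a dataframe indexed by
# (device_category, host, _time), as expected by preprocessing()):
#   pre = Preprocessor(deltas=True, discretize=False, flevel="NF_BLMISC")
#   smart_df = pre.preprocessing(raw_df)
# The KBinsDiscretizer stays unfitted unless discretize() is called explicitly with fit=True.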
# ----- ----- HOST DATA GENERATOR ----- ----- #
# ----- ----- ------------------- ----- ----- #
class FluxDataGenerator():
def __init__(self, bucket, windowsize, fluxclient, start, ntopng_conf):
self.last_timestamp = start
self.samples = None
self.fluxclient = fluxclient
self.bucket = bucket
self.ntopng_conf = ntopng_conf
self.host_map = {}
self.windowsize = windowsize
wndsize_val, wndsize_unit = re.match(r'([0-9]+)([a-zA-Z]+)', self.windowsize).groups()
self.window_timedelta = np.timedelta64(wndsize_val, wndsize_unit)
def to_pandas(self):
if self.samples is None:
raise ValueError('No samples available')
return self.samples.copy(deep=True)
def pull(self, start=None, stop=None):
if not start:
start = self.last_timestamp
        utcnow = pd.Timestamp.utcnow()
from sklearn.datasets import load_breast_cancer, fetch_california_housing
import pandas as pd
import numpy as np
import pickle
import os
import collections
from sklearn.model_selection import StratifiedShuffleSplit, ShuffleSplit
def handle_categorical_feat(X_df):
''' It moves the categorical features to the last '''
original_columns = []
one_hot_columns = []
for col_name, dtype in zip(X_df.dtypes.index, X_df.dtypes):
if dtype == object:
one_hot_columns.append(col_name)
else:
original_columns.append(col_name)
X_df = X_df[original_columns + one_hot_columns]
return X_df, one_hot_columns
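# Example usage (illustrative sketch): handle_categorical_feat() only reorders columns so that
# object-dtype (categorical) features come last, e.g.
#   X_df, onehot_columns = handle_categorical_feat(
#       pd.DataFrame({'age': [30, 40], 'sex': ['F', 'M']}))
#   # X_df.columns -> ['age', 'sex'], onehot_columns -> ['sex']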
def load_breast_data():
breast = load_breast_cancer()
feature_names = list(breast.feature_names)
X, y = pd.DataFrame(breast.data, columns=feature_names), pd.Series(breast.target)
dataset = {
'problem': 'classification',
'full': {
'X': X,
'y': y,
},
'd_name': 'breast',
'search_lam': np.logspace(-1, 2.5, 15),
}
return dataset
def load_adult_data():
# https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data
df = pd.read_csv("./datasets/adult.data", header=None)
df.columns = [
"Age", "WorkClass", "fnlwgt", "Education", "EducationNum",
"MaritalStatus", "Occupation", "Relationship", "Race", "Gender",
"CapitalGain", "CapitalLoss", "HoursPerWeek", "NativeCountry", "Income"
]
train_cols = df.columns[0:-1]
label = df.columns[-1]
X_df = df[train_cols].copy()
# X_df = pd.get_dummies(X_df)
X_df, onehot_columns = handle_categorical_feat(X_df)
y_df = df[label].copy()
# Make it as 0 or 1
y_df.loc[y_df == ' >50K'] = 1.
y_df.loc[y_df == ' <=50K'] = 0.
y_df = y_df.astype(int)
dataset = {
'problem': 'classification',
'full': {
'X': X_df,
'y': y_df,
},
'd_name': 'adult',
'search_lam': np.logspace(-2, 2, 15),
'n_splines': 50,
'onehot_columns': onehot_columns,
}
return dataset
def load_credit_data():
# https://www.kaggle.com/mlg-ulb/creditcardfraud
df = pd.read_csv(r'./datasets/creditcard.csv')
train_cols = df.columns[0:-1]
label = df.columns[-1]
X_df = df[train_cols]
y_df = df[label]
dataset = {
'problem': 'classification',
'full': {
'X': X_df,
'y': y_df,
},
'd_name': 'credit',
'search_lam': np.logspace(-0.5, 2.5, 8),
}
return dataset
def load_churn_data():
# https://www.kaggle.com/blastchar/telco-customer-churn/downloads/WA_Fn-UseC_-Telco-Customer-Churn.csv/1
df = pd.read_csv(r'./datasets/WA_Fn-UseC_-Telco-Customer-Churn.csv')
train_cols = df.columns[1:-1] # First column is an ID
label = df.columns[-1]
X_df = df[train_cols].copy()
    # Handle special case of TotalCharges wrongly assigned as object
X_df['TotalCharges'][X_df['TotalCharges'] == ' '] = 0.
X_df.loc[:, 'TotalCharges'] = pd.to_numeric(X_df['TotalCharges'])
# X_df = pd.get_dummies(X_df)
X_df, onehot_columns = handle_categorical_feat(X_df)
y_df = df[label].copy() # 'Yes, No'
# Make it as 0 or 1
y_df[y_df == 'Yes'] = 1.
y_df[y_df == 'No'] = 0.
y_df = y_df.astype(int)
dataset = {
'problem': 'classification',
'full': {
'X': X_df,
'y': y_df,
},
'd_name': 'churn',
'search_lam': np.logspace(0, 3, 15),
'onehot_columns': onehot_columns,
}
return dataset
def load_pneumonia_data(folder='/media/intdisk/medical/RaniHasPneumonia/'):
featurename_file = os.path.join(folder, 'featureNames.txt')
col_names = pd.read_csv(featurename_file, delimiter='\t', header=None, index_col=0).iloc[:, 0].values
def read_data(file_path='pneumonia/RaniHasPneumonia/medis9847c.data'):
df = pd.read_csv(file_path, delimiter='\t', header=None)
        df = df.iloc[:, :-1]  # Remove the last empty, weird column
df.columns = col_names
return df
df_train = read_data(os.path.join(folder, 'medis9847c.data'))
df_test = read_data(os.path.join(folder, 'medis9847c.test'))
df = pd.concat([df_train, df_test], axis=0).reset_index(drop=True)
X_df = df.iloc[:, :-1]
y_df = df.iloc[:, -1]
dataset = {
'problem': 'classification',
'full': {
'X': X_df,
'y': y_df,
},
'test_size': 4352 / 14199,
'd_name': 'pneumonia',
'search_lam': np.logspace(0, 3, 15),
}
return dataset
def load_heart_data():
# https://www.kaggle.com/sonumj/heart-disease-dataset-from-uci
df = pd.read_csv('./datasets/HeartDisease.csv')
label = df.columns[-2]
train_cols = list(df.columns[1:-2]) + [df.columns[-1]]
X_df = df[train_cols]
y_df = df[label]
# X_df = pd.get_dummies(X_df)
X_df, onehot_columns = handle_categorical_feat(X_df)
# Impute the missingness as 0
X_df = X_df.apply(lambda col: col if col.dtype == object else col.fillna(0.), axis=0)
dataset = {
'problem': 'classification',
'full': {
'X': X_df,
'y': y_df,
},
'd_name': 'heart',
'search_lam': np.logspace(0, 3, 15),
'onehot_columns': onehot_columns,
}
return dataset
def load_mimiciii_data():
df_adult = pd.read_csv('./datasets/adult_icu.gz', compression='gzip')
train_cols = [
'age', 'first_hosp_stay', 'first_icu_stay', 'adult_icu', 'eth_asian',
'eth_black', 'eth_hispanic', 'eth_other', 'eth_white',
'admType_ELECTIVE', 'admType_EMERGENCY', 'admType_NEWBORN',
'admType_URGENT', 'heartrate_min', 'heartrate_max', 'heartrate_mean',
'sysbp_min', 'sysbp_max', 'sysbp_mean', 'diasbp_min', 'diasbp_max',
'diasbp_mean', 'meanbp_min', 'meanbp_max', 'meanbp_mean',
'resprate_min', 'resprate_max', 'resprate_mean', 'tempc_min',
'tempc_max', 'tempc_mean', 'spo2_min', 'spo2_max', 'spo2_mean',
'glucose_min', 'glucose_max', 'glucose_mean', 'aniongap', 'albumin',
'bicarbonate', 'bilirubin', 'creatinine', 'chloride', 'glucose',
'hematocrit', 'hemoglobin', 'lactate', 'magnesium', 'phosphate',
'platelet', 'potassium', 'ptt', 'inr', 'pt', 'sodium', 'bun', 'wbc']
label = 'mort_icu'
X_df = df_adult[train_cols]
y_df = df_adult[label]
dataset = {
'problem': 'classification',
'full': {
'X': X_df,
'y': y_df,
},
'd_name': 'mimiciii',
'search_lam': np.logspace(0, 3, 15),
}
return dataset
def load_mimicii_data():
cols = ['Age', 'GCS', 'SBP', 'HR', 'Temperature',
'PFratio', 'Renal', 'Urea', 'WBC', 'CO2', 'Na', 'K',
'Bilirubin', 'AdmissionType', 'AIDS',
'MetastaticCancer', 'Lymphoma', 'HospitalMortality']
table = pd.read_csv('./datasets/mimic2.data', delimiter=' ', header=None)
table.columns = cols
X_df = table.iloc[:, :-1]
y_df = table.iloc[:, -1]
dataset = {
'problem': 'classification',
'full': {
'X': X_df,
'y': y_df,
},
'd_name': 'mimicii',
'search_lam': np.logspace(-2, 3.5, 15),
}
return dataset
def load_diabetes2_data(load_cache=False):
cache_dataset_path = './datasets/diabetes_cache.pkl'
if load_cache and os.path.exists(cache_dataset_path):
print('Find the diabetes dataset. Load from cache.')
with open(cache_dataset_path, 'rb') as fp:
dataset = pickle.load(fp)
return dataset
df = pd.read_csv('./datasets/dataset_diabetes/diabetic_data.csv')
x_cols = df.columns[2:-1]
y_col = df.columns[-1]
X_df = df[x_cols].copy()
y_df = df[y_col].copy()
y_df.loc[(y_df == 'NO') | (y_df == '>30')] = 0
y_df.loc[y_df == '<30'] = 1
# is_false = (y_df == 'NO')
# y_df.loc[is_false] = 0
# y_df.loc[~is_false] = 1
y_df = y_df.astype(int)
# Preprocess X
X_df.loc[:, 'age'] = X_df.age.apply(lambda s: (int(s[1:s.index('-')]) + int(s[(s.index('-') + 1):-1])) / 2).astype(int)
X_df.loc[:, 'weight'] = X_df.weight.apply(lambda s: 0. if s == '?' else ((float(s[1:s.index('-')]) + float(s[(s.index('-') + 1):-1])) / 2 if '-' in s else float(s[1:])))
X_df.loc[:, 'admission_source_id'] = X_df.admission_source_id.astype('object')
X_df.loc[:, 'admission_type_id'] = X_df.admission_type_id.astype('object')
X_df.loc[:, 'discharge_disposition_id'] = X_df.discharge_disposition_id.astype('object')
X_df.loc[:, 'change'] = X_df.change.apply(lambda s: 0 if s == 'No' else 1).astype(np.uint8)
X_df.loc[:, 'diabetesMed'] = X_df.diabetesMed.apply(lambda s: 0 if s == 'No' else 1).astype(np.uint8)
X_df.loc[:, 'metformin-pioglitazone'] = X_df['metformin-pioglitazone'].apply(lambda s: 0 if s == 'No' else 1).astype(np.uint8)
X_df.loc[:, 'metformin-rosiglitazone'] = X_df['metformin-rosiglitazone'].apply(lambda s: 0 if s == 'No' else 1).astype(np.uint8)
X_df.loc[:, 'glipizide-metformin'] = X_df['glipizide-metformin'].apply(lambda s: 0 if s == 'No' else 1).astype(np.uint8)
X_df.loc[:, 'troglitazone'] = X_df['troglitazone'].apply(lambda s: 0 if s == 'No' else 1).astype(np.uint8)
X_df.loc[:, 'tolbutamide'] = X_df['tolbutamide'].apply(lambda s: 0 if s == 'No' else 1).astype(np.uint8)
X_df.loc[:, 'acetohexamide'] = X_df['acetohexamide'].apply(lambda s: 0 if s == 'No' else 1).astype(np.uint8)
X_df = X_df.drop(['citoglipton', 'examide'], axis=1) # Only have NO in the data
# diag_combined = X_df.apply(lambda x: set(
# [x.diag_1 for i in range(1) if x.diag_1 != '?'] + [x.diag_2 for i in range(1) if x.diag_2 != '?'] + [x.diag_3 for i in range(1) if x.diag_3 != '?']
# ), axis=1)
# diag_combined = diag_combined.apply(collections.Counter)
# diag_multihot_encode = pd.DataFrame.from_records(diag_combined).fillna(value=0).astype(np.uint8)
# diag_multihot_encode.columns = ['diag_%s' % str(c) for c in diag_multihot_encode.columns]
X_df = X_df.drop(['diag_1', 'diag_2', 'diag_3'], axis=1)
# X_df = pd.concat([X_df, diag_multihot_encode], axis=1)
# X_df = pd.get_dummies(X_df)
X_df, onehot_columns = handle_categorical_feat(X_df)
dataset = {
'problem': 'classification',
'full': {
'X': X_df,
'y': y_df,
},
'd_name': 'diabetes2',
'search_lam': np.logspace(-3, 2, 8),
'n_splines': 50,
'onehot_columns': onehot_columns,
}
with open(cache_dataset_path, 'wb') as op:
pickle.dump(dataset, op)
return dataset
def load_TCGA_data(test_split=0.33, n_splits=20, cosmic=True, random_state=1377, **kwargs):
np.random.seed(random_state)
filename = 'pancancer_cosmic.npz' if cosmic else 'pancancer_parsed.npz'
x = np.load('datasets/TCGA/%s' % filename)['arr_0']
# log transform
x_df = pd.DataFrame(np.log10(x + 1))
# append the column name
transcript_names_path = 'transcript_names_cosmic' if cosmic else 'transcript_names'
x_df.columns = np.load('datasets/TCGA/%s.npy' % transcript_names_path)
# remove the columns with std as 0
x_df = x_df.loc[:, (x.std(axis=0) > 0.)]
covars = pd.read_csv('datasets/TCGA/potential_covariates.tsv', delimiter='\t')
covars['label'] = np.logical_or(covars[['sample_type']] == 'Primary Blood Derived Cancer - Peripheral Blood',
np.logical_or(covars[['sample_type']] == 'Additional Metastatic',
np.logical_or(covars[['sample_type']] == 'Recurrent Tumor',
np.logical_or(covars[['sample_type']] == 'Additional - New Primary',
np.logical_or(covars[['sample_type']] == 'Metastatic',
covars[['sample_type']] == 'Primary Tumor')))))
stratify_lookup = covars.groupby('submitter_id').label.apply(lambda x: len(x))
covars['stratify'] = covars.submitter_id.apply(lambda x: stratify_lookup[x])
covars = covars[['submitter_id', 'label', 'stratify']]
covars['patient_idxes'] = list(range(covars.shape[0]))
def group_shuffle_split():
for _ in range(n_splits):
train_lookups = []
for num_record, df2 in covars.groupby('stratify'):
train_lookup = df2.groupby('submitter_id').apply(lambda x: True)
# randomly change them to be 0
all_idxes = np.arange(len(train_lookup))
np.random.shuffle(all_idxes)
is_test_idxes = all_idxes[:int(len(train_lookup) * test_split)]
train_lookup[is_test_idxes] = False
train_lookups.append(train_lookup)
train_lookups = pd.concat(train_lookups)
covars['is_train'] = covars.submitter_id.apply(lambda x: train_lookups[x])
train_idxes = covars.patient_idxes[covars.is_train].values
test_idxes = covars.patient_idxes[~covars.is_train].values
yield train_idxes, test_idxes
y = covars['label'].astype(float)
stratify = covars['stratify']
dataset = {
'problem': 'classification',
'full': {
'X': x_df,
'y': y,
'ss': group_shuffle_split(),
},
'd_name': 'TCGA-cosmic' if cosmic else 'TCGA-full',
}
return dataset
def load_support2cls2_data():
# http://biostat.mc.vanderbilt.edu/wiki/Main/DataSets
df = pd.read_csv('./datasets/support2/support2.csv')
one_hot_encode_cols = ['sex', 'dzclass', 'race' , 'ca', 'income']
target_variables = ['hospdead']
remove_features = ['death', 'slos', 'd.time', 'dzgroup', 'charges', 'totcst',
'totmcst', 'aps', 'sps', 'surv2m', 'surv6m', 'prg2m', 'prg6m',
'dnr', 'dnrday', 'avtisst', 'sfdm2']
df = df.drop(remove_features, axis=1)
rest_colmns = [c for c in df.columns if c not in (one_hot_encode_cols + target_variables)]
    # Impute the missing values with 0.
df[rest_colmns] = df[rest_colmns].fillna(0.)
# df = pd.get_dummies(df)
df, onehot_columns = handle_categorical_feat(df)
df['income'][df['income'].isna()] = 'NaN'
df['income'][df['income'] == 'under $11k'] = ' <$11k'
df['race'][df['race'].isna()] = 'NaN'
X_df = df.drop(target_variables, axis=1)
y_df = df[target_variables[0]]
dataset = {
'problem': 'classification',
'full': {
'X': X_df,
'y': y_df,
},
'd_name': 'support2cls2',
'search_lam': np.logspace(1.5, 4, 15),
'onehot_columns': onehot_columns,
}
return dataset
def load_onlinenewscls_data():
dataset = load_onlinenews_data()
y_df = dataset['full']['y']
y_df[y_df < 1400] = 0
y_df[y_df >= 1400] = 1
X_df = dataset['full']['X']
dataset = {
'problem': 'classification',
'full': {
'X': X_df,
'y': y_df,
},
'd_name': 'onlinenewscls',
'search_lam': np.logspace(0, 4, 15),
}
return dataset
def load_compass_data():
df = pd.read_csv('./datasets/recid.csv', delimiter=',')
target_variables = ['two_year_recid']
# df = pd.get_dummies(df, prefix=one_hot_encode_cols)
df, onehot_columns = handle_categorical_feat(df)
X_df = df.drop(target_variables, axis=1)
# X_df = X_df.astype('float64')
y_df = df[target_variables[0]]
dataset = {
'problem': 'classification',
'full': {
'X': X_df,
'y': y_df,
},
'd_name': 'compass',
'onehot_columns': onehot_columns,
}
return dataset
def load_compas2_data():
df = pd.read_csv('./datasets/recid_score.csv', delimiter=',')
target_variables = ['decile_score']
# df = pd.get_dummies(df, prefix=one_hot_encode_cols)
df, onehot_columns = handle_categorical_feat(df)
X_df = df.drop(target_variables, axis=1)
y_df = df[target_variables[0]]
dataset = {
'problem': 'regression',
'full': {
'X': X_df,
'y': y_df,
},
'd_name': 'compas2',
'onehot_columns': onehot_columns,
}
return dataset
def load_gcredit_data():
''' Load German Credit dataset '''
df = pd.read_csv('./datasets/german_credit/credit.data', delimiter='\t', header=None)
df.columns = [
'checking_balance', 'months_loan_duration', 'credit_history', 'purpose', 'amount',
'savings_balance', 'employment_length', 'installment_rate', 'personal_status',
'other_debtors', 'residence_history', 'property', 'age', 'installment_plan', 'housing',
'existing_credits', 'dependents', 'telephone', 'foreign_worker', 'job', 'class']
target_variables = ['class']
# df, onehot_columns = handle_categorical_feat(df)
X_df = df.drop(target_variables, axis=1)
y_df = df[target_variables[0]]
dataset = {
'problem': 'classification',
'full': {
'X': X_df,
'y': y_df,
},
'd_name': 'gcredit',
}
return dataset
''' =================== Regression Datasets ==================== '''
def load_bikeshare_data():
df = pd.read_csv('./datasets/bikeshare/hour.csv').set_index('instant')
train_cols = ['season', 'yr', 'mnth', 'hr', 'holiday', 'weekday',
'workingday', 'weathersit', 'temp', 'atemp', 'hum', 'windspeed']
label = 'cnt'
X_df = df[train_cols]
y_df = df[label]
dataset = {
'problem': 'regression',
'full': {
'X': X_df,
'y': y_df,
},
'd_name': 'bikeshare',
'search_lam': np.logspace(-0.5, 2, 15),
}
return dataset
def load_calhousing_data():
X, y = fetch_california_housing(data_home='./datasets/', download_if_missing=True, return_X_y=True)
columns = ['MedInc', 'HouseAge', 'AveRooms', 'AveBedrms', 'Population', 'AveOccup', 'Latitude', 'Longitude']
dataset = {
'problem': 'regression',
'full': {
'X': pd.DataFrame(X, columns=columns),
'y': pd.Series(y),
},
        'd_name': 'calhousing',
'search_lam': np.logspace(-2, 1.5, 15),
'discrete': False, # r-spline args, or it will fail
}
return dataset
def load_wine_data():
df = pd.read_csv('./datasets/winequality-white.csv', delimiter=';')
train_cols = ['fixed acidity', 'volatile acidity', 'citric acid', 'residual sugar', 'chlorides',
'free sulfur dioxide', 'total sulfur dioxide', 'density', 'pH', 'sulphates', 'alcohol']
label = 'quality'
X_df = df[train_cols]
y_df = df[label]
dataset = {
'problem': 'regression',
'full': {
'X': X_df,
'y': y_df,
},
'd_name': 'wine',
'search_lam': np.logspace(0, 3.5, 15),
}
return dataset
def load_support2reg2_data():
df = pd.read_csv('./datasets/support2/support2.csv')
one_hot_encode_cols = ['sex', 'dzclass', 'race' , 'ca', 'income']
target_variables = ['slos']
remove_features = ['death', 'd.time', 'dzgroup', 'charges', 'totcst',
'totmcst', 'aps', 'sps', 'surv2m', 'surv6m', 'prg2m', 'prg6m',
'dnr', 'dnrday', 'avtisst', 'sfdm2', 'hospdead']
df = df.drop(remove_features, axis=1)
rest_colmns = [c for c in df.columns if c not in (one_hot_encode_cols + target_variables)]
    # Impute the missing values with 0.
df[rest_colmns] = df[rest_colmns].fillna(0.)
# df = pd.get_dummies(df)
df, onehot_columns = handle_categorical_feat(df)
df['income'][df['income'].isna()] = 'NaN'
df['income'][df['income'] == 'under $11k'] = ' <$11k'
df['race'][df['race'].isna()] = 'NaN'
X_df = df.drop(target_variables, axis=1)
# X_df = X_df.astype('float64')
y_df = df[target_variables[0]]
dataset = {
'problem': 'regression',
'full': {
'X': X_df,
'y': y_df,
},
'd_name': 'support2reg2',
'search_lam': np.logspace(0.5, 4, 15),
'onehot_columns': onehot_columns,
}
return dataset
def load_onlinenews_data():
df = pd.read_csv('./datasets/onlinenews/OnlineNewsPopularity.csv')
label = ' shares'
train_cols = [' timedelta', ' n_tokens_title', ' n_tokens_content',
' n_unique_tokens', ' n_non_stop_words', ' n_non_stop_unique_tokens',
' num_hrefs', ' num_self_hrefs', ' num_imgs', ' num_videos',
' average_token_length', ' num_keywords', ' data_channel_is_lifestyle',
' data_channel_is_entertainment', ' data_channel_is_bus',
' data_channel_is_socmed', ' data_channel_is_tech',
' data_channel_is_world', ' kw_min_min', ' kw_max_min', ' kw_avg_min',
' kw_min_max', ' kw_max_max', ' kw_avg_max', ' kw_min_avg',
' kw_max_avg', ' kw_avg_avg', ' self_reference_min_shares',
' self_reference_max_shares', ' self_reference_avg_sharess',
' weekday_is_monday', ' weekday_is_tuesday', ' weekday_is_wednesday',
' weekday_is_thursday', ' weekday_is_friday', ' weekday_is_saturday',
' weekday_is_sunday', ' is_weekend', ' LDA_00', ' LDA_01', ' LDA_02',
' LDA_03', ' LDA_04', ' global_subjectivity',
' global_sentiment_polarity', ' global_rate_positive_words',
' global_rate_negative_words', ' rate_positive_words',
' rate_negative_words', ' avg_positive_polarity',
' min_positive_polarity', ' max_positive_polarity',
' avg_negative_polarity', ' min_negative_polarity',
' max_negative_polarity', ' title_subjectivity',
' title_sentiment_polarity', ' abs_title_subjectivity',
' abs_title_sentiment_polarity', ' shares']
X_df = df[train_cols]
y_df = df[label]
    # Cap the largest shares at 5000 to avoid extreme values
y_df[y_df > 5000] = 5000
dataset = {
'problem': 'regression',
'full': {
'X': X_df,
'y': y_df,
},
'd_name': 'onlinenews',
'search_lam': np.logspace(-3, 2, 12),
'discrete': False, # r-spline args, or it will fail
}
return dataset
def load_semi_synthetic_dataset(d_name, cache_dir='./datasets/ss_datasets/', overwrite=False):
'''
E.g. "ss_pneumonia_b0_r1377_xgb-d1_sh0.8_inc0.5"
'''
import models_utils
tmp = d_name.split('_')
real_d_name = tmp[1]
binarize = int(tmp[2][1:])
random_state = int(tmp[3][1:])
the_rest = tmp[4:]
if binarize:
non_b_d_name = 'ss_%s_b0_r%d_%s' % (real_d_name, random_state, '_'.join(the_rest))
d_set = load_semi_synthetic_dataset(non_b_d_name, cache_dir=cache_dir, overwrite=overwrite)
assert d_set['problem'] == 'regression', 'Only log-odds can be binarized!'
r = np.random.RandomState(random_state)
targets = r.binomial(1, 1. / (1. + np.exp(-d_set['full']['y'].values)))
d_set['full']['y'] = pd.Series(targets)
d_set['problem'] = 'classification'
d_set['d_name'] = d_name
print('Finish loading dataset %s' % d_name)
return d_set
if tmp[-1].startswith('inc'):
# Load original dataset
d_name_without_inc = '_'.join(tmp[:-1])
dataset = load_semi_synthetic_dataset(d_name_without_inc, cache_dir=cache_dir, overwrite=overwrite)
# Do the increment part
increment_fold = float(tmp[-1][3:])
increment_X = get_shuffle_X(dataset['full']['X'], increment_fold, random_state=random_state)
# Produce target by model
models = pickle.load(open(dataset['models_path'], 'rb'))
increment_y = gen_targets(increment_X, models)
        dataset['full']['X'] = pd.concat([dataset['full']['X'], increment_X], axis=0)
# -*- coding: utf-8 -*-
# @Time : 2019/10/25 10:46
# @Author : <NAME>
# @FileName: parse_eggNOG.py
# @Usage: """non-module organisms are always lack of KEGG and GO information. A convenient method is use eggNOG server
# to annotate. This script is used for parse the eggNOG result to a user friendly format which could be
# directly applied in clusterProfile R package"""
# @E-mail: <EMAIL>
import pandas as pd
import numpy as np
import argparse
import requests
import re
import os
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:65.0) Gecko/20100101 Firefox/65.0"}
def get_html(url):
resp = requests.get(url, headers=headers)
return resp.text
def own_mapdb(org_list):
"""
    Pick pathway map numbers based on the user-specified organisms
    :param org_list: organism name list (KEGG abbreviations), e.g. ["ath", "osa", "dosa", ...]
    :return: own_maplist: the selected map numbers, e.g. ["map0001", "map0002", ..., "map1111"]
"""
if not org_list == ["all"]:
map_set = set()
for org in org_list:
tmp = get_html("http://rest.kegg.jp/list/pathway/"+org)
tmp = list(map(lambda x: x.split("\t"), tmp.split("\n")))
tmp_df = pd.DataFrame(tmp, columns=["pathway", "description"]).dropna()
map_set = map_set.union(set(tmp_df.pathway.apply(lambda x: re.search("[0-9]{5}", str(x)).group())))
map_list = list(map(lambda x: "map"+x, list(map_set)))
return map_list
else:
return None
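# Example usage (illustrative sketch): restrict the KEGG maps to a few organisms, or keep every
# map by passing ["all"]:
#   maps = own_mapdb(["ath", "osa"])  # pathway maps present in Arabidopsis or rice
#   maps = own_mapdb(["all"])         # returns None, i.e. no filtering in get_KEGGmap()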
def get_KEGGmap(own_maplist):
"""
INPUT:own_maplist: The picked map num: eg. [map0001,map0002,.....,map1111]
:return: KO2map_df:columns: [KO,pathway]
KEGGmap_df: columns: [pathway, description]
"""
# KEGG map description
KEGGmap = get_html("http://rest.kegg.jp/list/pathway/")
KEGGmap = list(map(lambda x: x.split("\t"), KEGGmap.split("\n")))
KEGGmap_df = pd.DataFrame(KEGGmap, columns=["pathway", "description"]).dropna()
KEGGmap_df["pathway"] = KEGGmap_df.pathway.apply(lambda x: x.strip("path:"))
# KEGG KO2map
KO2map = get_html("http://rest.kegg.jp/link/pathway/ko")
KO2map = list(map(lambda x: x.split("\t"), KO2map.split("\n")))
KO2map_df = pd.DataFrame(KO2map, columns=["ko", "pathway"]).dropna()
KO2map_df["pathway"] = KO2map_df.pathway.apply(lambda x: x.strip("path:"))
KO2map_df["KO"] = KO2map_df.ko.apply(lambda x: x.strip("ko:"))
if own_maplist:
KEGGmap_df = KEGGmap_df[KEGGmap_df["pathway"].isin(own_maplist)]
KO2map_df = KO2map_df[KO2map_df["pathway"].isin(own_maplist)]
return KO2map_df, KEGGmap_df
def parse_KO_f1(query_line):
"""
:param query_line: a line in eggNOG annotation file which contains a query result
:return: a dict for parse_KO function. eg. {"gene":gene name,"KO": KO}
"""
KO_list = [i for i in map(lambda x: x.lstrip("ko:"), query_line["KEGG_ko"].split(","))]
return {"gene": query_line["Query"], "KO": KO_list}
def parse_KO(df4parse, org_list):
"""
parse the KEGG part in eggNOG annotation file
:param df4parse: the pd.Dataframe object directly comes from the eggNOG annotation file(skip the comment lines, of course)
:return:the parsed KEGG annotation in pd.Dataframe.
"""
KO2map, KEGGmap = get_KEGGmap(own_mapdb(org_list))
gene2KO = df4parse[["Query", "KEGG_ko"]].dropna().apply(parse_KO_f1, axis=1, result_type="expand")
gene2KO = pd.DataFrame({'gene': gene2KO.gene.repeat(gene2KO["KO"].str.len()), 'KO': np.concatenate(gene2KO["KO"].values)})
gene2map = pd.merge(gene2KO, KO2map, on="KO", how="left")
gene2map = pd.merge(gene2map, KEGGmap, on="pathway", how="left")
return gene2map.dropna()
def parse_GO_f1(query_line):
"""
:param query_line: a line in eggNOG annotation file which contains a query result
:return: a dict for parse_KO function. eg. {"gene":gene name,"GO": KO}
"""
GO_list = [i for i in query_line["GOs"].split(",")]
return {"gene": query_line["Query"], "GO": GO_list}
def parse_GO(df4parse, GO_path):
"""
parse the GO part in eggNOG annotation file
:param df4parse: the pd.Dataframe object directly comes from the eggNOG annotation file(skip the comment lines, of course)
:return:the parsed GO annotation in pd.Dataframe.
"""
gene2GO = df4parse[["Query", "GOs"]].dropna().apply(parse_GO_f1, axis=1, result_type="expand")
gene2GO = pd.DataFrame(
{'gene': gene2GO.gene.repeat(gene2GO["GO"].str.len()), 'GO': np.concatenate(gene2GO["GO"].values)})
go_df = pd.read_table(GO_path)
go_df.drop(columns=['Description'], inplace=True)
gene2GO = pd.merge(gene2GO, go_df, on="GO", how="left")
return gene2GO
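# Example usage (illustrative sketch; the file paths are placeholders): the parsed tables can be
# written out as TERM2GENE-style inputs for clusterProfiler, e.g.
#   gene2map = parse_KO(file4parse, ["ath"])
#   gene2map[["pathway", "gene"]].to_csv("KEGG_term2gene.csv", index=False)
#   gene2GO = parse_GO(file4parse, "go_term_table.tsv")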
def main(input_file, out_dir, go_file, org_list):
    file4parse = pd.read_csv(input_file, sep="\t", comment="#", usecols=[0, 6, 8], names=['Query', 'GOs', "KEGG_ko"])
import pandas as pd
import tensorflow as tf
# from IPython.display import clear_output
from matplotlib import pyplot as plt
from sklearn.metrics import roc_curve
# Load dataset.
dftrain = pd.read_csv('https://storage.googleapis.com/tf-datasets/titanic/train.csv')
dfeval = pd.read_csv('https://storage.googleapis.com/tf-datasets/titanic/eval.csv')
y_train = dftrain.pop('survived')
y_eval = dfeval.pop('survived')
# dftrain.head()
# dftrain.describe()
dftrain.age.hist(bins=20)
dftrain.sex.value_counts().plot(kind='barh')
dftrain['class'].value_counts().plot(kind='barh')
pd.concat([dftrain, y_train], axis=1).groupby('sex').survived.mean().plot(kind='barh').set_xlabel('% survive')
CATEGORICAL_COLUMNS = ['sex', 'n_siblings_spouses', 'parch', 'class', 'deck',
'embark_town', 'alone']
NUMERIC_COLUMNS = ['age', 'fare']
feature_columns = []
for feature_name in CATEGORICAL_COLUMNS:
vocabulary = dftrain[feature_name].unique()
feature_columns.append(tf.feature_column.categorical_column_with_vocabulary_list(feature_name, vocabulary))
for feature_name in NUMERIC_COLUMNS:
feature_columns.append(tf.feature_column.numeric_column(feature_name, dtype=tf.float32))
def make_input_fn(data_df, label_df, num_epochs=10, shuffle=True, batch_size=32):
def input_function():
ds = tf.data.Dataset.from_tensor_slices((dict(data_df), label_df))
if shuffle:
ds = ds.shuffle(1000)
ds = ds.batch(batch_size).repeat(num_epochs)
return ds
return input_function
train_input_fn = make_input_fn(dftrain, y_train)
eval_input_fn = make_input_fn(dfeval, y_eval, num_epochs=1, shuffle=False)
ds = make_input_fn(dftrain, y_train, batch_size=10)()
for feature_batch, label_batch in ds.take(1):
print('Some feature keys:', list(feature_batch.keys()))
print()
print('A batch of class:', feature_batch['class'].numpy())
print()
print('A batch of Labels:', label_batch.numpy())
age_column = feature_columns[7]
tf.keras.layers.DenseFeatures([age_column])(feature_batch).numpy()
gender_column = feature_columns[0]
tf.keras.layers.DenseFeatures([tf.feature_column.indicator_column(gender_column)])(feature_batch).numpy()
linear_est = tf.estimator.LinearClassifier(feature_columns=feature_columns)
linear_est.train(train_input_fn)
result = linear_est.evaluate(eval_input_fn)
# clear_output()
print(result)
age_x_gender = tf.feature_column.crossed_column(['age', 'sex'], hash_bucket_size=100)
derived_feature_columns = [age_x_gender]
linear_est = tf.estimator.LinearClassifier(feature_columns=feature_columns + derived_feature_columns)
linear_est.train(train_input_fn)
result = linear_est.evaluate(eval_input_fn)
# clear_output()
print(result)
pred_dicts = list(linear_est.predict(eval_input_fn))
probs = pd.Series([pred['probabilities'][1] for pred in pred_dicts])
import os
import pandas as pd
import numpy as np
import tensorflow as tf
from sklearn import model_selection
from time import time
import argparse
import pickle
GAS_STATIONS_PATH = os.path.join('..', '..', 'data', 'raw', 'input_data', 'Eingabedaten', 'Tankstellen.csv')
GAS_PRICE_PATH = os.path.join('..', '..', 'data', 'raw', 'input_data', 'Eingabedaten', 'Benzinpreise')
SIMPLE_RNN_MODEL = 'resampled'
EVENT_RNN_MODEL = 'event'
TRAIN = 'train'
PREDICT = 'predict'
gas_stations_df = pd.read_csv(GAS_STATIONS_PATH, sep=';',
names=['id', 'Name', 'Company', 'Street', 'House_Number', 'Postalcode', 'City', 'Lat',
'Long'], index_col='id')
def parse_arguments():
global args
parser = argparse.ArgumentParser()
parser.add_argument("--model",
type=str,
default=SIMPLE_RNN_MODEL,
help="Name of model, can be one of {}".format(str([SIMPLE_RNN_MODEL, EVENT_RNN_MODEL])))
parser.add_argument("-g", "--gas_station",
type=int,
required=True,
help="Number of gas station to learn on")
parser.add_argument("--batch_size",
type=int,
default=128,
help="Batch size")
parser.add_argument("--layers",
type=int,
default=1,
help="Number of LSTM layers")
parser.add_argument("--lstm_size",
type=int,
default=80,
help="Size of LSTM hidden state")
parser.add_argument("--length_of_sequence",
default=200,
type=int,
help="Length of prediction sequences")
parser.add_argument("--future_prediction",
default=1,
type=int,
help="Choose how much into the future should be predicted, '1' means the next value")
parser.add_argument("--dense_hidden_units",
type=int,
default=100,
help="Number of hidden units in dense layer before output layer")
parser.add_argument("-m", "--memory",
default=1,
type=float,
help="Percentage of VRAM for this process, written as 0.")
parser.add_argument("-n", "--name",
type=str,
required=True,
help="Name of training")
parser.add_argument("--chkpt_path",
type=str,
required=True,
help="Path for saving the checkpoints")
parser.add_argument("--log_path",
type=str,
required=True,
help="Path for saving the logs")
parser.add_argument("--resampling",
type=str,
default='1D',
help="Resampling of data frame")
parser.add_argument('--additional_gas_stations',
nargs='+',
type=int,
help="Additional gas stations which are feed into the model",
default=[])
parser.add_argument("--sequence_stride",
type=int,
help="At every sequence stride a new sequence is extracted for the training and test data.",
default=1)
parser.add_argument("--mode",
type=str,
default=TRAIN,
help="Can be one of {}".format(str([TRAIN, PREDICT])))
parser.add_argument("--predict_n_steps",
type=int,
help="Number of steps (either frequency or number of events) that should be predicted.",
default=30)
parser.add_argument("--predict_start_point",
type=int,
help="Number of steps (either frequency or number of events) from the end of the given series "
"from where the predition should start (e.g. 30 to predict last month of available data.",
default=30)
parser.add_argument("--prediction_save_path",
type=str,
required=True,
help="Path for saving the prediction should include '.pkl' extension")
return parser.parse_args()
def predict(X, sess, features_placeholder, predict):
y = sess.run(predict, feed_dict={features_placeholder: X})
return y
def train(X_test, X_val, chkpt_path, features_placeholder, gpu_options, init, iterator, log_path, merged, next_elem,
saver, seed, train_step, name_of_training):
log_path = log_path + "/" + name_of_training
chkpt_path = chkpt_path + "/" + name_of_training + "/"
with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
if tf.train.latest_checkpoint(chkpt_path) is not None:
saver.restore(sess, tf.train.latest_checkpoint(chkpt_path))
print("Restored model from " + tf.train.latest_checkpoint(chkpt_path))
else:
print("No model was loaded")
train_writer = tf.summary.FileWriter(log_path + '/train', sess.graph)
test_writer = tf.summary.FileWriter(log_path + '/test')
val_writer = tf.summary.FileWriter(log_path + '/val')
sess.run(iterator.initializer, feed_dict={seed: int(time())})
sess.run(init)
training_cycles = 0
while True:
try:
elem = sess.run(next_elem)
except tf.errors.OutOfRangeError:
                    # TODO: Save trained model
sess.run(iterator.initializer, feed_dict={seed: int(time())})
elem = sess.run(next_elem)
if training_cycles == 0:
summary = sess.run(merged, feed_dict={features_placeholder: elem})
train_writer.add_summary(summary, training_cycles)
if training_cycles % 50 == 0:
summary_test = sess.run(merged, feed_dict={features_placeholder: X_test})
test_writer.add_summary(summary_test, training_cycles)
summary_val = sess.run(merged, feed_dict={features_placeholder: X_val})
val_writer.add_summary(summary_val, training_cycles)
summary, _ = sess.run([merged, train_step], feed_dict={features_placeholder: elem})
if training_cycles % 5000 == 0:
if not os.path.exists(chkpt_path):
os.makedirs(chkpt_path)
save_path = saver.save(sess, chkpt_path + name_of_training + "-" + str(training_cycles) + '.ckpt')
training_cycles += 1
train_writer.add_summary(summary, training_cycles)
def define_simple_rnn_model(dense_hidden_units, future_prediction, length_of_each_sequence, number_of_layers, lstm_size,
number_of_additional_gas_stations=0):
features_placeholder = tf.placeholder(tf.float32,
[None, length_of_each_sequence, 1 + number_of_additional_gas_stations],
name='features_placeholder')
seed = tf.placeholder(tf.int64, shape=[])
def lstm_cell():
return tf.nn.rnn_cell.LSTMCell(lstm_size)
stacked_lstm = [lstm_cell() for _ in range(number_of_layers)]
# create a RNN cell composed sequentially of a number of RNNCells
multi_rnn_cell = tf.nn.rnn_cell.MultiRNNCell(stacked_lstm)
# 'outputs' is a tensor of shape [batch_size, max_time, 256]
# 'state' is a N-tuple where N is the number of LSTMCells containing a
# tf.contrib.rnn.LSTMStateTuple for each cell
outputs, state = tf.nn.dynamic_rnn(cell=multi_rnn_cell,
inputs=features_placeholder[:, :-future_prediction],
dtype=tf.float32)
# final_state = state[-1]
dense = tf.layers.dense(outputs[:, -1], dense_hidden_units, activation=tf.nn.relu, name='dense',
kernel_initializer=tf.truncated_normal_initializer(mean=0.001, stddev=0.1),
bias_initializer=tf.truncated_normal_initializer(mean=0.01, stddev=0.1))
y_ = tf.layers.dense(dense, 1 + number_of_additional_gas_stations, activation=None, name='y_',
kernel_initializer=tf.truncated_normal_initializer(mean=0.001, stddev=0.1),
bias_initializer=tf.truncated_normal_initializer(mean=0.01, stddev=0.1))
learning_rate = 1e-4
optimizer = tf.train.AdamOptimizer(learning_rate)
predicted_labels = y_
true_labels = features_placeholder[:, -1]
assert predicted_labels.shape[1] == true_labels.shape[1], str(predicted_labels.shape[1]) + ' ' + str(
true_labels.shape[1])
cost = tf.reduce_sum(tf.squared_difference(predicted_labels, true_labels), name='cost')
tf.summary.scalar(name='cost', tensor=cost)
grads_and_vars = optimizer.compute_gradients(loss=cost) # list of (gradient, variable) tuples
train_step = optimizer.apply_gradients(grads_and_vars)
mse = tf.sqrt(tf.reduce_mean(tf.square(tf.subtract(true_labels, predicted_labels))))
tf.summary.scalar(tensor=mse, name='MSE')
return features_placeholder, seed, train_step, y_
def define_event_rnn_model(dense_hidden_units, length_of_each_sequence, number_of_layers, lstm_size):
"""
Placeholder expects first the price and the time to the next change as input.
:param dense_hidden_units:
:param future_prediction:
:param length_of_each_sequence:
:param number_of_layers:
:param lstm_size:
:return:
"""
# [price, time_to_last_event]
features_placeholder = tf.placeholder(tf.float32, [None, length_of_each_sequence, 2], name='features_placeholder')
seed = tf.placeholder(tf.int64, shape=[])
def lstm_cell():
return tf.nn.rnn_cell.LSTMCell(lstm_size)
stacked_lstm = [lstm_cell() for _ in range(number_of_layers)]
# create a RNN cell composed sequentially of a number of RNNCells
multi_rnn_cell = tf.nn.rnn_cell.MultiRNNCell(stacked_lstm)
# 'outputs' is a tensor of shape [batch_size, max_time, 256]
# 'state' is a N-tuple where N is the number of LSTMCells containing a
# tf.contrib.rnn.LSTMStateTuple for each cell
outputs, state = tf.nn.dynamic_rnn(cell=multi_rnn_cell,
inputs=features_placeholder[:, :-1],
dtype=tf.float32)
# final_state = state[-1]
dense = tf.layers.dense(outputs[:, -1], dense_hidden_units, activation=tf.nn.relu, name='dense',
kernel_initializer=tf.truncated_normal_initializer(mean=0.001, stddev=0.1),
bias_initializer=tf.truncated_normal_initializer(mean=0.01, stddev=0.1))
dense_time_of_change = tf.layers.dense(dense, dense_hidden_units, activation=tf.nn.relu,
name='dense_time_of_change',
kernel_initializer=tf.truncated_normal_initializer(mean=0.001, stddev=0.1),
bias_initializer=tf.truncated_normal_initializer(mean=0.01, stddev=0.1))
y_time_of_change = tf.layers.dense(dense_time_of_change, 1, activation=None, name='y_time_of_change',
kernel_initializer=tf.truncated_normal_initializer(mean=0.001, stddev=0.1),
bias_initializer=tf.truncated_normal_initializer(mean=0.01, stddev=0.1))
dense_price = tf.layers.dense(dense, dense_hidden_units, activation=tf.nn.relu,
name='dense_price',
kernel_initializer=tf.truncated_normal_initializer(mean=0.001, stddev=0.1),
bias_initializer=tf.truncated_normal_initializer(mean=0.01, stddev=0.1))
y_price = tf.layers.dense(dense_price, 1, activation=None, name='y_dense_price',
kernel_initializer=tf.truncated_normal_initializer(mean=0.001, stddev=0.1),
bias_initializer=tf.truncated_normal_initializer(mean=0.01, stddev=0.1))
learning_rate = 1e-4
optimizer = tf.train.AdamOptimizer(learning_rate)
predicted_labels = tf.concat([y_price, y_time_of_change], 1)
true_labels = features_placeholder[:, -1]
assert predicted_labels.shape[1] == true_labels.shape[1], str(predicted_labels.shape[1]) + ' ' + str(
true_labels.shape[1])
# We could introduce linear coefficients for the cost function
cost = tf.reduce_sum(tf.squared_difference(predicted_labels, true_labels), name='cost')
tf.summary.scalar(name='cost', tensor=cost)
grads_and_vars = optimizer.compute_gradients(loss=cost) # list of (gradient, variable) tuples
train_step = optimizer.apply_gradients(grads_and_vars)
mse = tf.sqrt(tf.reduce_mean(tf.square(tf.subtract(true_labels, predicted_labels))))
tf.summary.scalar(tensor=mse, name='MSE')
return features_placeholder, seed, train_step, y_price, y_time_of_change
def calculate_samples(gas_station_resampled, length_of_each_sequence):
features = []
sequence = []
for value in gas_station_resampled.values:
if len(sequence) < length_of_each_sequence - 1:
sequence.append([value[0]])
elif len(sequence) < length_of_each_sequence:
sequence.append([value[0]])
assert len(sequence) == length_of_each_sequence
features.append(sequence)
sequence = []
else:
raise NotImplementedError()
return features
def combine_channels_and_generate_sequences(gas_station_series, sequence_length, sequence_stride=1):
# input: list of gas stations [[t1,t2,t3,...],[b1,b2,v3...]]
assert sequence_length >= 1, "Must use a positive sequence length, used {}.".format(sequence_length)
assert sequence_stride > 0, "Must use a positive stride, used {}.".format(sequence_stride)
features = []
gas_stations = np.array(gas_station_series)
available_timesteps = gas_stations.T.shape[0]
assert sequence_length <= available_timesteps, "Sequence length must be longer than the available time steps in the sequence."
assert sequence_stride < available_timesteps, "Stride must be smaller than the length of the time series"
for i in range(available_timesteps - sequence_length)[::sequence_stride]:
features.append(gas_stations.T[i:i + sequence_length])
return np.array(features)
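# Example usage (illustrative sketch; the prices are made up): turn two aligned gas-station
# price series into overlapping sequences of length 4 with stride 2:
#   series = [[1.30, 1.31, 1.29, 1.32, 1.33, 1.31],
#             [1.40, 1.41, 1.39, 1.42, 1.43, 1.41]]
#   seqs = combine_channels_and_generate_sequences(series, sequence_length=4, sequence_stride=2)
#   # seqs.shape == (1, 4, 2): one sequence, 4 time steps, 2 channels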
def build_dataset(X_train, batch_size, seed):
dataset = tf.data.Dataset.from_tensor_slices(X_train)
dataset = dataset.shuffle(seed=seed, buffer_size=len(X_train) + 1)
dataset = dataset.batch(batch_size)
iterator = dataset.make_initializable_iterator()
next_elem = iterator.get_next()
return iterator, next_elem
def load_station(gas_station_id):
p = os.path.join(GAS_PRICE_PATH, '{}.csv'.format(gas_station_id))
try:
return pd.read_csv(p, names=['Timestamp', 'Price'], index_col='Timestamp', parse_dates=['Timestamp'], sep=';')
except FileNotFoundError:
raise ValueError(
            'You tried to retrieve the history for gas station with id {}, but file {} was not found.'.format(
gas_station_id, p))
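# Example usage (illustrative sketch; station id 1 is a placeholder): load one station's price
# history and resample it to a daily series before building sequences:
#   prices = load_station(1)
#   daily = prices.resample('1D').mean().ffill()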
def build_df_for_all_gas_stations():
raise NotImplementedError()
min_start_date = gas_stations_df.min
max_end_date = gas_stations_df.max
for gas_station_row in gas_stations_df.iterrows():
try:
gas_station = pd.read_csv(os.path.join(GAS_PRICE_PATH, '{}.csv'.format(gas_station_row)),
names=['Timestamp', 'Price'], index_col='Timestamp', parse_dates=['Timestamp'],
sep=';')
if min_start_date > gas_station.index.min:
min_start_date = gas_station.index.min
if max_end_date < gas_station.index.max:
max_end_date = gas_station.index.max
except FileNotFoundError:
pass
gas_stations = []
def main():
args = parse_arguments()
model = args.model
gas_station_id = args.gas_station
gas_station_df = load_station(gas_station_id)
# overall model params
lstm_size = args.lstm_size
number_of_layers = args.layers
batch_size = args.batch_size
dense_hidden_units = args.dense_hidden_units
length_of_each_sequence = args.length_of_sequence
frequency = args.resampling
sequence_stride = args.sequence_stride
    # TODO: make this a training arg
start_from_day = 12
if model == EVENT_RNN_MODEL:
if len(args.additional_gas_stations) > 0:
raise NotImplementedError("Close gas stations not available for this model.")
if args.future_prediction != 1:
raise NotImplementedError("This model will only predict one next event.")
        deltas = pd.Series(gas_station_df.index[1:] - gas_station_df.index[:-1])
# Import Library
import pandas as pd
import numpy as np
import os
from pathlib import Path
import sys
import multiprocessing
from manage_path import *
def read_data(file_name,low_memory=False,memory_map=True,engine='c'):
"""Read FINRA TRACE data and perform date conversion and data merging with Mergent FISD"""
# Prepare data file path
dataset_directory = get_dataset_directory()
file_path = dataset_directory / file_name
# Only get the field we want
field_of_interest_datetime = ['BOND_SYM_ID','CUSIP_ID','SCRTY_TYPE_CD','ENTRD_VOL_QT','RPTD_PR','RPT_SIDE_CD' \
,'TRD_EXCTN_DT','EXCTN_TM','TRD_RPT_DT','TRD_RPT_TM', 'Report_Dealer_Index'\
,'Contra_Party_Index','TRC_ST']
data_dtype={'BOND_SYM_ID': str, 'CUSIP_ID': str,'SCRTY_TYPE_CD':str, 'ENTRD_VOL_QT': np.float64, 'RPTD_PR': np.float32 \
,'RPT_SIDE_CD':str, 'Report_Dealer_Index': str,'Contra_Party_Index': str, 'TRC_ST':str}
parse_dates = {'TRD_RPT_DTTM':['TRD_RPT_DT','TRD_RPT_TM'],'TRD_EXCTN_DTTM':['TRD_EXCTN_DT','EXCTN_TM']}
    date_parser = lambda x: pd.to_datetime(x, format='%Y%m%d %H%M%S', errors='coerce')
#!/bin/env python3
"""
Copyright (C) 2021 - University of Mons and University Antwerpen
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
This file reads a CSV file produced by a JSON benchmarks run and extract statistics.
These statistics are written in a table that can immediately be imported in LaTeX.
"""
import sys
from collections import defaultdict
import pandas
import numpy as np
filepath = sys.argv[1]
timelimit_in_s = int(sys.argv[2])
name = sys.argv[3]
df = pandas.read_csv(filepath)
# Retrieving number of timeouts and errors
column = "Total time (ms)"
n_timeouts = len(df[df[column] == "Timeout"])
n_errors = len(df[df[column] == "Error"])
# Number of queries, sizes of counterexample, of sets, and of learnt ROCA
columns = [
"Membership queries",
"Counter value queries",
"Partial equivalence queries",
"Equivalence queries",
"Rounds",
"Openness",
"Sigma-inconsistencies",
"Bottom-inconsistencies",
"Mismatches",
"Length longest cex",
"|R|",
"|S|",
"|? \\ S|",
"# of bin rows",
"Result alphabet size",
"Result ROCA size"
]
numbers = df[columns]
numbers = numbers.drop(numbers[numbers[columns[0]] == "Timeout"].index)
numbers = numbers.drop(numbers[numbers[columns[0]] == "Error"].index)
for column in columns:
numbers[column] = pandas.to_numeric(numbers[column])
numbers["|Ŝ|"] = numbers["|S|"] + numbers["|? \\ S|"]
numbers_mean = numbers.mean()
columns = [
"Total time (ms)",
]
total_time = df[columns]
total_time = total_time.replace(to_replace="Timeout", value=timelimit_in_s * 1000) # * 1000 because we store milliseconds
total_time = total_time.drop(total_time[total_time[columns[0]] == "Error"].index)
for column in columns:
total_time[column] = pandas.to_numeric(total_time[column])
# From milliseconds to seconds
total_time[column] = total_time[column].transform(lambda x: x / 1000)
total_time_mean = total_time.mean()
columns = [
"ROCA counterexample time (ms)",
"DFA counterexample time (ms)",
"Learning DFA time (ms)",
"Table time (ms)",
"Finding descriptions (ms)"
]
time = df[columns]
time = time.drop(time[time[columns[0]] == "Timeout"].index)
time = time.drop(time[time[columns[0]] == "Error"].index)
for column in columns:
    time[column] = pandas.to_numeric(time[column])
# -*- coding: utf-8 -*-
try:
import json
except ImportError:
import simplejson as json
import math
import pytz
import locale
import pytest
import time
import datetime
import calendar
import re
import decimal
import dateutil
from functools import partial
from pandas.compat import range, StringIO, u
from pandas._libs.tslib import Timestamp
import pandas._libs.json as ujson
import pandas.compat as compat
import numpy as np
from pandas import DataFrame, Series, Index, NaT, DatetimeIndex, date_range
import pandas.util.testing as tm
json_unicode = (json.dumps if compat.PY3
else partial(json.dumps, encoding="utf-8"))
def _clean_dict(d):
"""
Sanitize dictionary for JSON by converting all keys to strings.
Parameters
----------
d : dict
The dictionary to convert.
Returns
-------
cleaned_dict : dict
"""
return {str(k): v for k, v in compat.iteritems(d)}
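# Example (illustrative): _clean_dict stringifies keys so that a dict survives a JSON round trip,
# e.g. _clean_dict({1: "a", 2.5: "b"}) -> {"1": "a", "2.5": "b"}.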
@pytest.fixture(params=[
None, # Column indexed by default.
"split",
"records",
"values",
"index"])
def orient(request):
return request.param
@pytest.fixture(params=[None, True])
def numpy(request):
return request.param
class TestUltraJSONTests(object):
@pytest.mark.skipif(compat.is_platform_32bit(),
reason="not compliant on 32-bit, xref #15865")
def test_encode_decimal(self):
sut = decimal.Decimal("1337.1337")
encoded = ujson.encode(sut, double_precision=15)
decoded = ujson.decode(encoded)
assert decoded == 1337.1337
sut = decimal.Decimal("0.95")
encoded = ujson.encode(sut, double_precision=1)
assert encoded == "1.0"
decoded = ujson.decode(encoded)
assert decoded == 1.0
sut = decimal.Decimal("0.94")
encoded = ujson.encode(sut, double_precision=1)
assert encoded == "0.9"
decoded = ujson.decode(encoded)
assert decoded == 0.9
sut = decimal.Decimal("1.95")
encoded = ujson.encode(sut, double_precision=1)
assert encoded == "2.0"
decoded = ujson.decode(encoded)
assert decoded == 2.0
sut = decimal.Decimal("-1.95")
encoded = ujson.encode(sut, double_precision=1)
assert encoded == "-2.0"
decoded = ujson.decode(encoded)
assert decoded == -2.0
sut = decimal.Decimal("0.995")
encoded = ujson.encode(sut, double_precision=2)
assert encoded == "1.0"
decoded = ujson.decode(encoded)
assert decoded == 1.0
sut = decimal.Decimal("0.9995")
encoded = ujson.encode(sut, double_precision=3)
assert encoded == "1.0"
decoded = ujson.decode(encoded)
assert decoded == 1.0
sut = decimal.Decimal("0.99999999999999944")
encoded = ujson.encode(sut, double_precision=15)
assert encoded == "1.0"
decoded = ujson.decode(encoded)
assert decoded == 1.0
@pytest.mark.parametrize("ensure_ascii", [True, False])
def test_encode_string_conversion(self, ensure_ascii):
string_input = "A string \\ / \b \f \n \r \t </script> &"
not_html_encoded = ('"A string \\\\ \\/ \\b \\f \\n '
'\\r \\t <\\/script> &"')
html_encoded = ('"A string \\\\ \\/ \\b \\f \\n \\r \\t '
'\\u003c\\/script\\u003e \\u0026"')
def helper(expected_output, **encode_kwargs):
output = ujson.encode(string_input,
ensure_ascii=ensure_ascii,
**encode_kwargs)
assert output == expected_output
assert string_input == json.loads(output)
assert string_input == ujson.decode(output)
# Default behavior assumes encode_html_chars=False.
helper(not_html_encoded)
# Make sure explicit encode_html_chars=False works.
helper(not_html_encoded, encode_html_chars=False)
# Make sure explicit encode_html_chars=True does the encoding.
helper(html_encoded, encode_html_chars=True)
@pytest.mark.parametrize("long_number", [
-4342969734183514, -12345678901234.56789012, -528656961.4399388
])
def test_double_long_numbers(self, long_number):
sut = {u("a"): long_number}
encoded = ujson.encode(sut, double_precision=15)
decoded = ujson.decode(encoded)
assert sut == decoded
def test_encode_non_c_locale(self):
lc_category = locale.LC_NUMERIC
# We just need one of these locales to work.
for new_locale in ("it_IT.UTF-8", "Italian_Italy"):
if tm.can_set_locale(new_locale, lc_category):
with tm.set_locale(new_locale, lc_category):
assert ujson.loads(ujson.dumps(4.78e60)) == 4.78e60
assert ujson.loads("4.78", precise_float=True) == 4.78
break
def test_decimal_decode_test_precise(self):
sut = {u("a"): 4.56}
encoded = ujson.encode(sut)
decoded = ujson.decode(encoded, precise_float=True)
assert sut == decoded
@pytest.mark.skipif(compat.is_platform_windows() and not compat.PY3,
reason="buggy on win-64 for py2")
def test_encode_double_tiny_exponential(self):
num = 1e-40
assert num == ujson.decode(ujson.encode(num))
num = 1e-100
assert num == ujson.decode(ujson.encode(num))
num = -1e-45
assert num == ujson.decode(ujson.encode(num))
num = -1e-145
assert np.allclose(num, ujson.decode(ujson.encode(num)))
@pytest.mark.parametrize("unicode_key", [
u("key1"), u("بن")
])
def test_encode_dict_with_unicode_keys(self, unicode_key):
unicode_dict = {unicode_key: u("value1")}
assert unicode_dict == ujson.decode(ujson.encode(unicode_dict))
@pytest.mark.parametrize("double_input", [
math.pi,
-math.pi # Should work with negatives too.
])
def test_encode_double_conversion(self, double_input):
output = ujson.encode(double_input)
assert round(double_input, 5) == round(json.loads(output), 5)
assert round(double_input, 5) == round(ujson.decode(output), 5)
def test_encode_with_decimal(self):
decimal_input = 1.0
output = ujson.encode(decimal_input)
assert output == "1.0"
def test_encode_array_of_nested_arrays(self):
nested_input = [[[[]]]] * 20
output = ujson.encode(nested_input)
assert nested_input == json.loads(output)
assert nested_input == ujson.decode(output)
nested_input = np.array(nested_input)
tm.assert_numpy_array_equal(nested_input, ujson.decode(
output, numpy=True, dtype=nested_input.dtype))
def test_encode_array_of_doubles(self):
doubles_input = [31337.31337, 31337.31337,
31337.31337, 31337.31337] * 10
output = ujson.encode(doubles_input)
assert doubles_input == json.loads(output)
assert doubles_input == ujson.decode(output)
tm.assert_numpy_array_equal(np.array(doubles_input),
ujson.decode(output, numpy=True))
def test_double_precision(self):
double_input = 30.012345678901234
output = ujson.encode(double_input, double_precision=15)
assert double_input == json.loads(output)
assert double_input == ujson.decode(output)
for double_precision in (3, 9):
output = ujson.encode(double_input,
double_precision=double_precision)
rounded_input = round(double_input, double_precision)
assert rounded_input == json.loads(output)
assert rounded_input == ujson.decode(output)
@pytest.mark.parametrize("invalid_val", [
20, -1, "9", None
])
def test_invalid_double_precision(self, invalid_val):
double_input = 30.12345678901234567890
expected_exception = (ValueError if isinstance(invalid_val, int)
else TypeError)
with pytest.raises(expected_exception):
ujson.encode(double_input, double_precision=invalid_val)
def test_encode_string_conversion2(self):
string_input = "A string \\ / \b \f \n \r \t"
output = ujson.encode(string_input)
assert string_input == json.loads(output)
assert string_input == ujson.decode(output)
assert output == '"A string \\\\ \\/ \\b \\f \\n \\r \\t"'
@pytest.mark.parametrize("unicode_input", [
"Räksmörgås اسامة بن محمد بن عوض بن لادن",
"\xe6\x97\xa5\xd1\x88"
])
def test_encode_unicode_conversion(self, unicode_input):
enc = ujson.encode(unicode_input)
dec = ujson.decode(enc)
assert enc == json_unicode(unicode_input)
assert dec == json.loads(enc)
def test_encode_control_escaping(self):
escaped_input = "\x19"
enc = ujson.encode(escaped_input)
dec = ujson.decode(enc)
assert escaped_input == dec
assert enc == json_unicode(escaped_input)
def test_encode_unicode_surrogate_pair(self):
surrogate_input = "\xf0\x90\x8d\x86"
enc = ujson.encode(surrogate_input)
dec = ujson.decode(enc)
assert enc == json_unicode(surrogate_input)
assert dec == json.loads(enc)
def test_encode_unicode_4bytes_utf8(self):
four_bytes_input = "\xf0\x91\x80\xb0TRAILINGNORMAL"
enc = ujson.encode(four_bytes_input)
dec = ujson.decode(enc)
assert enc == json_unicode(four_bytes_input)
assert dec == json.loads(enc)
def test_encode_unicode_4bytes_utf8highest(self):
four_bytes_input = "\xf3\xbf\xbf\xbfTRAILINGNORMAL"
enc = ujson.encode(four_bytes_input)
dec = ujson.decode(enc)
assert enc == json_unicode(four_bytes_input)
assert dec == json.loads(enc)
def test_encode_array_in_array(self):
arr_in_arr_input = [[[[]]]]
output = ujson.encode(arr_in_arr_input)
assert arr_in_arr_input == json.loads(output)
assert output == json.dumps(arr_in_arr_input)
assert arr_in_arr_input == ujson.decode(output)
tm.assert_numpy_array_equal(np.array(arr_in_arr_input),
ujson.decode(output, numpy=True))
@pytest.mark.parametrize("num_input", [
31337,
-31337, # Negative number.
-9223372036854775808 # Large negative number.
])
def test_encode_num_conversion(self, num_input):
output = ujson.encode(num_input)
assert num_input == json.loads(output)
assert output == json.dumps(num_input)
assert num_input == ujson.decode(output)
def test_encode_list_conversion(self):
list_input = [1, 2, 3, 4]
output = ujson.encode(list_input)
assert list_input == json.loads(output)
assert list_input == ujson.decode(output)
tm.assert_numpy_array_equal(np.array(list_input),
ujson.decode(output, numpy=True))
def test_encode_dict_conversion(self):
dict_input = {"k1": 1, "k2": 2, "k3": 3, "k4": 4}
output = ujson.encode(dict_input)
assert dict_input == json.loads(output)
assert dict_input == ujson.decode(output)
@pytest.mark.parametrize("builtin_value", [None, True, False])
def test_encode_builtin_values_conversion(self, builtin_value):
output = ujson.encode(builtin_value)
assert builtin_value == json.loads(output)
assert output == json.dumps(builtin_value)
assert builtin_value == ujson.decode(output)
def test_encode_datetime_conversion(self):
datetime_input = datetime.datetime.fromtimestamp(time.time())
output = ujson.encode(datetime_input, date_unit="s")
expected = calendar.timegm(datetime_input.utctimetuple())
assert int(expected) == json.loads(output)
assert int(expected) == ujson.decode(output)
def test_encode_date_conversion(self):
date_input = datetime.date.fromtimestamp(time.time())
output = ujson.encode(date_input, date_unit="s")
tup = (date_input.year, date_input.month, date_input.day, 0, 0, 0)
expected = calendar.timegm(tup)
assert int(expected) == json.loads(output)
assert int(expected) == ujson.decode(output)
@pytest.mark.parametrize("test", [
datetime.time(),
datetime.time(1, 2, 3),
datetime.time(10, 12, 15, 343243),
])
def test_encode_time_conversion_basic(self, test):
output = ujson.encode(test)
expected = '"{iso}"'.format(iso=test.isoformat())
assert expected == output
def test_encode_time_conversion_pytz(self):
# see gh-11473: to_json segfaults with timezone-aware datetimes
test = datetime.time(10, 12, 15, 343243, pytz.utc)
output = ujson.encode(test)
expected = '"{iso}"'.format(iso=test.isoformat())
assert expected == output
def test_encode_time_conversion_dateutil(self):
# see gh-11473: to_json segfaults with timezone-aware datetimes
test = datetime.time(10, 12, 15, 343243, dateutil.tz.tzutc())
output = ujson.encode(test)
expected = '"{iso}"'.format(iso=test.isoformat())
assert expected == output
@pytest.mark.parametrize("decoded_input", [
NaT,
np.datetime64("NaT"),
np.nan,
np.inf,
-np.inf
])
def test_encode_as_null(self, decoded_input):
assert ujson.encode(decoded_input) == "null", "Expected null"
def test_datetime_units(self):
val = datetime.datetime(2013, 8, 17, 21, 17, 12, 215504)
stamp = Timestamp(val)
roundtrip = ujson.decode(ujson.encode(val, date_unit='s'))
assert roundtrip == stamp.value // 10**9
roundtrip = ujson.decode(ujson.encode(val, date_unit='ms'))
assert roundtrip == stamp.value // 10**6
roundtrip = ujson.decode(ujson.encode(val, date_unit='us'))
assert roundtrip == stamp.value // 10**3
roundtrip = ujson.decode(ujson.encode(val, date_unit='ns'))
assert roundtrip == stamp.value
pytest.raises(ValueError, ujson.encode, val, date_unit='foo')
def test_encode_to_utf8(self):
unencoded = "\xe6\x97\xa5\xd1\x88"
enc = ujson.encode(unencoded, ensure_ascii=False)
dec = ujson.decode(enc)
assert enc == json_unicode(unencoded, ensure_ascii=False)
assert dec == json.loads(enc)
def test_decode_from_unicode(self):
unicode_input = u("{\"obj\": 31337}")
dec1 = ujson.decode(unicode_input)
dec2 = ujson.decode(str(unicode_input))
assert dec1 == dec2
def test_encode_recursion_max(self):
# 8 is the max recursion depth
class O2(object):
member = 0
pass
class O1(object):
member = 0
pass
decoded_input = O1()
decoded_input.member = O2()
decoded_input.member.member = decoded_input
with pytest.raises(OverflowError):
ujson.encode(decoded_input)
def test_decode_jibberish(self):
jibberish = "fdsa sda v9sa fdsa"
with pytest.raises(ValueError):
ujson.decode(jibberish)
@pytest.mark.parametrize("broken_json", [
"[", # Broken array start.
"{", # Broken object start.
"]", # Broken array end.
"}", # Broken object end.
])
def test_decode_broken_json(self, broken_json):
with pytest.raises(ValueError):
ujson.decode(broken_json)
@pytest.mark.parametrize("too_big_char", [
"[",
"{",
])
def test_decode_depth_too_big(self, too_big_char):
with pytest.raises(ValueError):
ujson.decode(too_big_char * (1024 * 1024))
@pytest.mark.parametrize("bad_string", [
"\"TESTING", # Unterminated.
"\"TESTING\\\"", # Unterminated escape.
"tru", # Broken True.
"fa", # Broken False.
"n", # Broken None.
])
def test_decode_bad_string(self, bad_string):
with pytest.raises(ValueError):
ujson.decode(bad_string)
@pytest.mark.parametrize("broken_json", [
'{{1337:""}}',
'{{"key":"}',
'[[[true',
])
def test_decode_broken_json_leak(self, broken_json):
for _ in range(1000):
with pytest.raises(ValueError):
ujson.decode(broken_json)
@pytest.mark.parametrize("invalid_dict", [
"{{{{31337}}}}", # No key.
"{{{{\"key\":}}}}", # No value.
"{{{{\"key\"}}}}", # No colon or value.
])
def test_decode_invalid_dict(self, invalid_dict):
with pytest.raises(ValueError):
ujson.decode(invalid_dict)
@pytest.mark.parametrize("numeric_int_as_str", [
"31337", "-31337" # Should work with negatives.
])
def test_decode_numeric_int(self, numeric_int_as_str):
assert int(numeric_int_as_str) == ujson.decode(numeric_int_as_str)
@pytest.mark.skipif(compat.PY3, reason="only PY2")
def test_encode_unicode_4bytes_utf8_fail(self):
with pytest.raises(OverflowError):
ujson.encode("\xfd\xbf\xbf\xbf\xbf\xbf")
def test_encode_null_character(self):
wrapped_input = "31337 \x00 1337"
output = ujson.encode(wrapped_input)
assert wrapped_input == json.loads(output)
assert output == json.dumps(wrapped_input)
assert wrapped_input == ujson.decode(output)
alone_input = "\x00"
output = ujson.encode(alone_input)
assert alone_input == json.loads(output)
assert output == json.dumps(alone_input)
assert alone_input == ujson.decode(output)
assert '" \\u0000\\r\\n "' == ujson.dumps(u(" \u0000\r\n "))
def test_decode_null_character(self):
wrapped_input = "\"31337 \\u0000 31337\""
assert ujson.decode(wrapped_input) == json.loads(wrapped_input)
def test_encode_list_long_conversion(self):
long_input = [9223372036854775807, 9223372036854775807,
9223372036854775807, 9223372036854775807,
9223372036854775807, 9223372036854775807]
output = ujson.encode(long_input)
assert long_input == json.loads(output)
assert long_input == ujson.decode(output)
tm.assert_numpy_array_equal(np.array(long_input),
ujson.decode(output, numpy=True,
dtype=np.int64))
def test_encode_long_conversion(self):
long_input = 9223372036854775807
output = ujson.encode(long_input)
assert long_input == json.loads(output)
assert output == json.dumps(long_input)
assert long_input == ujson.decode(output)
@pytest.mark.parametrize("int_exp", [
"1337E40", "1.337E40", "1337E+9", "1.337e+40", "1.337E-4"
])
def test_decode_numeric_int_exp(self, int_exp):
assert ujson.decode(int_exp) == json.loads(int_exp)
def test_dump_to_file(self):
f = StringIO()
ujson.dump([1, 2, 3], f)
assert "[1,2,3]" == f.getvalue()
def test_dump_to_file_like(self):
class FileLike(object):
def __init__(self):
self.bytes = ''
def write(self, data_bytes):
self.bytes += data_bytes
f = FileLike()
ujson.dump([1, 2, 3], f)
assert "[1,2,3]" == f.bytes
def test_dump_file_args_error(self):
with pytest.raises(TypeError):
ujson.dump([], "")
def test_load_file(self):
data = "[1,2,3,4]"
exp_data = [1, 2, 3, 4]
f = StringIO(data)
assert exp_data == ujson.load(f)
f = StringIO(data)
tm.assert_numpy_array_equal(np.array(exp_data),
ujson.load(f, numpy=True))
def test_load_file_like(self):
class FileLike(object):
def read(self):
try:
self.end
except AttributeError:
self.end = True
return "[1,2,3,4]"
exp_data = [1, 2, 3, 4]
f = FileLike()
assert exp_data == ujson.load(f)
f = FileLike()
tm.assert_numpy_array_equal(np.array(exp_data),
ujson.load(f, numpy=True))
def test_load_file_args_error(self):
with pytest.raises(TypeError):
ujson.load("[]")
def test_version(self):
assert re.match(r'^\d+\.\d+(\.\d+)?$', ujson.__version__), \
"ujson.__version__ must be a string like '1.4.0'"
def test_encode_numeric_overflow(self):
with pytest.raises(OverflowError):
ujson.encode(12839128391289382193812939)
def test_encode_numeric_overflow_nested(self):
class Nested(object):
x = 12839128391289382193812939
for _ in range(0, 100):
with pytest.raises(OverflowError):
ujson.encode(Nested())
@pytest.mark.parametrize("val", [
3590016419, 2**31, 2**32, (2**32) - 1
])
def test_decode_number_with_32bit_sign_bit(self, val):
# Test that numbers that fit within 32 bits but would have the
# sign bit set (2**31 <= x < 2**32) are decoded properly.
doc = '{{"id": {val}}}'.format(val=val)
assert ujson.decode(doc)["id"] == val
def test_encode_big_escape(self):
# Make sure no Exception is raised.
for _ in range(10):
base = '\u00e5'.encode("utf-8") if compat.PY3 else "\xc3\xa5"
escape_input = base * 1024 * 1024 * 2
ujson.encode(escape_input)
def test_decode_big_escape(self):
# Make sure no Exception is raised.
for _ in range(10):
base = '\u00e5'.encode("utf-8") if compat.PY3 else "\xc3\xa5"
quote = compat.str_to_bytes("\"")
escape_input = quote + (base * 1024 * 1024 * 2) + quote
ujson.decode(escape_input)
def test_to_dict(self):
d = {u("key"): 31337}
class DictTest(object):
def toDict(self):
return d
o = DictTest()
output = ujson.encode(o)
dec = ujson.decode(output)
assert dec == d
def test_default_handler(self):
class _TestObject(object):
def __init__(self, val):
self.val = val
@property
def recursive_attr(self):
return _TestObject("recursive_attr")
def __str__(self):
return str(self.val)
pytest.raises(OverflowError, ujson.encode, _TestObject("foo"))
assert '"foo"' == ujson.encode(_TestObject("foo"),
default_handler=str)
def my_handler(_):
return "foobar"
assert '"foobar"' == ujson.encode(_TestObject("foo"),
default_handler=my_handler)
def my_handler_raises(_):
raise TypeError("I raise for anything")
with pytest.raises(TypeError, match="I raise for anything"):
ujson.encode(_TestObject("foo"), default_handler=my_handler_raises)
def my_int_handler(_):
return 42
assert ujson.decode(ujson.encode(_TestObject("foo"),
default_handler=my_int_handler)) == 42
def my_obj_handler(_):
return datetime.datetime(2013, 2, 3)
assert (ujson.decode(ujson.encode(datetime.datetime(2013, 2, 3))) ==
ujson.decode(ujson.encode(_TestObject("foo"),
default_handler=my_obj_handler)))
obj_list = [_TestObject("foo"), _TestObject("bar")]
assert (json.loads(json.dumps(obj_list, default=str)) ==
ujson.decode(ujson.encode(obj_list, default_handler=str)))
class TestNumpyJSONTests(object):
@pytest.mark.parametrize("bool_input", [True, False])
def test_bool(self, bool_input):
b = np.bool(bool_input)
assert ujson.decode(ujson.encode(b)) == b
def test_bool_array(self):
bool_array = np.array([
True, False, True, True,
False, True, False, False], dtype=np.bool)
output = np.array(ujson.decode(
ujson.encode(bool_array)), dtype=np.bool)
tm.assert_numpy_array_equal(bool_array, output)
def test_int(self, any_int_dtype):
klass = np.dtype(any_int_dtype).type
num = klass(1)
assert klass(ujson.decode(ujson.encode(num))) == num
def test_int_array(self, any_int_dtype):
arr = np.arange(100, dtype=np.int)
arr_input = arr.astype(any_int_dtype)
arr_output = np.array(ujson.decode(ujson.encode(arr_input)),
dtype=any_int_dtype)
tm.assert_numpy_array_equal(arr_input, arr_output)
def test_int_max(self, any_int_dtype):
if any_int_dtype in ("int64", "uint64") and compat.is_platform_32bit():
pytest.skip("Cannot test 64-bit integer on 32-bit platform")
klass = np.dtype(any_int_dtype).type
# uint64 max will always overflow,
# as it's encoded to signed.
if any_int_dtype == "uint64":
num = np.iinfo("int64").max
else:
num = np.iinfo(any_int_dtype).max
assert klass(ujson.decode(ujson.encode(num))) == num
def test_float(self, float_dtype):
klass = np.dtype(float_dtype).type
num = klass(256.2013)
assert klass(ujson.decode(ujson.encode(num))) == num
def test_float_array(self, float_dtype):
arr = np.arange(12.5, 185.72, 1.7322, dtype=np.float)
float_input = arr.astype(float_dtype)
float_output = np.array(ujson.decode(
ujson.encode(float_input, double_precision=15)),
dtype=float_dtype)
tm.assert_almost_equal(float_input, float_output)
def test_float_max(self, float_dtype):
klass = np.dtype(float_dtype).type
num = klass(np.finfo(float_dtype).max / 10)
tm.assert_almost_equal(klass(ujson.decode(
ujson.encode(num, double_precision=15))), num)
def test_array_basic(self):
arr = np.arange(96)
arr = arr.reshape((2, 2, 2, 2, 3, 2))
tm.assert_numpy_array_equal(
np.array(ujson.decode(ujson.encode(arr))), arr)
tm.assert_numpy_array_equal(ujson.decode(
ujson.encode(arr), numpy=True), arr)
@pytest.mark.parametrize("shape", [
(10, 10),
(5, 5, 4),
(100, 1),
])
def test_array_reshaped(self, shape):
arr = np.arange(100)
arr = arr.reshape(shape)
tm.assert_numpy_array_equal(
np.array(ujson.decode(ujson.encode(arr))), arr)
tm.assert_numpy_array_equal(ujson.decode(
ujson.encode(arr), numpy=True), arr)
def test_array_list(self):
arr_list = ["a", list(), dict(), dict(), list(),
42, 97.8, ["a", "b"], {"key": "val"}]
arr = np.array(arr_list)
tm.assert_numpy_array_equal(
np.array(ujson.decode(ujson.encode(arr))), arr)
def test_array_float(self):
dtype = np.float32
arr = np.arange(100.202, 200.202, 1, dtype=dtype)
arr = arr.reshape((5, 5, 4))
arr_out = np.array(ujson.decode(ujson.encode(arr)), dtype=dtype)
tm.assert_almost_equal(arr, arr_out)
arr_out = ujson.decode(ujson.encode(arr), numpy=True, dtype=dtype)
tm.assert_almost_equal(arr, arr_out)
def test_0d_array(self):
with pytest.raises(TypeError):
ujson.encode(np.array(1))
@pytest.mark.parametrize("bad_input,exc_type,kwargs", [
([{}, []], ValueError, {}),
([42, None], TypeError, {}),
([["a"], 42], ValueError, {}),
([42, {}, "a"], TypeError, {}),
([42, ["a"], 42], ValueError, {}),
(["a", "b", [], "c"], ValueError, {}),
([{"a": "b"}], ValueError, dict(labelled=True)),
({"a": {"b": {"c": 42}}}, ValueError, dict(labelled=True)),
([{"a": 42, "b": 23}, {"c": 17}], ValueError, dict(labelled=True))
])
def test_array_numpy_except(self, bad_input, exc_type, kwargs):
with pytest.raises(exc_type):
ujson.decode(ujson.dumps(bad_input), numpy=True, **kwargs)
def test_array_numpy_labelled(self):
labelled_input = {"a": []}
output = ujson.loads(ujson.dumps(labelled_input),
numpy=True, labelled=True)
assert (np.empty((1, 0)) == output[0]).all()
assert (np.array(["a"]) == output[1]).all()
assert output[2] is None
labelled_input = [{"a": 42}]
output = ujson.loads(ujson.dumps(labelled_input),
numpy=True, labelled=True)
assert (np.array([u("a")]) == output[2]).all()
assert (np.array([42]) == output[0]).all()
assert output[1] is None
# see gh-10837: write out the dump explicitly
# so there is no dependency on iteration order
input_dumps = ('[{"a": 42, "b":31}, {"a": 24, "c": 99}, '
'{"a": 2.4, "b": 78}]')
output = ujson.loads(input_dumps, numpy=True, labelled=True)
expected_vals = np.array(
[42, 31, 24, 99, 2.4, 78], dtype=int).reshape((3, 2))
assert (expected_vals == output[0]).all()
assert output[1] is None
assert (np.array([u("a"), "b"]) == output[2]).all()
input_dumps = ('{"1": {"a": 42, "b":31}, "2": {"a": 24, "c": 99}, '
'"3": {"a": 2.4, "b": 78}}')
output = ujson.loads(input_dumps, numpy=True, labelled=True)
expected_vals = np.array(
[42, 31, 24, 99, 2.4, 78], dtype=int).reshape((3, 2))
assert (expected_vals == output[0]).all()
assert (np.array(["1", "2", "3"]) == output[1]).all()
assert (np.array(["a", "b"]) == output[2]).all()
class TestPandasJSONTests(object):
def test_dataframe(self, orient, numpy):
if orient == "records" and numpy:
pytest.skip("Not idiomatic pandas")
df = DataFrame([[1, 2, 3], [4, 5, 6]], index=[
"a", "b"], columns=["x", "y", "z"])
encode_kwargs = {} if orient is None else dict(orient=orient)
decode_kwargs = {} if numpy is None else dict(numpy=numpy)
output = ujson.decode(ujson.encode(df, **encode_kwargs),
**decode_kwargs)
# Ensure proper DataFrame initialization.
if orient == "split":
dec = _clean_dict(output)
output = DataFrame(**dec)
else:
output = DataFrame(output)
# Corrections to enable DataFrame comparison.
if orient == "values":
df.columns = [0, 1, 2]
df.index = [0, 1]
elif orient == "records":
df.index = [0, 1]
elif orient == "index":
df = df.transpose()
tm.assert_frame_equal(output, df, check_dtype=False)
def test_dataframe_nested(self, orient):
df = DataFrame([[1, 2, 3], [4, 5, 6]], index=[
"a", "b"], columns=["x", "y", "z"])
nested = {"df1": df, "df2": df.copy()}
kwargs = {} if orient is None else dict(orient=orient)
exp = {"df1": ujson.decode(ujson.encode(df, **kwargs)),
"df2": ujson.decode(ujson.encode(df, **kwargs))}
assert ujson.decode(ujson.encode(nested, **kwargs)) == exp
def test_dataframe_numpy_labelled(self, orient):
if orient in ("split", "values"):
pytest.skip("Incompatible with labelled=True")
df = DataFrame([[1, 2, 3], [4, 5, 6]], index=[
"a", "b"], columns=["x", "y", "z"], dtype=np.int)
kwargs = {} if orient is None else dict(orient=orient)
output = DataFrame(*ujson.decode(ujson.encode(df, **kwargs),
numpy=True, labelled=True))
if orient is None:
df = df.T
elif orient == "records":
df.index = [0, 1]
tm.assert_frame_equal(output, df)
def test_series(self, orient, numpy):
s = Series([10, 20, 30, 40, 50, 60], name="series",
index=[6, 7, 8, 9, 10, 15]).sort_values()
encode_kwargs = {} if orient is None else dict(orient=orient)
decode_kwargs = {} if numpy is None else dict(numpy=numpy)
output = ujson.decode(ujson.encode(s, **encode_kwargs),
**decode_kwargs)
if orient == "split":
dec = _clean_dict(output)
output = Series(**dec)
else:
output = Series(output)
if orient in (None, "index"):
s.name = None
output = output.sort_values()
s.index = ["6", "7", "8", "9", "10", "15"]
elif orient in ("records", "values"):
s.name = None
s.index = [0, 1, 2, 3, 4, 5]
tm.assert_series_equal(output, s, check_dtype=False)
def test_series_nested(self, orient):
s = Series([10, 20, 30, 40, 50, 60], name="series",
index=[6, 7, 8, 9, 10, 15]).sort_values()
nested = {"s1": s, "s2": s.copy()}
kwargs = {} if orient is None else dict(orient=orient)
exp = {"s1": ujson.decode(ujson.encode(s, **kwargs)),
"s2": ujson.decode(ujson.encode(s, **kwargs))}
assert ujson.decode(ujson.encode(nested, **kwargs)) == exp
def test_index(self):
i = Index([23, 45, 18, 98, 43, 11], name="index")
# Column indexed.
output = Index(ujson.decode(ujson.encode(i)), name="index")
tm.assert_index_equal(i, output)
output = Index(ujson.decode(ujson.encode(i), numpy=True), name="index")
tm.assert_index_equal(i, output)
dec = _clean_dict(ujson.decode(ujson.encode(i, orient="split")))
output = Index(**dec)
tm.assert_index_equal(i, output)
assert i.name == output.name
dec = _clean_dict(ujson.decode(ujson.encode(i, orient="split")))
from collections import deque
import jax
import numpy as np
import pandas as pd
from skfda import FDataGrid
from skfda.representation.basis import Fourier
from tensorly.decomposition import tucker
from tensorly.tenalg import mode_dot
from finger_sense.utility import KL_divergence_normal, normalize
class Processor:
def __init__(self, dirs, latent_dim, n_basis, model_name='Gaussian'):
self.latent_dim = latent_dim
self.model_name = model_name
self.basis = Fourier([0, 2 * np.pi], n_basis=n_basis, period=1)
self.jacobian = jax.jacfwd(KL_divergence_normal, 0)
self.sensory_memory = deque(maxlen=2*n_basis-1)
self.init_model(dirs)
def init_model(self, dirs):
'''
Load prior knowledge and initialize the perception model
...
Parameters
----------
dirs : dict of str
Paths to the stored core and factors files (values may be None)
'''
if None in dirs.values():
self.core = None
self.factors = None
self.features = None
else:
self.core = pd.read_csv(dirs['core_dir'])
self.factors = np.load(dirs['factor_dir'], allow_pickle=True)[0:2]
self.features = {}
class_names = pd.unique(self.core['material'])
from torch.utils.data import Dataset
from cmapPy.pandasGEXpress.parse import parse
from torch.utils.data import DataLoader
from torch.utils.data.sampler import Sampler
import numpy as np
import os
import ai.causalcell.utils.register as register
from rdkit import Chem
import pandas as pd
from rdkit.Chem import AllChem
import random
import pickle
import time
paths_to_L1000_files = {
"phase1": {
"path_to_dir": "Data/L1000_PhaseI",
"path_to_data": "Data/L1000_PhaseI/GSE92742_Broad_LINCS/GSE92742_Broad_LINCS_Level5_COMPZ.MODZ_"
"n473647x12328.gctx",
"path_to_sig_info": "Data/L1000_PhaseI/GSE92742_Broad_LINCS/GSE92742_Broad_LINCS_sig_info.txt",
"path_to_gene_info": "Data/L1000_PhaseI/GSE92742_Broad_LINCS/GSE92742_Broad_LINCS_gene_info.txt",
"path_to_pert_info": "Data/L1000_PhaseI/GSE92742_Broad_LINCS/GSE92742_Broad_LINCS_pert_info.txt",
"path_to_cell_info": "Data/L1000_PhaseI/GSE92742_Broad_LINCS/GSE92742_Broad_LINCS_cell_info.txt"
}, "phase2": {
"path_to_dir": "Data/L1000_PhaseII",
"path_to_data": "Data/L1000_PhaseII/GSE70138_Broad_LINCS/Level5_COMPZ_n118050x12328_2017-03-06.gctx",
"path_to_sig_info": "Data/L1000_PhaseII/GSE70138_Broad_LINCS/sig_info_2017-03-06.txt",
"path_to_gene_info": "Data/L1000_PhaseII/GSE70138_Broad_LINCS/gene_info_2017-03-06.txt",
"path_to_pert_info": "Data/L1000_PhaseII/GSE70138_Broad_LINCS/pert_info_2017-03-06.txt",
"path_to_cell_info": "Data/L1000_PhaseII/GSE70138_Broad_LINCS/cell_info_2017-04-28.txt"
}}
"""Dict of l1000 datasets that will be initialized with the relevant dataset objects if necessary so that all
dataloaders use the same dataset object when possible, in order to instantiate only one."""
dict_of_l1000_datasets = {}
def get_fingerprint(smile, radius, nBits):
try:
if smile == "-666" or smile == "restricted":
return None
return np.array(AllChem.GetMorganFingerprintAsBitVect(Chem.MolFromSmiles(smile), radius, nBits))
except:
return None
def get_concentration(s):
if s.endswith('µM') or s.endswith('um'):
return float(s[:-3])
if s.endswith('nM'):
return 0.001 * float(s[:-3])
return -1
def get_time(s):
return float(s[:-2])
########################################################################################################################
# L1000 dataset
########################################################################################################################
class L1000Dataset(Dataset):
"""
Information on the dataset can be found here:
https://docs.google.com/document/d/1q2gciWRhVCAAnlvF2iRLuJ7whrGP6QjpsCMq1yWz7dU/edit#heading=h.usef9o7fuux3
"""
def __init__(self, phase="phase2", radius=2, nBits=1024):
"""
:param phase: phase 1 or 2 of the dataset
:param radius: parameter for fingerprints https://www.macinchem.org/reviews/clustering/clustering.php
:param nBits: desired length of fingerprints
"""
assert phase in ["phase1", "phase2", "both"]
self.both = (phase == "both")
if phase == "both":
self.phase = "phase1"
else:
self.phase = phase
# fingerprint parameters
self.radius = radius
self.nBits = nBits
# Load metadata
self.cell_info, self.landmark_gene_list, self.pert_info = self.load_metadata()
self.sig_info = self.build_environment_representation()
# Load data
if self.both:
self.data = pd.concat([self.load_data("phase1"), self.load_data("phase2")], sort=False)
else:
self.data = self.load_data(phase)
# Get dictionary of non empty environments
self.env_dict = self.get_env_dict()
def load_data(self, phase):
"""
We store the data as a single column dataframe containing numpy arrays.
It allows a considerable speedup when accessing rows during training
"""
path_to_data = paths_to_L1000_files[phase]["path_to_data"]
df_path = os.path.join(paths_to_L1000_files[phase]["path_to_dir"], "dataframe.pkl")
if os.path.isfile(df_path):
print("Loading data of", phase)
pickle_in = open(df_path, "rb")
data = pickle.load(pickle_in)
else: # If the data has not been saved yet, parse the original file and save dataframe
print("Parsing original data, only happens the first time...")
data = parse(path_to_data, rid=self.landmark_gene_list).data_df.T
# Ensure that the order of columns corresponds to landmark_gene_list
data = data[self.landmark_gene_list]
# Remove rows that are not in sig_info
data = data[data.index.isin(self.sig_info.index)]
# Transform data so that it only has one column
d = pd.DataFrame(index=data.index, columns=["gene_expr"])
d["gene_expr"] = list(data.to_numpy())
data = d
# Save data
pickle_out = open(df_path, "wb")
pickle.dump(data, pickle_out, protocol=2)
pickle_out.close()
return data
def get_env_dict(self):
"""
:return: dict with (pert, cell) keys corresponding to non empty environments
dict[(pert, cell)] contains the list of all corresponding sig_ids
"""
dict_path = os.path.join(paths_to_L1000_files[self.phase]["path_to_dir"], "dict_" + str(self.both) + ".npy")
if os.path.isfile(dict_path): # if the dict has been saved previously, load it
env_dict = np.load(dict_path, allow_pickle='TRUE').item()
else:
print("Building dict of all environments, only happens the first time...")
env_dict = {}
for index, row in self.data.iterrows():
pert = self.sig_info.pert_id.loc[index]
cell = self.sig_info.cell_id.loc[index]
if (pert, cell) in env_dict.keys():
env_dict[(pert, cell)].append(index)
else:
env_dict[(pert, cell)] = [index]
np.save(dict_path, env_dict)
return env_dict
def load_metadata(self):
# cell_info and gene_info files are the same for both phases
cell_info = pd.read_csv(paths_to_L1000_files[self.phase]["path_to_cell_info"],
sep="\t", index_col="cell_id")
cell_info['cell_id'] = cell_info.index # Store cell_id in a column
# Get list of landmark genes
gene_info = pd.read_csv(paths_to_L1000_files[self.phase]["path_to_gene_info"], sep="\t")
landmark_gene_list = gene_info[gene_info['pr_is_lm'] == 1]["pr_gene_id"].astype(str)
# Load pert_info
pert_info = pd.read_csv(paths_to_L1000_files[self.phase]["path_to_pert_info"], sep="\t",
index_col="pert_id", usecols=["pert_id", "canonical_smiles"])
if self.both: # If we want both phases, load the other phase as well
pert_info_2 = pd.read_csv(paths_to_L1000_files["phase2"]["path_to_pert_info"], sep="\t",
index_col="pert_id", usecols=["pert_id", "canonical_smiles"])
pert_info = pd.concat([pert_info, pert_info_2])
# Remove duplicate indices
pert_info = pert_info.loc[~pert_info.index.duplicated(keep='first')]
# Load fingerprints
fps_path = os.path.join(paths_to_L1000_files[self.phase]["path_to_dir"], "fingerprints_" + str(self.radius)
+ "_" + str(self.nBits) + "_" + str(self.both) + ".pkl")
if os.path.isfile(fps_path):
fps = pd.read_pickle(fps_path)
else: # If fingerprints have not been saved yet, compute them
print("Computing fingerprints, only happens the first time...")
fps = pert_info.apply(lambda row: get_fingerprint(row["canonical_smiles"],
self.radius, self.nBits), axis=1)
fps.to_pickle(fps_path) # Save fingerprints
# Add fingerprints to the pert_info file
pert_info["fps"] = fps
return cell_info, landmark_gene_list, pert_info
def build_environment_representation(self):
"""
We store the environment representation in a single column containing numpy arrays.
It allows a considerable speedup when accessing rows during training
"""
sig_info_path = os.path.join(paths_to_L1000_files[self.phase]["path_to_dir"], "sig_info_" + str(self.radius)
+ "_" + str(self.nBits) + "_" + str(self.both) + ".pkl")
if os.path.isfile(sig_info_path):
pickle_in = open(sig_info_path, "rb")
sig_info = pickle.load(pickle_in)
else:
print("Building environment representations, only happens the first time...")
# Load original file
sig_info = pd.read_csv(paths_to_L1000_files[self.phase]["path_to_sig_info"], sep="\t",
index_col="sig_id",
usecols=["sig_id", "pert_id", "cell_id", "pert_idose", "pert_itime"])
if self.both: # If we want both phases, load the other phase as well
sig_info_2 = pd.read_csv(paths_to_L1000_files["phase2"]["path_to_sig_info"], sep="\t",
index_col="sig_id",
usecols=["sig_id", "pert_id", "cell_id", "pert_idose", "pert_itime"])
sig_info = pd.concat([sig_info, sig_info_2])
# Convert time to float and add to sig_info
sig_info['pert_itime_value'] = sig_info['pert_itime'].apply(get_time)
# Convert concentrations to float and add to sig_info
sig_info['pert_idose_value'] = sig_info['pert_idose'].apply(get_concentration)
# Add fingerprints to the sig_info file
pert_id_of_sig = sig_info['pert_id'] # Get all sig_info pert_ids
# Drop rows that are not in pert_info
pert_id_of_sig = pert_id_of_sig.drop(sig_info[~sig_info['pert_id'].isin(self.pert_info.index)]
.index)
sig_fps = self.pert_info.loc[pert_id_of_sig]['fps']
sig_fps.index = pert_id_of_sig.index
sig_fps = sig_fps.reindex(sig_info.index, fill_value=None)
# Add the corresponding fingerprints to sig_info
sig_info['fps'] = sig_fps
# Drop rows that do not have fingerprints
sig_info = sig_info.dropna()
# Create multiple columns that will store one component of the fps per column
fps_df = list(sig_info['fps'])
fps_df = pd.DataFrame(np.array([list(fp) for fp in fps_df]))
fps_df.index = sig_info.index
sig_info = pd.concat([sig_info, fps_df], axis=1, sort=False)
sig_info = sig_info.drop(["fps"], axis=1)
# Add information about the cells in the sig_info file in the form of one hot encoding
cell_id_of_sig = sig_info['cell_id']
# Drop rows that are not in cell_info
cell_id_of_sig = cell_id_of_sig.drop(sig_info[~sig_info['cell_id'].isin(self.cell_info.index)]
.index)
sig_info_of_cell = self.cell_info.loc[cell_id_of_sig][['primary_site', 'original_growth_pattern',
'subtype', 'sample_type', 'cell_type', 'cell_id']]
sig_info_of_cell = pd.get_dummies(sig_info_of_cell) # Get one hot encodings
sig_info_of_cell.index = cell_id_of_sig.index
sig_info_of_cell = sig_info_of_cell.reindex(sig_info.index, fill_value=0)
sig_info = pd.concat([sig_info, sig_info_of_cell], axis=1, sort=False)
# Transform sig_info so that env_repr is stored in a single column as a numpy array
env_repr = pd.DataFrame(index=sig_info.index, columns=["env_repr"])
import glob
import json
import os
from wsgiref.util import FileWrapper
import shutil
import pandas as pd
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.http.response import HttpResponse
from django.shortcuts import render
from interface.models import DbQuery
from interface.models import StarsFilter
@login_required(login_url='login/')
def all_filters(request):
stars_filters_path = os.path.join(settings.MEDIA_ROOT, str(request.user.id), "stars_filters")
header = ["Job id", "Status", "Start date",
"Finish date", "Descriptors", "Deciders", "Link"]
dat = []
for star_filt in StarsFilter.objects.filter(user=request.user):
row = [star_filt.id,
star_filt.status,
str(star_filt.start_date),
str(star_filt.finish_date),
star_filt.descriptors.replace(";", "<br>"),
star_filt.deciders,
str(star_filt.id)]
dat.append(row)
table = pd.DataFrame(
dat, columns=["fold_name", "status", "start", "stop", "descr", "decid", "job_id"])
table["start"] = pd.to_datetime(table["start"])
table.sort_values(by="start", ascending=False, inplace=True)
job_ids = table["job_id"].values.tolist()
table = table.drop('job_id', 1)
return render(request, 'interface/jobs.html', {"page_title": "Star filter jobs",
"header": header,
"stars_filter": True,
"delete_prefix" : '"../{}/delete/"'.format(os.environ.get(
"DOCKYARD_APP_CONTEXT"), ""),
"table": zip(table.values.tolist(), job_ids)})
@login_required(login_url='login/')
def _all_filters(request):
stars_filters_path = os.path.join(settings.MEDIA_ROOT, str(request.user.id), "stars_filters")
header = ["Job id", "Status", "Date", "Descriptors", "Deciders", "Link"]
dat = []
for folder_name in os.listdir(stars_filters_path):
try:
with open(os.path.join(stars_filters_path, folder_name, "status.json"), 'r') as status_file:
status = json.load(status_file)
row = [folder_name,
status.get("status", ""),
status.get("start", ""),
status.get("descriptors", ""),
status.get("deciders", ""),
str(folder_name)]
dat.append(row)
except:
pass
table = pd.DataFrame(
dat, columns=["fold_name", "status", "start", "descr", "decid", "job_id"])
table["start"] = | pd.to_datetime(table["start"]) | pandas.to_datetime |
#!/usr/bin/env python
# coding: utf-8
# #### Author : <NAME>
# #### Topic : Multiple Linear Regression : Car Price Prediction
# #### Email : <EMAIL>
# Multiple linear regression is the extension of simple linear regression that predicts a response using two or more features. Mathematically, it can be described as follows:
#
# Consider a dataset with n observations, p features (the independent variables), and one response y (the dependent variable). The regression equation for p features can be written as:
#
# h(x_i) = b_0 + b_1*x_i1 + b_2*x_i2 + ... + b_p*x_ip
# Here, h(x_i) is the predicted response value and b_0, b_1, b_2, ..., b_p are the regression coefficients.
#
# A multiple linear regression model also includes the error in the data, known as the residual error, which changes the equation as follows:
#
# h(x_i) = b_0 + b_1*x_i1 + b_2*x_i2 + ... + b_p*x_ip + e_i
# We can also write the above equation as follows −
#
# y_i = h(x_i) + e_i, or equivalently, e_i = y_i - h(x_i)
# #### Import Libraries
# In[ ]:
import numpy as np
import pandas as pd
# #### Import Datasets
# In[ ]:
df = pd.read_csv('datasets_794035_1363047_carprices.csv')
df
# In[ ]:
### Dummy Variables
dummies = pd.get_dummies(df['Car Model'])
dummies
# In[ ]:
### Merge Datasets
merge = pd.concat([df, dummies], axis='columns')
merge
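# In[ ]:
# The cell below is an illustrative sketch (not part of the original notebook) showing how the
# merged frame could be used to fit the multiple linear regression described above. The target
# column name 'Sell Price($)' is an assumption about the CSV and may need adjusting; one dummy
# column is dropped to avoid the dummy-variable trap.
from sklearn.linear_model import LinearRegression
target_col = 'Sell Price($)'  # assumed name of the price column
final = merge.drop(columns=['Car Model', dummies.columns[-1]])  # keep k-1 dummy columns
X = final.drop(columns=[target_col])
y = final[target_col]
reg = LinearRegression()
reg.fit(X, y)
reg.score(X, y)  # R^2 on the training data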
import json
import os
import sys
import tensorflow as tf
from keras import backend as K
from keras import optimizers, utils
from keras.callbacks import CSVLogger
from keras.engine import Model
from keras.layers import Dropout, Flatten, Dense
from keras.layers import GlobalAveragePooling2D, GlobalMaxPooling2D
from src.GrayscaleModels.densenet_gray import DenseNet169
from src.train.GradientCheckpointing import memory_saving_gradients
WDIR = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.join(WDIR, "GradientCheckpointing"))
sys.path.insert(0, os.path.join(WDIR, "../GrayscaleModels"))
K.__dict__["gradients"] = memory_saving_gradients.gradients_memory
import pandas as pd
import numpy as np
import scipy.misc
from scipy.ndimage.interpolation import rotate
from scipy.ndimage.filters import gaussian_filter
from skimage import exposure
from sklearn.metrics import roc_auc_score, cohen_kappa_score, accuracy_score, f1_score
def get_model(base_model,
layer,
lr=1e-3,
input_shape=(224, 224, 1),
classes=2,
activation="softmax",
dropout=None,
pooling="avg",
weights=None,
pretrained="imagenet"):
global x
base = base_model(input_shape=input_shape,
include_top=False,
weights=pretrained,
channels="gray")
if pooling == "avg":
x = GlobalAveragePooling2D()(base.output)
elif pooling == "max":
x = GlobalMaxPooling2D()(base.output)
elif pooling is None:
x = Flatten()(base.output)
if dropout is not None:
x = Dropout(dropout)(x)
x = Dense(classes, activation=activation)(x)
model = Model(inputs=base.input, outputs=x)
if weights is not None:
model.load_weights(weights)
for l in model.layers[:layer]:
l.trainable = False
model.compile(loss="binary_crossentropy", metrics=["accuracy"],
optimizer=optimizers.Adam(lr))
return model
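# Illustrative call (a sketch only; these hyperparameter values are assumptions, not the
# authors' training configuration):
# model = get_model(DenseNet169, layer=0, lr=1e-4,
#                   input_shape=(256, 256, 1), classes=2,
#                   dropout=0.5, pooling="avg", pretrained="imagenet")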
##########
## DATA ##
##########
# == PREPROCESSING == #
def preprocess_input(x, model):
x = x.astype("float32")
if model in ("inception", "xception", "mobilenet"):
x /= 255.
x -= 0.5
x *= 2.
if model in ("densenet"):
x /= 255.
if x.shape[-1] == 3:
x[..., 0] -= 0.485
x[..., 1] -= 0.456
x[..., 2] -= 0.406
x[..., 0] /= 0.229
x[..., 1] /= 0.224
x[..., 2] /= 0.225
elif x.shape[-1] == 1:
x[..., 0] -= 0.449
x[..., 0] /= 0.226
elif model in ("resnet", "vgg"):
if x.shape[-1] == 3:
x[..., 0] -= 103.939
x[..., 1] -= 116.779
x[..., 2] -= 123.680
elif x.shape[-1] == 1:
x[..., 0] -= 115.799
return x
def apply_clahe(img):
img = img / 255.
img = exposure.equalize_adapthist(img)
img = img * 255.
return img
# == AUGMENTATION == #
def crop_center(img, cropx, cropy):
y, x = img.shape[:2]
startx = x // 2 - (cropx // 2)
starty = y // 2 - (cropy // 2)
return img[starty:starty + cropy, startx:startx + cropx, :]
def data_augmentation(image):
# Input should be ONE image with shape: (L, W, CH)
options = ["gaussian_smooth", "rotate", "zoom", "adjust_gamma"]
# Each augmentation option is chosen with equal probability.
which_option = np.random.choice(options)
if which_option == "gaussian_smooth":
sigma = np.random.uniform(0.2, 1.0)
image = gaussian_filter(image, sigma)
elif which_option == "zoom":
# Assumes image is square
min_crop = int(image.shape[0] * 0.85)
max_crop = int(image.shape[0] * 0.95)
crop_size = np.random.randint(min_crop, max_crop)
crop = crop_center(image, crop_size, crop_size)
if crop.shape[-1] == 1: crop = crop[:, :, 0]
image = scipy.misc.imresize(crop, image.shape)
elif which_option == "rotate":
angle = np.random.uniform(-15, 15)
image = rotate(image, angle, reshape=False)
elif which_option == "adjust_gamma":
image = image / 255.
image = exposure.adjust_gamma(image, np.random.uniform(0.75, 1.25))
image = image * 255.
if len(image.shape) == 2: image = np.expand_dims(image, axis=2)
return image
# == I/O == #
def load_sample(train_images, num_train_samples, z):
if len(train_images) < num_train_samples:
train_sample_images = np.random.choice(train_images, num_train_samples, replace=True)
train_sample_array = np.asarray([np.load(arr) for arr in train_sample_images])
return train_sample_array, []
train_sample_images = list(set(train_images) - set(z))
if len(train_sample_images) < num_train_samples:
sample_diff = num_train_samples - len(train_sample_images)
not_sampled = list(set(train_images) - set(train_sample_images))
train_sample_images.extend(np.random.choice(not_sampled, sample_diff, replace=False))
z = []
else:
train_sample_images = np.random.choice(train_sample_images, num_train_samples, replace=False)
z.extend(train_sample_images)
train_sample_array = np.asarray([np.load(arr) for arr in train_sample_images])
return train_sample_array, z
def load_sample_and_labels(df, train_images, num_train_samples, z):
if len(train_images) < num_train_samples:
z = []
train_sample_images = np.random.choice(train_images, num_train_samples, replace=True)
train_sample_images = list(set(train_images) - set(z))
if len(train_sample_images) < num_train_samples:
sample_diff = num_train_samples - len(train_sample_images)
not_sampled = list(set(train_images) - set(train_sample_images))
train_sample_images.extend(np.random.choice(not_sampled, sample_diff, replace=False))
z = []
else:
train_sample_images = np.random.choice(train_sample_images, num_train_samples, replace=False)
z.extend(train_sample_images)
train_sample_ids = [_.split("/")[-1].split(".")[0] for _ in train_sample_images]
train_sample_df = df[(df.patientId.isin(train_sample_ids))]
train_sample_df.index = train_sample_df.patientId
train_sample_df = train_sample_df.reindex(train_sample_ids)
train_sample_labels = np.asarray(train_sample_df["label"])
train_sample_array = np.asarray([np.load(arr) for arr in train_sample_images])
return train_sample_array, train_sample_labels, z
def TTA(img, model, model_name, seed=88, niter=0):
np.random.seed(seed)
original_img = img.copy()
inverted_img = np.invert(img.copy())
hflipped_img = np.fliplr(img.copy())
original_img_array = np.empty((niter + 1, img.shape[0], img.shape[1], img.shape[2]))
inverted_img_array = original_img_array.copy()
hflipped_img_array = original_img_array.copy()
original_img_array[0] = original_img
inverted_img_array[0] = inverted_img
hflipped_img_array[0] = hflipped_img
for each_iter in range(niter):
original_img_array[each_iter + 1] = data_augmentation(original_img)
inverted_img_array[each_iter + 1] = data_augmentation(inverted_img)
hflipped_img_array[each_iter + 1] = data_augmentation(hflipped_img)
tmp_array = np.vstack((original_img_array, inverted_img_array, hflipped_img_array))
tmp_array = preprocess_input(tmp_array, model_name)
if int(model.get_output_at(-1).get_shape()[1]) == 1:
prediction = np.mean(model.predict(tmp_array)[:, 0])
else:
prediction = np.mean(model.predict(tmp_array)[:, -1])
return prediction
############
# VALIDATE #
############
def reduce_learning_rate_or_not(metric_list, direction="max", patience=2):
# **NOTE: metric_list should have CURRENT metric as last element
if len(metric_list) < patience + 1:
return False
else:
if direction == "max":
if metric_list[-1] <= metric_list[(-1 - patience)]:
return True
else:
return False
elif direction == "min":
if metric_list[-1] >= metric_list[(-1 - patience)]:
return True
else:
return False
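# Illustrative example (assumed values): with patience=2 and a validation metric tracked with
# direction="max", a metric that has stopped improving triggers a learning-rate reduction.
# reduce_learning_rate_or_not([0.81, 0.83, 0.83, 0.82], direction="max", patience=2) -> True
# reduce_learning_rate_or_not([0.81, 0.82, 0.83, 0.84], direction="max", patience=2) -> False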
def competitionMetric(y_true, y_pred):
y_true = np.asarray(y_true)
y_pred = np.asarray(y_pred)
TP = np.sum((y_true == 1) & (y_pred == 1))
FP = np.sum((y_true == 0) & (y_pred == 1))
FN = np.sum((y_true == 1) & (y_pred == 0))
return float(TP) / (float(FP) + float(FN) + float(TP))
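# Illustrative example: y_true = [1, 1, 0, 0] and y_pred = [1, 0, 1, 0] give TP=1, FP=1, FN=1,
# so the metric is 1 / (1 + 1 + 1) = 1/3.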
def calculate_metrics(val_results_dict, y_pred, y_val, suffix=""):
tmp_kappa_list = []
tmp_accur_list = []
tmp_f1_list = []
tmp_cm_list = []
y_val = utils.to_categorical(y_val)[:, -1]
for each_threshold in np.linspace(0.1, 0.9, 17):
tmp_pred = [1 if _ >= each_threshold else 0 for _ in y_pred]
tmp_kappa_list.append(cohen_kappa_score(tmp_pred, y_val))
tmp_accur_list.append(accuracy_score(tmp_pred, y_val))
tmp_f1_list.append(f1_score(tmp_pred, y_val))
tmp_cm_list.append(competitionMetric(tmp_pred, y_val))
auroc = round(roc_auc_score(y_val, y_pred), 3)
kappa = round(np.max(tmp_kappa_list), 3)
accur = round(np.max(tmp_accur_list), 3)
cm = round(np.max(tmp_cm_list), 3)
f1 = round(np.max(tmp_f1_list), 3)
val_results_dict["auc{}".format(suffix)].append(auroc)
val_results_dict["kap{}".format(suffix)].append(kappa)
val_results_dict["acc{}".format(suffix)].append(accur)
val_results_dict["f1{}".format(suffix)].append(f1)
val_results_dict["cm{}".format(suffix)].append(cm)
kappa_threshold = np.linspace(0.1, 0.9, 17)[tmp_kappa_list.index(np.max(tmp_kappa_list))]
accur_threshold = np.linspace(0.1, 0.9, 17)[tmp_accur_list.index(np.max(tmp_accur_list))]
f1_threshold = np.linspace(0.1, 0.9, 17)[tmp_f1_list.index(np.max(tmp_f1_list))]
cm_threshold = np.linspace(0.1, 0.9, 17)[tmp_cm_list.index(np.max(tmp_cm_list))]
val_results_dict["threshold_kap{}".format(suffix)].append(round(kappa_threshold, 2))
val_results_dict["threshold_acc{}".format(suffix)].append(round(accur_threshold, 2))
val_results_dict["threshold_f1{}".format(suffix)].append(round(f1_threshold, 2))
val_results_dict["threshold_cm{}".format(suffix)].append(round(cm_threshold, 2))
return val_results_dict
def validate(val_results_dict, model_name,
model, y_val, X_val, valid_ids, valid_views,
save_weights_path, val_results_path,
subepoch,
batch_size):
y_pred = np.asarray([TTA(img, model, model_name) for img in X_val])
val_results_dict = calculate_metrics(val_results_dict, y_pred, y_val)
val_results_dict = calculate_metrics(val_results_dict, y_pred[valid_views == "AP"], y_val[valid_views == "AP"],
"_AP")
val_results_dict = calculate_metrics(val_results_dict, y_pred[valid_views == "PA"], y_val[valid_views == "PA"],
"_PA")
val_results_dict["subepoch"].append(subepoch)
out_df = pd.DataFrame(val_results_dict)
out_df.to_csv(os.path.join(val_results_path, "results.csv"), index=False)
predictions_df = pd.DataFrame({"patientId": valid_ids, "y_pred": y_pred})
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for datetime64 and datetime64tz dtypes
from datetime import (
datetime,
time,
timedelta,
)
from itertools import (
product,
starmap,
)
import operator
import warnings
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs.conversion import localize_pydatetime
from pandas._libs.tslibs.offsets import shift_months
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import (
DateOffset,
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
date_range,
)
import pandas._testing as tm
from pandas.core.arrays import (
DatetimeArray,
TimedeltaArray,
)
from pandas.core.ops import roperator
from pandas.tests.arithmetic.common import (
assert_cannot_add,
assert_invalid_addsub_type,
assert_invalid_comparison,
get_upcast_box,
)
# ------------------------------------------------------------------
# Comparisons
class TestDatetime64ArrayLikeComparisons:
# Comparison tests for datetime64 vectors fully parametrized over
# DataFrame/Series/DatetimeIndex/DatetimeArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, tz_naive_fixture, box_with_array):
# Test comparison with zero-dimensional array is unboxed
tz = tz_naive_fixture
box = box_with_array
dti = date_range("20130101", periods=3, tz=tz)
other = np.array(dti.to_numpy()[0])
dtarr = tm.box_expected(dti, box)
xbox = get_upcast_box(dtarr, other, True)
result = dtarr <= other
expected = np.array([True, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
"foo",
-1,
99,
4.0,
object(),
timedelta(days=2),
# GH#19800, GH#19301 datetime.date comparison raises to
# match DatetimeIndex/Timestamp. This also matches the behavior
# of stdlib datetime.datetime
datetime(2001, 1, 1).date(),
# GH#19301 None and NaN are *not* cast to NaT for comparisons
None,
np.nan,
],
)
def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, box_with_array):
# GH#22074, GH#15966
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
dtarr = tm.box_expected(rng, box_with_array)
assert_invalid_comparison(dtarr, other, box_with_array)
@pytest.mark.parametrize(
"other",
[
# GH#4968 invalid date/int comparisons
list(range(10)),
np.arange(10),
np.arange(10).astype(np.float32),
np.arange(10).astype(object),
pd.timedelta_range("1ns", periods=10).array,
np.array(pd.timedelta_range("1ns", periods=10)),
list(pd.timedelta_range("1ns", periods=10)),
pd.timedelta_range("1 Day", periods=10).astype(object),
pd.period_range("1971-01-01", freq="D", periods=10).array,
pd.period_range("1971-01-01", freq="D", periods=10).astype(object),
],
)
def test_dt64arr_cmp_arraylike_invalid(
self, other, tz_naive_fixture, box_with_array
):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="ns", periods=10, tz=tz)._data
obj = tm.box_expected(dta, box_with_array)
assert_invalid_comparison(obj, other, box_with_array)
def test_dt64arr_cmp_mixed_invalid(self, tz_naive_fixture):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="h", periods=5, tz=tz)._data
other = np.array([0, 1, 2, dta[3], Timedelta(days=1)])
result = dta == other
expected = np.array([False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = dta != other
tm.assert_numpy_array_equal(result, ~expected)
msg = "Invalid comparison between|Cannot compare type|not supported between"
with pytest.raises(TypeError, match=msg):
dta < other
with pytest.raises(TypeError, match=msg):
dta > other
with pytest.raises(TypeError, match=msg):
dta <= other
with pytest.raises(TypeError, match=msg):
dta >= other
def test_dt64arr_nat_comparison(self, tz_naive_fixture, box_with_array):
# GH#22242, GH#22163 DataFrame considered NaT == ts incorrectly
tz = tz_naive_fixture
box = box_with_array
ts = Timestamp("2021-01-01", tz=tz)
ser = Series([ts, NaT])
obj = tm.box_expected(ser, box)
xbox = get_upcast_box(obj, ts, True)
expected = Series([True, False], dtype=np.bool_)
expected = tm.box_expected(expected, xbox)
result = obj == ts
tm.assert_equal(result, expected)
class TestDatetime64SeriesComparison:
# TODO: moved from tests.series.test_operators; needs cleanup
@pytest.mark.parametrize(
"pair",
[
(
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[NaT, NaT, Timestamp("2011-01-03")],
),
(
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[NaT, NaT, Timedelta("3 days")],
),
(
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
[NaT, NaT, Period("2011-03", freq="M")],
),
],
)
@pytest.mark.parametrize("reverse", [True, False])
@pytest.mark.parametrize("dtype", [None, object])
@pytest.mark.parametrize(
"op, expected",
[
(operator.eq, Series([False, False, True])),
(operator.ne, Series([True, True, False])),
(operator.lt, Series([False, False, False])),
(operator.gt, Series([False, False, False])),
(operator.ge, Series([False, False, True])),
(operator.le, Series([False, False, True])),
],
)
def test_nat_comparisons(
self,
dtype,
index_or_series,
reverse,
pair,
op,
expected,
):
box = index_or_series
l, r = pair
if reverse:
# add lhs / rhs switched data
l, r = r, l
left = Series(l, dtype=dtype)
right = box(r, dtype=dtype)
result = op(left, right)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data",
[
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
],
)
@pytest.mark.parametrize("dtype", [None, object])
def test_nat_comparisons_scalar(self, dtype, data, box_with_array):
box = box_with_array
left = Series(data, dtype=dtype)
left = tm.box_expected(left, box)
xbox = get_upcast_box(left, NaT, True)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left == NaT, expected)
tm.assert_equal(NaT == left, expected)
expected = [True, True, True]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left != NaT, expected)
tm.assert_equal(NaT != left, expected)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left < NaT, expected)
tm.assert_equal(NaT > left, expected)
tm.assert_equal(left <= NaT, expected)
tm.assert_equal(NaT >= left, expected)
tm.assert_equal(left > NaT, expected)
tm.assert_equal(NaT < left, expected)
tm.assert_equal(left >= NaT, expected)
tm.assert_equal(NaT <= left, expected)
@pytest.mark.parametrize("val", [datetime(2000, 1, 4), datetime(2000, 1, 5)])
def test_series_comparison_scalars(self, val):
series = Series(date_range("1/1/2000", periods=10))
result = series > val
expected = Series([x > val for x in series])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"left,right", [("lt", "gt"), ("le", "ge"), ("eq", "eq"), ("ne", "ne")]
)
def test_timestamp_compare_series(self, left, right):
# see gh-4982
# Make sure we can compare Timestamps on the right AND left hand side.
ser = Series(date_range("20010101", periods=10), name="dates")
s_nat = ser.copy(deep=True)
ser[0] = Timestamp("nat")
ser[3] = Timestamp("nat")
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# No NaT
expected = left_f(ser, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), ser)
tm.assert_series_equal(result, expected)
# NaT
expected = left_f(ser, Timestamp("nat"))
result = right_f(Timestamp("nat"), ser)
tm.assert_series_equal(result, expected)
# Compare to Timestamp with series containing NaT
expected = left_f(s_nat, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), s_nat)
tm.assert_series_equal(result, expected)
# Compare to NaT with series containing NaT
expected = left_f(s_nat, NaT)
result = right_f(NaT, s_nat)
tm.assert_series_equal(result, expected)
def test_dt64arr_timestamp_equality(self, box_with_array):
# GH#11034
ser = Series([Timestamp("2000-01-29 01:59:00"), Timestamp("2000-01-30"), NaT])
ser = tm.box_expected(ser, box_with_array)
xbox = get_upcast_box(ser, ser, True)
result = ser != ser
expected = tm.box_expected([False, False, True], xbox)
tm.assert_equal(result, expected)
warn = FutureWarning if box_with_array is pd.DataFrame else None
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[0]
expected = tm.box_expected([False, True, True], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[2]
expected = tm.box_expected([True, True, True], xbox)
tm.assert_equal(result, expected)
result = ser == ser
expected = tm.box_expected([True, True, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[0]
expected = tm.box_expected([True, False, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[2]
expected = tm.box_expected([False, False, False], xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"datetimelike",
[
Timestamp("20130101"),
datetime(2013, 1, 1),
np.datetime64("2013-01-01T00:00", "ns"),
],
)
@pytest.mark.parametrize(
"op,expected",
[
(operator.lt, [True, False, False, False]),
(operator.le, [True, True, False, False]),
(operator.eq, [False, True, False, False]),
(operator.gt, [False, False, False, True]),
],
)
def test_dt64_compare_datetime_scalar(self, datetimelike, op, expected):
# GH#17965, test for ability to compare datetime64[ns] columns
# to datetimelike
ser = Series(
[
Timestamp("20120101"),
Timestamp("20130101"),
np.nan,
Timestamp("20130103"),
],
name="A",
)
result = op(ser, datetimelike)
expected = Series(expected, name="A")
tm.assert_series_equal(result, expected)
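# Note: in test_dt64_compare_datetime_scalar above, the NaN slot compares
# False under every parametrized operator; only ``!=`` would yield True
# there, consistent with the NaT semantics exercised earlier in this class.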
class TestDatetimeIndexComparisons:
# TODO: moved from tests.indexes.test_base; parametrize and de-duplicate
def test_comparators(self, comparison_op):
index = tm.makeDateIndex(100)
element = index[len(index) // 2]
element = Timestamp(element).to_datetime64()
arr = np.array(index)
arr_result = comparison_op(arr, element)
index_result = comparison_op(index, element)
assert isinstance(index_result, np.ndarray)
tm.assert_numpy_array_equal(arr_result, index_result)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
def test_dti_cmp_datetimelike(self, other, tz_naive_fixture):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
if tz is not None:
if isinstance(other, np.datetime64):
# no tzaware version available
return
other = localize_pydatetime(other, dti.tzinfo)
result = dti == other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = dti > other
expected = np.array([False, True])
tm.assert_numpy_array_equal(result, expected)
result = dti >= other
expected = np.array([True, True])
tm.assert_numpy_array_equal(result, expected)
result = dti < other
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
result = dti <= other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype", [None, object])
def test_dti_cmp_nat(self, dtype, box_with_array):
left = DatetimeIndex([Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")])
right = DatetimeIndex([NaT, NaT, Timestamp("2011-01-03")])
left = tm.box_expected(left, box_with_array)
right = tm.box_expected(right, box_with_array)
xbox = get_upcast_box(left, right, True)
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
result = lhs != rhs
expected = np.array([True, True, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs == NaT, expected)
tm.assert_equal(NaT == rhs, expected)
expected = np.array([True, True, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs != NaT, expected)
tm.assert_equal(NaT != lhs, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs < NaT, expected)
tm.assert_equal(NaT > lhs, expected)
def test_dti_cmp_nat_behaves_like_float_cmp_nan(self):
fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0])
fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0])
didx1 = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
didx2 = DatetimeIndex(
["2014-02-01", "2014-03-01", NaT, NaT, "2014-06-01", "2014-07-01"]
)
darr = np.array(
[
np.datetime64("2014-02-01 00:00"),
np.datetime64("2014-03-01 00:00"),
np.datetime64("nat"),
np.datetime64("nat"),
np.datetime64("2014-06-01 00:00"),
np.datetime64("2014-07-01 00:00"),
]
)
cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)]
        # Check pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, np.nan), (didx1, NaT)]:
result = idx1 < val
expected = np.array([False, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, True, True, True, True])
tm.assert_numpy_array_equal(result, expected)
        # Check pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, 3), (didx1, datetime(2014, 3, 1))]:
result = idx1 < val
expected = np.array([True, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
expected = np.array([False, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
expected = np.array([True, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
expected = np.array([False, False, True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
expected = np.array([False, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, False, True, True, True])
tm.assert_numpy_array_equal(result, expected)
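    # Mixing tz-aware and tz-naive datetimes: ordering comparisons raise
    # TypeError, while ``==``/``!=`` degrade to all-False/all-True since no
    # tz-aware value ever equals a tz-naive one.  The next tests pin down
    # both halves of this rule.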
def test_comparison_tzawareness_compat(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
box = box_with_array
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box)
dz = tm.box_expected(dz, box)
if box is pd.DataFrame:
tolist = lambda x: x.astype(object).values.tolist()[0]
else:
tolist = list
if op not in [operator.eq, operator.ne]:
msg = (
r"Invalid comparison between dtype=datetime64\[ns.*\] "
"and (Timestamp|DatetimeArray|list|ndarray)"
)
with pytest.raises(TypeError, match=msg):
op(dr, dz)
with pytest.raises(TypeError, match=msg):
op(dr, tolist(dz))
with pytest.raises(TypeError, match=msg):
op(dr, np.array(tolist(dz), dtype=object))
with pytest.raises(TypeError, match=msg):
op(dz, dr)
with pytest.raises(TypeError, match=msg):
op(dz, tolist(dr))
with pytest.raises(TypeError, match=msg):
op(dz, np.array(tolist(dr), dtype=object))
# The aware==aware and naive==naive comparisons should *not* raise
assert np.all(dr == dr)
assert np.all(dr == tolist(dr))
assert np.all(tolist(dr) == dr)
assert np.all(np.array(tolist(dr), dtype=object) == dr)
assert np.all(dr == np.array(tolist(dr), dtype=object))
assert np.all(dz == dz)
assert np.all(dz == tolist(dz))
assert np.all(tolist(dz) == dz)
assert np.all(np.array(tolist(dz), dtype=object) == dz)
assert np.all(dz == np.array(tolist(dz), dtype=object))
def test_comparison_tzawareness_compat_scalars(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box_with_array)
dz = tm.box_expected(dz, box_with_array)
# Check comparisons against scalar Timestamps
ts = Timestamp("2000-03-14 01:59")
ts_tz = Timestamp("2000-03-14 01:59", tz="Europe/Amsterdam")
assert np.all(dr > ts)
msg = r"Invalid comparison between dtype=datetime64\[ns.*\] and Timestamp"
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dr, ts_tz)
assert np.all(dz > ts_tz)
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dz, ts)
if op not in [operator.eq, operator.ne]:
# GH#12601: Check comparison against Timestamps and DatetimeIndex
with pytest.raises(TypeError, match=msg):
op(ts, dz)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
# Bug in NumPy? https://github.com/numpy/numpy/issues/13841
    # Raising in __eq__ will fall back to NumPy, which warns, fails,
# then re-raises the original exception. So we just need to ignore.
@pytest.mark.filterwarnings("ignore:elementwise comp:DeprecationWarning")
@pytest.mark.filterwarnings("ignore:Converting timezone-aware:FutureWarning")
def test_scalar_comparison_tzawareness(
self, comparison_op, other, tz_aware_fixture, box_with_array
):
op = comparison_op
tz = tz_aware_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
xbox = get_upcast_box(dtarr, other, True)
if op in [operator.eq, operator.ne]:
exbool = op is operator.ne
expected = np.array([exbool, exbool], dtype=bool)
expected = tm.box_expected(expected, xbox)
result = op(dtarr, other)
tm.assert_equal(result, expected)
result = op(other, dtarr)
tm.assert_equal(result, expected)
else:
msg = (
r"Invalid comparison between dtype=datetime64\[ns, .*\] "
f"and {type(other).__name__}"
)
with pytest.raises(TypeError, match=msg):
op(dtarr, other)
with pytest.raises(TypeError, match=msg):
op(other, dtarr)
def test_nat_comparison_tzawareness(self, comparison_op):
# GH#19276
# tzaware DatetimeIndex should not raise when compared to NaT
op = comparison_op
dti = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
expected = np.array([op == operator.ne] * len(dti))
result = op(dti, NaT)
tm.assert_numpy_array_equal(result, expected)
result = op(dti.tz_localize("US/Pacific"), NaT)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_str(self, tz_naive_fixture):
# GH#22074
# regardless of tz, we expect these comparisons are valid
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
other = "1/1/2000"
result = rng == other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng != other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng < other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = rng <= other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng > other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng >= other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_list(self):
rng = date_range("1/1/2000", periods=10)
result = rng == list(rng)
expected = rng == rng
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
pd.timedelta_range("1D", periods=10),
pd.timedelta_range("1D", periods=10).to_series(),
pd.timedelta_range("1D", periods=10).asi8.view("m8[ns]"),
],
ids=lambda x: type(x).__name__,
)
def test_dti_cmp_tdi_tzawareness(self, other):
# GH#22074
        # regression test that we _don't_ call _assert_tzawareness_compat
# when comparing against TimedeltaIndex
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
result = dti == other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = dti != other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
msg = "Invalid comparison between"
with pytest.raises(TypeError, match=msg):
dti < other
with pytest.raises(TypeError, match=msg):
dti <= other
with pytest.raises(TypeError, match=msg):
dti > other
with pytest.raises(TypeError, match=msg):
dti >= other
def test_dti_cmp_object_dtype(self):
# GH#22074
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
other = dti.astype("O")
result = dti == other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
other = dti.tz_localize(None)
result = dti != other
tm.assert_numpy_array_equal(result, expected)
other = np.array(list(dti[:5]) + [Timedelta(days=1)] * 5)
result = dti == other
expected = np.array([True] * 5 + [False] * 5)
tm.assert_numpy_array_equal(result, expected)
msg = ">=' not supported between instances of 'Timestamp' and 'Timedelta'"
with pytest.raises(TypeError, match=msg):
dti >= other
# ------------------------------------------------------------------
# Arithmetic
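# Quick reference for the dtype rules exercised below (illustrative):
#   datetime64 + timedelta64 -> datetime64
#   datetime64 - timedelta64 -> datetime64
#   datetime64 - datetime64  -> timedelta64
#   datetime64 + datetime64  -> TypeError (adding two datetimes is undefined)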
class TestDatetime64Arithmetic:
# This class is intended for "finished" tests that are fully parametrized
# over DataFrame/Series/Index/DatetimeArray
# -------------------------------------------------------------
# Addition/Subtraction of timedelta-like
@pytest.mark.arm_slow
def test_dt64arr_add_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
# GH#22005, GH#22163 check DataFrame doesn't raise TypeError
tz = tz_naive_fixture
rng = date_range("2000-01-01", "2000-02-01", tz=tz)
expected = date_range("2000-01-01 02:00", "2000-02-01 02:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng + two_hours
tm.assert_equal(result, expected)
rng += two_hours
tm.assert_equal(rng, expected)
def test_dt64arr_sub_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
tz = tz_naive_fixture
rng = date_range("2000-01-01", "2000-02-01", tz=tz)
expected = date_range("1999-12-31 22:00", "2000-01-31 22:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng - two_hours
tm.assert_equal(result, expected)
rng -= two_hours
tm.assert_equal(rng, expected)
# TODO: redundant with test_dt64arr_add_timedeltalike_scalar
def test_dt64arr_add_td64_scalar(self, box_with_array):
# scalar timedeltas/np.timedelta64 objects
# operate with np.timedelta64 correctly
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:01:01"), Timestamp("20130101 9:02:01")]
)
dtarr = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(1, "s")
tm.assert_equal(result, expected)
result = np.timedelta64(1, "s") + dtarr
tm.assert_equal(result, expected)
expected = Series(
[Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(5, "ms")
tm.assert_equal(result, expected)
result = np.timedelta64(5, "ms") + dtarr
tm.assert_equal(result, expected)
def test_dt64arr_add_sub_td64_nat(self, box_with_array, tz_naive_fixture):
# GH#23320 special handling for timedelta64("NaT")
tz = tz_naive_fixture
dti = date_range("1994-04-01", periods=9, tz=tz, freq="QS")
other = np.timedelta64("NaT")
expected = DatetimeIndex(["NaT"] * 9, tz=tz)
obj = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = obj + other
tm.assert_equal(result, expected)
result = other + obj
tm.assert_equal(result, expected)
result = obj - other
tm.assert_equal(result, expected)
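        # timedelta64 - datetime64 is undefined, so the reversed subtraction
        # must raise even though NaT would otherwise propagate.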
msg = "cannot subtract"
with pytest.raises(TypeError, match=msg):
other - obj
def test_dt64arr_add_sub_td64ndarray(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
tdi = TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
tdarr = tdi.values
expected = date_range("2015-12-31", "2016-01-02", periods=3, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + tdarr
tm.assert_equal(result, expected)
result = tdarr + dtarr
tm.assert_equal(result, expected)
expected = date_range("2016-01-02", "2016-01-04", periods=3, tz=tz)
expected = tm.box_expected(expected, box_with_array)
result = dtarr - tdarr
tm.assert_equal(result, expected)
msg = "cannot subtract|(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
tdarr - dtarr
# -----------------------------------------------------------------
# Subtraction of datetime-like scalars
@pytest.mark.parametrize(
"ts",
[
Timestamp("2013-01-01"),
Timestamp("2013-01-01").to_pydatetime(),
Timestamp("2013-01-01").to_datetime64(),
],
)
def test_dt64arr_sub_dtscalar(self, box_with_array, ts):
# GH#8554, GH#22163 DataFrame op should _not_ return dt64 dtype
idx = date_range("2013-01-01", periods=3)._with_freq(None)
idx = tm.box_expected(idx, box_with_array)
expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = idx - ts
tm.assert_equal(result, expected)
def test_dt64arr_sub_datetime64_not_ns(self, box_with_array):
# GH#7996, GH#22163 ensure non-nano datetime64 is converted to nano
# for DataFrame operation
dt64 = np.datetime64("2013-01-01")
assert dt64.dtype == "datetime64[D]"
dti = date_range("20130101", periods=3)._with_freq(None)
dtarr = tm.box_expected(dti, box_with_array)
expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = dtarr - dt64
tm.assert_equal(result, expected)
result = dt64 - dtarr
tm.assert_equal(result, -expected)
def test_dt64arr_sub_timestamp(self, box_with_array):
ser = date_range("2014-03-17", periods=2, freq="D", tz="US/Eastern")
ser = ser._with_freq(None)
ts = ser[0]
ser = tm.box_expected(ser, box_with_array)
delta_series = Series([np.timedelta64(0, "D"), np.timedelta64(1, "D")])
expected = tm.box_expected(delta_series, box_with_array)
tm.assert_equal(ser - ts, expected)
tm.assert_equal(ts - ser, -expected)
def test_dt64arr_sub_NaT(self, box_with_array):
# GH#18808
dti = DatetimeIndex([NaT, Timestamp("19900315")])
ser = tm.box_expected(dti, box_with_array)
result = ser - NaT
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
dti_tz = dti.tz_localize("Asia/Tokyo")
ser_tz = tm.box_expected(dti_tz, box_with_array)
result = ser_tz - NaT
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
# -------------------------------------------------------------
# Subtraction of datetime-like array-like
def test_dt64arr_sub_dt64object_array(self, box_with_array, tz_naive_fixture):
dti = date_range("2016-01-01", periods=3, tz=tz_naive_fixture)
expected = dti - dti
obj = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
with tm.assert_produces_warning(PerformanceWarning):
result = obj - obj.astype(object)
tm.assert_equal(result, expected)
def test_dt64arr_naive_sub_dt64ndarray(self, box_with_array):
dti = date_range("2016-01-01", periods=3, tz=None)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
expected = dtarr - dtarr
result = dtarr - dt64vals
tm.assert_equal(result, expected)
result = dt64vals - dtarr
tm.assert_equal(result, expected)
def test_dt64arr_aware_sub_dt64ndarray_raises(
self, tz_aware_fixture, box_with_array
):
tz = tz_aware_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
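        # A raw datetime64 ndarray is tz-naive, so combining it with the
        # tz-aware array is a mixed aware/naive subtraction and must raise.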
msg = "subtraction must have the same timezones or"
with pytest.raises(TypeError, match=msg):
dtarr - dt64vals
with pytest.raises(TypeError, match=msg):
dt64vals - dtarr
# -------------------------------------------------------------
# Addition of datetime-like others (invalid)
def test_dt64arr_add_dt64ndarray_raises(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
assert_cannot_add(dtarr, dt64vals)
def test_dt64arr_add_timestamp_raises(self, box_with_array):
# GH#22163 ensure DataFrame doesn't cast Timestamp to i8
idx = DatetimeIndex(["2011-01-01", "2011-01-02"])
ts = idx[0]
idx = tm.box_expected(idx, box_with_array)
assert_cannot_add(idx, ts)
# -------------------------------------------------------------
# Other Invalid Addition/Subtraction
@pytest.mark.parametrize(
"other",
[
3.14,
np.array([2.0, 3.0]),
# GH#13078 datetime +/- Period is invalid
Period("2011-01-01", freq="D"),
# https://github.com/pandas-dev/pandas/issues/10329
time(1, 2, 3),
],
)
@pytest.mark.parametrize("dti_freq", [None, "D"])
def test_dt64arr_add_sub_invalid(self, dti_freq, other, box_with_array):
dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
dtarr = tm.box_expected(dti, box_with_array)
msg = "|".join(
[
"unsupported operand type",
"cannot (add|subtract)",
"cannot use operands with types",
"ufunc '?(add|subtract)'? cannot use operands with types",
"Concatenation operation is not implemented for NumPy arrays",
]
)
assert_invalid_addsub_type(dtarr, other, msg)
@pytest.mark.parametrize("pi_freq", ["D", "W", "Q", "H"])
@pytest.mark.parametrize("dti_freq", [None, "D"])
def test_dt64arr_add_sub_parr(
self, dti_freq, pi_freq, box_with_array, box_with_array2
):
# GH#20049 subtracting PeriodIndex should raise TypeError
dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
pi = dti.to_period(pi_freq)
dtarr = tm.box_expected(dti, box_with_array)
parr = tm.box_expected(pi, box_with_array2)
msg = "|".join(
[
"cannot (add|subtract)",
"unsupported operand",
"descriptor.*requires",
"ufunc.*cannot use operands",
]
)
assert_invalid_addsub_type(dtarr, parr, msg)
def test_dt64arr_addsub_time_objects_raises(self, box_with_array, tz_naive_fixture):
# https://github.com/pandas-dev/pandas/issues/10329
tz = tz_naive_fixture
obj1 = date_range("2012-01-01", periods=3, tz=tz)
obj2 = [time(i, i, i) for i in range(3)]
obj1 = tm.box_expected(obj1, box_with_array)
obj2 = tm.box_expected(obj2, box_with_array)
with warnings.catch_warnings(record=True):
# pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
# applied to Series or DatetimeIndex
# we aren't testing that here, so ignore.
warnings.simplefilter("ignore", PerformanceWarning)
# If `x + y` raises, then `y + x` should raise here as well
msg = (
r"unsupported operand type\(s\) for -: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'"
)
with pytest.raises(TypeError, match=msg):
obj1 - obj2
msg = "|".join(
[
"cannot subtract DatetimeArray from ndarray",
"ufunc (subtract|'subtract') cannot use operands with types "
r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
]
)
with pytest.raises(TypeError, match=msg):
obj2 - obj1
msg = (
r"unsupported operand type\(s\) for \+: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'"
)
with pytest.raises(TypeError, match=msg):
obj1 + obj2
msg = "|".join(
[
r"unsupported operand type\(s\) for \+: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'",
"ufunc (add|'add') cannot use operands with types "
r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
]
)
with pytest.raises(TypeError, match=msg):
obj2 + obj1
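# DateOffset arithmetic exercised below (illustrative):
#   >>> Timestamp("20130101 09:01") + pd.offsets.Second(5)
#   Timestamp('2013-01-01 09:01:05')
# Tick offsets (Day, Hour, ..., Nano) behave like fixed-size timedeltas,
# while relativedelta-style and anchored offsets (MonthEnd, YearBegin, ...)
# move dates according to calendar rules.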
class TestDatetime64DateOffsetArithmetic:
# -------------------------------------------------------------
# Tick DateOffsets
# TODO: parametrize over timezone?
def test_dt64arr_series_add_tick_DateOffset(self, box_with_array):
# GH#4532
# operate with pd.offsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:01:05"), Timestamp("20130101 9:02:05")]
)
ser = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = ser + pd.offsets.Second(5)
tm.assert_equal(result, expected)
result2 = pd.offsets.Second(5) + ser
tm.assert_equal(result2, expected)
def test_dt64arr_series_sub_tick_DateOffset(self, box_with_array):
# GH#4532
# operate with pd.offsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:00:55"), Timestamp("20130101 9:01:55")]
)
ser = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = ser - pd.offsets.Second(5)
tm.assert_equal(result, expected)
result2 = -pd.offsets.Second(5) + ser
tm.assert_equal(result2, expected)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
pd.offsets.Second(5) - ser
@pytest.mark.parametrize(
"cls_name", ["Day", "Hour", "Minute", "Second", "Milli", "Micro", "Nano"]
)
def test_dt64arr_add_sub_tick_DateOffset_smoke(self, cls_name, box_with_array):
# GH#4532
# smoke tests for valid DateOffsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
ser = tm.box_expected(ser, box_with_array)
offset_cls = getattr(pd.offsets, cls_name)
ser + offset_cls(5)
offset_cls(5) + ser
ser - offset_cls(5)
def test_dti_add_tick_tzaware(self, tz_aware_fixture, box_with_array):
# GH#21610, GH#22163 ensure DataFrame doesn't return object-dtype
tz = tz_aware_fixture
if tz == "US/Pacific":
dates = date_range("2012-11-01", periods=3, tz=tz)
offset = dates + pd.offsets.Hour(5)
assert dates[0] + pd.offsets.Hour(5) == offset[0]
dates = date_range("2010-11-01 00:00", periods=3, tz=tz, freq="H")
expected = DatetimeIndex(
["2010-11-01 05:00", "2010-11-01 06:00", "2010-11-01 07:00"],
freq="H",
tz=tz,
)
dates = tm.box_expected(dates, box_with_array)
expected = tm.box_expected(expected, box_with_array)
# TODO: sub?
for scalar in [pd.offsets.Hour(5), np.timedelta64(5, "h"), timedelta(hours=5)]:
offset = dates + scalar
tm.assert_equal(offset, expected)
offset = scalar + dates
tm.assert_equal(offset, expected)
# -------------------------------------------------------------
# RelativeDelta DateOffsets
def test_dt64arr_add_sub_relativedelta_offsets(self, box_with_array):
# GH#10699
vec = DatetimeIndex(
[
Timestamp("2000-01-05 00:15:00"),
Timestamp("2000-01-31 00:23:00"),
Timestamp("2000-01-01"),
Timestamp("2000-03-31"),
Timestamp("2000-02-29"),
Timestamp("2000-12-31"),
Timestamp("2000-05-15"),
Timestamp("2001-06-15"),
]
)
vec = tm.box_expected(vec, box_with_array)
vec_items = vec.iloc[0] if box_with_array is pd.DataFrame else vec
# DateOffset relativedelta fastpath
relative_kwargs = [
("years", 2),
("months", 5),
("days", 3),
("hours", 5),
("minutes", 10),
("seconds", 2),
("microseconds", 5),
]
for i, (unit, value) in enumerate(relative_kwargs):
off = DateOffset(**{unit: value})
expected = DatetimeIndex([x + off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + off)
expected = DatetimeIndex([x - off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - off)
off = DateOffset(**dict(relative_kwargs[: i + 1]))
expected = DatetimeIndex([x + off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + off)
expected = DatetimeIndex([x - off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - off)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
off - vec
# -------------------------------------------------------------
# Non-Tick, Non-RelativeDelta DateOffsets
# TODO: redundant with test_dt64arr_add_sub_DateOffset? that includes
# tz-aware cases which this does not
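    # Note on the DateOffset entries below: plural kwargs (e.g. ``months=5``)
    # shift by that amount, relativedelta-style, whereas singular kwargs
    # (``month=5``, ``day=4``) replace the corresponding field of each
    # timestamp.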
@pytest.mark.parametrize(
"cls_and_kwargs",
[
"YearBegin",
("YearBegin", {"month": 5}),
"YearEnd",
("YearEnd", {"month": 5}),
"MonthBegin",
"MonthEnd",
"SemiMonthEnd",
"SemiMonthBegin",
"Week",
("Week", {"weekday": 3}),
"Week",
("Week", {"weekday": 6}),
"BusinessDay",
"BDay",
"QuarterEnd",
"QuarterBegin",
"CustomBusinessDay",
"CDay",
"CBMonthEnd",
"CBMonthBegin",
"BMonthBegin",
"BMonthEnd",
"BusinessHour",
"BYearBegin",
"BYearEnd",
"BQuarterBegin",
("LastWeekOfMonth", {"weekday": 2}),
(
"FY5253Quarter",
{
"qtr_with_extra_week": 1,
"startingMonth": 1,
"weekday": 2,
"variation": "nearest",
},
),
("FY5253", {"weekday": 0, "startingMonth": 2, "variation": "nearest"}),
("WeekOfMonth", {"weekday": 2, "week": 2}),
"Easter",
("DateOffset", {"day": 4}),
("DateOffset", {"month": 5}),
],
)
@pytest.mark.parametrize("normalize", [True, False])
@pytest.mark.parametrize("n", [0, 5])
def test_dt64arr_add_sub_DateOffsets(
self, box_with_array, n, normalize, cls_and_kwargs
):
# GH#10699
# assert vectorized operation matches pointwise operations
if isinstance(cls_and_kwargs, tuple):
# If cls_name param is a tuple, then 2nd entry is kwargs for
# the offset constructor
cls_name, kwargs = cls_and_kwargs
else:
cls_name = cls_and_kwargs
kwargs = {}
if n == 0 and cls_name in [
"WeekOfMonth",
"LastWeekOfMonth",
"FY5253Quarter",
"FY5253",
]:
# passing n = 0 is invalid for these offset classes
return
vec = DatetimeIndex(
[
Timestamp("2000-01-05 00:15:00"),
                Timestamp("2000-01-31 00:23:00"),
from datetime import datetime
import numpy as np
import pandas as pd
import pytest
from numba import njit
import vectorbt as vbt
from tests.utils import record_arrays_close
from vectorbt.generic.enums import range_dt, drawdown_dt
from vectorbt.portfolio.enums import order_dt, trade_dt, log_dt
day_dt = np.timedelta64(86400000000000)
example_dt = np.dtype([
('id', np.int64),
('col', np.int64),
('idx', np.int64),
('some_field1', np.float64),
('some_field2', np.float64)
], align=True)
records_arr = np.asarray([
(0, 0, 0, 10, 21),
(1, 0, 1, 11, 20),
(2, 0, 2, 12, 19),
(3, 1, 0, 13, 18),
(4, 1, 1, 14, 17),
(5, 1, 2, 13, 18),
(6, 2, 0, 12, 19),
(7, 2, 1, 11, 20),
(8, 2, 2, 10, 21)
], dtype=example_dt)
records_nosort_arr = np.concatenate((
records_arr[0::3],
records_arr[1::3],
records_arr[2::3]
))
group_by = pd.Index(['g1', 'g1', 'g2', 'g2'])
wrapper = vbt.ArrayWrapper(
index=['x', 'y', 'z'],
columns=['a', 'b', 'c', 'd'],
ndim=2,
freq='1 days'
)
wrapper_grouped = wrapper.replace(group_by=group_by)
records = vbt.records.Records(wrapper, records_arr)
records_grouped = vbt.records.Records(wrapper_grouped, records_arr)
records_nosort = records.replace(records_arr=records_nosort_arr)
records_nosort_grouped = vbt.records.Records(wrapper_grouped, records_nosort_arr)
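# Fixture layout: three records in each of columns 'a', 'b' and 'c', none in
# 'd'; grouping by ``group_by`` merges 'a'/'b' into 'g1' (6 records) and
# 'c'/'d' into 'g2' (3 records).  The ``*_nosort`` variants hold the same
# records interleaved across columns, i.e. no longer sorted by column.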
# ############# Global ############# #
def setup_module():
vbt.settings.numba['check_func_suffix'] = True
vbt.settings.caching.enabled = False
vbt.settings.caching.whitelist = []
vbt.settings.caching.blacklist = []
def teardown_module():
vbt.settings.reset()
# ############# col_mapper.py ############# #
class TestColumnMapper:
def test_col_arr(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_arr,
np.array([0, 0, 0])
)
np.testing.assert_array_equal(
records.col_mapper.col_arr,
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
)
def test_get_col_arr(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_arr(),
records.col_mapper.col_arr
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_arr(),
np.array([0, 0, 0, 0, 0, 0])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_arr(),
np.array([0, 0, 0, 0, 0, 0, 1, 1, 1])
)
def test_col_range(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_range,
np.array([
[0, 3]
])
)
np.testing.assert_array_equal(
records.col_mapper.col_range,
np.array([
[0, 3],
[3, 6],
[6, 9],
[-1, -1]
])
)
def test_get_col_range(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_range(),
np.array([
[0, 3],
[3, 6],
[6, 9],
[-1, -1]
])
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_range(),
np.array([[0, 6]])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_range(),
np.array([[0, 6], [6, 9]])
)
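    # ``col_map`` is a tuple: element 0 holds record indices sorted by column
    # and element 1 the per-column record counts -- the layout asserted in
    # the two tests below.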
def test_col_map(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_map[0],
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
records['a'].col_mapper.col_map[1],
np.array([3])
)
np.testing.assert_array_equal(
records.col_mapper.col_map[0],
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
np.testing.assert_array_equal(
records.col_mapper.col_map[1],
np.array([3, 3, 3, 0])
)
def test_get_col_map(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_map()[0],
records.col_mapper.col_map[0]
)
np.testing.assert_array_equal(
records.col_mapper.get_col_map()[1],
records.col_mapper.col_map[1]
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_map()[0],
np.array([0, 1, 2, 3, 4, 5])
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_map()[1],
np.array([6])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_map()[0],
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_map()[1],
np.array([6, 3])
)
def test_is_sorted(self):
assert records.col_mapper.is_sorted()
assert not records_nosort.col_mapper.is_sorted()
# ############# mapped_array.py ############# #
mapped_array = records.map_field('some_field1')
mapped_array_grouped = records_grouped.map_field('some_field1')
mapped_array_nosort = records_nosort.map_field('some_field1')
mapped_array_nosort_grouped = records_nosort_grouped.map_field('some_field1')
mapping = {x: 'test_' + str(x) for x in pd.unique(mapped_array.values)}
mp_mapped_array = mapped_array.replace(mapping=mapping)
mp_mapped_array_grouped = mapped_array_grouped.replace(mapping=mapping)
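# A MappedArray pairs a flat array of per-record values with ``col_arr`` and
# ``idx_arr`` locating each value in the wrapper's columns and index; the
# ``mapping`` dict only relabels raw values in readable outputs such as
# ``value_counts()``.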
class TestMappedArray:
def test_config(self, tmp_path):
assert vbt.MappedArray.loads(mapped_array.dumps()) == mapped_array
mapped_array.save(tmp_path / 'mapped_array')
assert vbt.MappedArray.load(tmp_path / 'mapped_array') == mapped_array
def test_mapped_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].values,
np.array([10., 11., 12.])
)
np.testing.assert_array_equal(
mapped_array.values,
np.array([10., 11., 12., 13., 14., 13., 12., 11., 10.])
)
def test_id_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].id_arr,
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
mapped_array.id_arr,
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
def test_col_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].col_arr,
np.array([0, 0, 0])
)
np.testing.assert_array_equal(
mapped_array.col_arr,
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
)
def test_idx_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].idx_arr,
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
mapped_array.idx_arr,
np.array([0, 1, 2, 0, 1, 2, 0, 1, 2])
)
def test_is_sorted(self):
assert mapped_array.is_sorted()
assert mapped_array.is_sorted(incl_id=True)
assert not mapped_array_nosort.is_sorted()
assert not mapped_array_nosort.is_sorted(incl_id=True)
def test_sort(self):
assert mapped_array.sort().is_sorted()
assert mapped_array.sort().is_sorted(incl_id=True)
assert mapped_array.sort(incl_id=True).is_sorted(incl_id=True)
assert mapped_array_nosort.sort().is_sorted()
assert mapped_array_nosort.sort().is_sorted(incl_id=True)
assert mapped_array_nosort.sort(incl_id=True).is_sorted(incl_id=True)
def test_apply_mask(self):
mask_a = mapped_array['a'].values >= mapped_array['a'].values.mean()
np.testing.assert_array_equal(
mapped_array['a'].apply_mask(mask_a).id_arr,
np.array([1, 2])
)
mask = mapped_array.values >= mapped_array.values.mean()
filtered = mapped_array.apply_mask(mask)
np.testing.assert_array_equal(
filtered.id_arr,
np.array([2, 3, 4, 5, 6])
)
np.testing.assert_array_equal(filtered.col_arr, mapped_array.col_arr[mask])
np.testing.assert_array_equal(filtered.idx_arr, mapped_array.idx_arr[mask])
assert mapped_array_grouped.apply_mask(mask).wrapper == mapped_array_grouped.wrapper
assert mapped_array_grouped.apply_mask(mask, group_by=False).wrapper.grouper.group_by is None
def test_map_to_mask(self):
@njit
def every_2_nb(inout, idxs, col, mapped_arr):
inout[idxs[::2]] = True
np.testing.assert_array_equal(
mapped_array.map_to_mask(every_2_nb),
np.array([True, False, True, True, False, True, True, False, True])
)
def test_top_n_mask(self):
np.testing.assert_array_equal(
mapped_array.top_n_mask(1),
np.array([False, False, True, False, True, False, True, False, False])
)
def test_bottom_n_mask(self):
np.testing.assert_array_equal(
mapped_array.bottom_n_mask(1),
np.array([True, False, False, True, False, False, False, False, True])
)
def test_top_n(self):
np.testing.assert_array_equal(
mapped_array.top_n(1).id_arr,
np.array([2, 4, 6])
)
def test_bottom_n(self):
np.testing.assert_array_equal(
mapped_array.bottom_n(1).id_arr,
np.array([0, 3, 8])
)
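    # ``to_pd()`` scatters the mapped values back onto wrapper.index x
    # wrapper.columns via ``idx_arr``/``col_arr``; positions without a record
    # become ``fill_value`` (NaN by default) and overlapping (idx, col) pairs
    # raise, as checked below.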
def test_to_pd(self):
target = pd.DataFrame(
np.array([
[10., 13., 12., np.nan],
[11., 14., 11., np.nan],
[12., 13., 10., np.nan]
]),
index=wrapper.index,
columns=wrapper.columns
)
pd.testing.assert_series_equal(
mapped_array['a'].to_pd(),
target['a']
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(),
target
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(fill_value=0.),
target.fillna(0.)
)
mapped_array2 = vbt.MappedArray(
wrapper,
records_arr['some_field1'].tolist() + [1],
records_arr['col'].tolist() + [2],
idx_arr=records_arr['idx'].tolist() + [2]
)
with pytest.raises(Exception):
_ = mapped_array2.to_pd()
pd.testing.assert_series_equal(
mapped_array['a'].to_pd(ignore_index=True),
pd.Series(np.array([10., 11., 12.]), name='a')
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(ignore_index=True),
pd.DataFrame(
np.array([
[10., 13., 12., np.nan],
[11., 14., 11., np.nan],
[12., 13., 10., np.nan]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(fill_value=0, ignore_index=True),
pd.DataFrame(
np.array([
[10., 13., 12., 0.],
[11., 14., 11., 0.],
[12., 13., 10., 0.]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array_grouped.to_pd(ignore_index=True),
pd.DataFrame(
np.array([
[10., 12.],
[11., 11.],
[12., 10.],
[13., np.nan],
[14., np.nan],
[13., np.nan],
]),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
def test_apply(self):
@njit
def cumsum_apply_nb(idxs, col, a):
return np.cumsum(a)
np.testing.assert_array_equal(
mapped_array['a'].apply(cumsum_apply_nb).values,
np.array([10., 21., 33.])
)
np.testing.assert_array_equal(
mapped_array.apply(cumsum_apply_nb).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
mapped_array_grouped.apply(cumsum_apply_nb, apply_per_group=False).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
mapped_array_grouped.apply(cumsum_apply_nb, apply_per_group=True).values,
np.array([10., 21., 33., 46., 60., 73., 12., 23., 33.])
)
assert mapped_array_grouped.apply(cumsum_apply_nb).wrapper == \
mapped_array.apply(cumsum_apply_nb, group_by=group_by).wrapper
assert mapped_array.apply(cumsum_apply_nb, group_by=False).wrapper.grouper.group_by is None
def test_reduce(self):
@njit
def mean_reduce_nb(col, a):
return np.mean(a)
assert mapped_array['a'].reduce(mean_reduce_nb) == 11.
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb),
pd.Series(np.array([11., 13.333333333333334, 11., np.nan]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, fill_value=0.),
pd.Series(np.array([11., 13.333333333333334, 11., 0.]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, fill_value=0., wrap_kwargs=dict(dtype=np.int_)),
pd.Series(np.array([11., 13.333333333333334, 11., 0.]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, wrap_kwargs=dict(to_timedelta=True)),
pd.Series(np.array([11., 13.333333333333334, 11., np.nan]), index=wrapper.columns).rename('reduce') * day_dt
)
pd.testing.assert_series_equal(
mapped_array_grouped.reduce(mean_reduce_nb),
pd.Series([12.166666666666666, 11.0], index=pd.Index(['g1', 'g2'], dtype='object')).rename('reduce')
)
assert mapped_array_grouped['g1'].reduce(mean_reduce_nb) == 12.166666666666666
pd.testing.assert_series_equal(
mapped_array_grouped[['g1']].reduce(mean_reduce_nb),
pd.Series([12.166666666666666], index=pd.Index(['g1'], dtype='object')).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb),
mapped_array_grouped.reduce(mean_reduce_nb, group_by=False)
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, group_by=group_by),
mapped_array_grouped.reduce(mean_reduce_nb)
)
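    # With ``returns_idx=True`` the reducer's positional result is translated
    # into labels from wrapper.index (or kept as positions via
    # ``to_index=False``, where -1 marks columns without records), as the
    # next tests show.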
def test_reduce_to_idx(self):
@njit
def argmin_reduce_nb(col, a):
return np.argmin(a)
assert mapped_array['a'].reduce(argmin_reduce_nb, returns_idx=True) == 'x'
pd.testing.assert_series_equal(
mapped_array.reduce(argmin_reduce_nb, returns_idx=True),
pd.Series(np.array(['x', 'x', 'z', np.nan], dtype=object), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(argmin_reduce_nb, returns_idx=True, to_index=False),
pd.Series(np.array([0, 0, 2, -1], dtype=int), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array_grouped.reduce(argmin_reduce_nb, returns_idx=True, to_index=False),
pd.Series(np.array([0, 2], dtype=int), index=pd.Index(['g1', 'g2'], dtype='object')).rename('reduce')
)
def test_reduce_to_array(self):
@njit
def min_max_reduce_nb(col, a):
return np.array([np.min(a), np.max(a)])
pd.testing.assert_series_equal(
mapped_array['a'].reduce(min_max_reduce_nb, returns_array=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])),
pd.Series([10., 12.], index=pd.Index(['min', 'max'], dtype='object'), name='a')
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, wrap_kwargs=dict(name_or_index=['min', 'max'])),
pd.DataFrame(
np.array([
[10., 13., 10., np.nan],
[12., 14., 12., np.nan]
]),
index=pd.Index(['min', 'max'], dtype='object'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, fill_value=0.),
pd.DataFrame(
np.array([
[10., 13., 10., 0.],
[12., 14., 12., 0.]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, wrap_kwargs=dict(to_timedelta=True)),
pd.DataFrame(
np.array([
[10., 13., 10., np.nan],
[12., 14., 12., np.nan]
]),
columns=wrapper.columns
) * day_dt
)
pd.testing.assert_frame_equal(
mapped_array_grouped.reduce(min_max_reduce_nb, returns_array=True),
pd.DataFrame(
np.array([
[10., 10.],
[14., 12.]
]),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True),
mapped_array_grouped.reduce(min_max_reduce_nb, returns_array=True, group_by=False)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, group_by=group_by),
mapped_array_grouped.reduce(min_max_reduce_nb, returns_array=True)
)
pd.testing.assert_series_equal(
mapped_array_grouped['g1'].reduce(min_max_reduce_nb, returns_array=True),
pd.Series([10., 14.], name='g1')
)
pd.testing.assert_frame_equal(
mapped_array_grouped[['g1']].reduce(min_max_reduce_nb, returns_array=True),
pd.DataFrame([[10.], [14.]], columns=pd.Index(['g1'], dtype='object'))
)
def test_reduce_to_idx_array(self):
@njit
def idxmin_idxmax_reduce_nb(col, a):
return np.array([np.argmin(a), np.argmax(a)])
pd.testing.assert_series_equal(
mapped_array['a'].reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])
),
pd.Series(
np.array(['x', 'z'], dtype=object),
index=pd.Index(['min', 'max'], dtype='object'),
name='a'
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])
),
pd.DataFrame(
{
'a': ['x', 'z'],
'b': ['x', 'y'],
'c': ['z', 'x'],
'd': [np.nan, np.nan]
},
index=pd.Index(['min', 'max'], dtype='object')
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
to_index=False
),
pd.DataFrame(
np.array([
[0, 0, 2, -1],
[2, 1, 0, -1]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array_grouped.reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
to_index=False
),
pd.DataFrame(
np.array([
[0, 2],
[1, 0]
]),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
def test_nth(self):
assert mapped_array['a'].nth(0) == 10.
pd.testing.assert_series_equal(
mapped_array.nth(0),
pd.Series(np.array([10., 13., 12., np.nan]), index=wrapper.columns).rename('nth')
)
assert mapped_array['a'].nth(-1) == 12.
pd.testing.assert_series_equal(
mapped_array.nth(-1),
pd.Series(np.array([12., 13., 10., np.nan]), index=wrapper.columns).rename('nth')
)
with pytest.raises(Exception):
_ = mapped_array.nth(10)
pd.testing.assert_series_equal(
mapped_array_grouped.nth(0),
pd.Series(np.array([10., 12.]), index=pd.Index(['g1', 'g2'], dtype='object')).rename('nth')
)
def test_nth_index(self):
        assert mapped_array['a'].nth_index(0) == 'x'
pd.testing.assert_series_equal(
mapped_array.nth_index(0),
pd.Series(
np.array(['x', 'x', 'x', np.nan], dtype='object'),
index=wrapper.columns
).rename('nth_index')
)
        assert mapped_array['a'].nth_index(-1) == 'z'
pd.testing.assert_series_equal(
mapped_array.nth_index(-1),
pd.Series(
np.array(['z', 'z', 'z', np.nan], dtype='object'),
index=wrapper.columns
).rename('nth_index')
)
with pytest.raises(Exception):
_ = mapped_array.nth_index(10)
pd.testing.assert_series_equal(
mapped_array_grouped.nth_index(0),
pd.Series(
np.array(['x', 'x'], dtype='object'),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('nth_index')
)
def test_min(self):
assert mapped_array['a'].min() == mapped_array['a'].to_pd().min()
pd.testing.assert_series_equal(
mapped_array.min(),
mapped_array.to_pd().min().rename('min')
)
pd.testing.assert_series_equal(
mapped_array_grouped.min(),
pd.Series([10., 10.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('min')
)
def test_max(self):
assert mapped_array['a'].max() == mapped_array['a'].to_pd().max()
pd.testing.assert_series_equal(
mapped_array.max(),
mapped_array.to_pd().max().rename('max')
)
pd.testing.assert_series_equal(
mapped_array_grouped.max(),
pd.Series([14., 12.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('max')
)
def test_mean(self):
assert mapped_array['a'].mean() == mapped_array['a'].to_pd().mean()
pd.testing.assert_series_equal(
mapped_array.mean(),
mapped_array.to_pd().mean().rename('mean')
)
pd.testing.assert_series_equal(
mapped_array_grouped.mean(),
pd.Series([12.166667, 11.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('mean')
)
def test_median(self):
assert mapped_array['a'].median() == mapped_array['a'].to_pd().median()
pd.testing.assert_series_equal(
mapped_array.median(),
mapped_array.to_pd().median().rename('median')
)
pd.testing.assert_series_equal(
mapped_array_grouped.median(),
pd.Series([12.5, 11.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('median')
)
def test_std(self):
assert mapped_array['a'].std() == mapped_array['a'].to_pd().std()
pd.testing.assert_series_equal(
mapped_array.std(),
mapped_array.to_pd().std().rename('std')
)
pd.testing.assert_series_equal(
mapped_array.std(ddof=0),
mapped_array.to_pd().std(ddof=0).rename('std')
)
pd.testing.assert_series_equal(
mapped_array_grouped.std(),
pd.Series([1.4719601443879746, 1.0], index=pd.Index(['g1', 'g2'], dtype='object')).rename('std')
)
def test_sum(self):
assert mapped_array['a'].sum() == mapped_array['a'].to_pd().sum()
pd.testing.assert_series_equal(
mapped_array.sum(),
mapped_array.to_pd().sum().rename('sum')
)
pd.testing.assert_series_equal(
mapped_array_grouped.sum(),
pd.Series([73.0, 33.0], index=pd.Index(['g1', 'g2'], dtype='object')).rename('sum')
)
def test_count(self):
assert mapped_array['a'].count() == mapped_array['a'].to_pd().count()
pd.testing.assert_series_equal(
mapped_array.count(),
mapped_array.to_pd().count().rename('count')
)
pd.testing.assert_series_equal(
mapped_array_grouped.count(),
pd.Series([6, 3], index=pd.Index(['g1', 'g2'], dtype='object')).rename('count')
)
def test_idxmin(self):
assert mapped_array['a'].idxmin() == mapped_array['a'].to_pd().idxmin()
pd.testing.assert_series_equal(
mapped_array.idxmin(),
mapped_array.to_pd().idxmin().rename('idxmin')
)
pd.testing.assert_series_equal(
mapped_array_grouped.idxmin(),
pd.Series(
np.array(['x', 'z'], dtype=object),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('idxmin')
)
def test_idxmax(self):
assert mapped_array['a'].idxmax() == mapped_array['a'].to_pd().idxmax()
pd.testing.assert_series_equal(
mapped_array.idxmax(),
mapped_array.to_pd().idxmax().rename('idxmax')
)
pd.testing.assert_series_equal(
mapped_array_grouped.idxmax(),
pd.Series(
np.array(['y', 'x'], dtype=object),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('idxmax')
)
def test_describe(self):
pd.testing.assert_series_equal(
mapped_array['a'].describe(),
mapped_array['a'].to_pd().describe()
)
pd.testing.assert_frame_equal(
mapped_array.describe(percentiles=None),
mapped_array.to_pd().describe(percentiles=None)
)
pd.testing.assert_frame_equal(
mapped_array.describe(percentiles=[]),
mapped_array.to_pd().describe(percentiles=[])
)
pd.testing.assert_frame_equal(
mapped_array.describe(percentiles=np.arange(0, 1, 0.1)),
mapped_array.to_pd().describe(percentiles=np.arange(0, 1, 0.1))
)
pd.testing.assert_frame_equal(
mapped_array_grouped.describe(),
pd.DataFrame(
np.array([
[6., 3.],
[12.16666667, 11.],
[1.47196014, 1.],
[10., 10.],
[11.25, 10.5],
[12.5, 11.],
[13., 11.5],
[14., 12.]
]),
columns=pd.Index(['g1', 'g2'], dtype='object'),
index=mapped_array.describe().index
)
)
def test_value_counts(self):
pd.testing.assert_series_equal(
mapped_array['a'].value_counts(),
pd.Series(
np.array([1, 1, 1]),
index=pd.Float64Index([10.0, 11.0, 12.0], dtype='float64'),
name='a'
)
)
pd.testing.assert_series_equal(
mapped_array['a'].value_counts(mapping=mapping),
pd.Series(
np.array([1, 1, 1]),
index=pd.Index(['test_10.0', 'test_11.0', 'test_12.0'], dtype='object'),
name='a'
)
)
pd.testing.assert_frame_equal(
mapped_array.value_counts(),
pd.DataFrame(
np.array([
[1, 0, 1, 0],
[1, 0, 1, 0],
[1, 0, 1, 0],
[0, 2, 0, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([10.0, 11.0, 12.0, 13.0, 14.0], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array_grouped.value_counts(),
pd.DataFrame(
np.array([
[1, 1],
[1, 1],
[1, 1],
[2, 0],
[1, 0]
]),
index=pd.Float64Index([10.0, 11.0, 12.0, 13.0, 14.0], dtype='float64'),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
mapped_array2 = mapped_array.replace(mapped_arr=[4, 4, 3, 2, np.nan, 4, 3, 2, 1])
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort_uniques=False),
pd.DataFrame(
np.array([
[2, 1, 0, 0],
[1, 0, 1, 0],
[0, 1, 1, 0],
[0, 0, 1, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([4.0, 3.0, 2.0, 1.0, None], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort_uniques=True),
pd.DataFrame(
np.array([
[0, 0, 1, 0],
[0, 1, 1, 0],
[1, 0, 1, 0],
[2, 1, 0, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([1.0, 2.0, 3.0, 4.0, None], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True),
pd.DataFrame(
np.array([
[2, 1, 0, 0],
[0, 1, 1, 0],
[1, 0, 1, 0],
[0, 0, 1, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([4.0, 2.0, 3.0, 1.0, np.nan], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True, ascending=True),
pd.DataFrame(
np.array([
[0, 0, 1, 0],
[0, 1, 0, 0],
[0, 1, 1, 0],
[1, 0, 1, 0],
[2, 1, 0, 0]
]),
index=pd.Float64Index([1.0, np.nan, 2.0, 3.0, 4.0], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True, normalize=True),
pd.DataFrame(
np.array([
[0.2222222222222222, 0.1111111111111111, 0.0, 0.0],
[0.0, 0.1111111111111111, 0.1111111111111111, 0.0],
[0.1111111111111111, 0.0, 0.1111111111111111, 0.0],
[0.0, 0.0, 0.1111111111111111, 0.0],
[0.0, 0.1111111111111111, 0.0, 0.0]
]),
index=pd.Float64Index([4.0, 2.0, 3.0, 1.0, np.nan], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True, normalize=True, dropna=True),
pd.DataFrame(
np.array([
[0.25, 0.125, 0.0, 0.0],
[0.0, 0.125, 0.125, 0.0],
[0.125, 0.0, 0.125, 0.0],
[0.0, 0.0, 0.125, 0.0]
]),
index=pd.Float64Index([4.0, 2.0, 3.0, 1.0], dtype='float64'),
columns=wrapper.columns
)
)
@pytest.mark.parametrize(
"test_nosort",
[False, True],
)
def test_indexing(self, test_nosort):
if test_nosort:
ma = mapped_array_nosort
ma_grouped = mapped_array_nosort_grouped
else:
ma = mapped_array
ma_grouped = mapped_array_grouped
np.testing.assert_array_equal(
ma['a'].id_arr,
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
ma['a'].col_arr,
np.array([0, 0, 0])
)
pd.testing.assert_index_equal(
ma['a'].wrapper.columns,
pd.Index(['a'], dtype='object')
)
np.testing.assert_array_equal(
ma['b'].id_arr,
np.array([3, 4, 5])
)
np.testing.assert_array_equal(
ma['b'].col_arr,
np.array([0, 0, 0])
)
pd.testing.assert_index_equal(
ma['b'].wrapper.columns,
pd.Index(['b'], dtype='object')
)
np.testing.assert_array_equal(
ma[['a', 'a']].id_arr,
np.array([0, 1, 2, 0, 1, 2])
)
np.testing.assert_array_equal(
ma[['a', 'a']].col_arr,
np.array([0, 0, 0, 1, 1, 1])
)
pd.testing.assert_index_equal(
ma[['a', 'a']].wrapper.columns,
pd.Index(['a', 'a'], dtype='object')
)
np.testing.assert_array_equal(
ma[['a', 'b']].id_arr,
np.array([0, 1, 2, 3, 4, 5])
)
np.testing.assert_array_equal(
ma[['a', 'b']].col_arr,
np.array([0, 0, 0, 1, 1, 1])
)
pd.testing.assert_index_equal(
ma[['a', 'b']].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
with pytest.raises(Exception):
_ = ma.iloc[::2, :] # changing time not supported
pd.testing.assert_index_equal(
ma_grouped['g1'].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
assert ma_grouped['g1'].wrapper.ndim == 2
assert ma_grouped['g1'].wrapper.grouped_ndim == 1
pd.testing.assert_index_equal(
ma_grouped['g1'].wrapper.grouper.group_by,
pd.Index(['g1', 'g1'], dtype='object')
)
pd.testing.assert_index_equal(
ma_grouped['g2'].wrapper.columns,
pd.Index(['c', 'd'], dtype='object')
)
assert ma_grouped['g2'].wrapper.ndim == 2
assert ma_grouped['g2'].wrapper.grouped_ndim == 1
pd.testing.assert_index_equal(
ma_grouped['g2'].wrapper.grouper.group_by,
pd.Index(['g2', 'g2'], dtype='object')
)
pd.testing.assert_index_equal(
ma_grouped[['g1']].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
assert ma_grouped[['g1']].wrapper.ndim == 2
assert ma_grouped[['g1']].wrapper.grouped_ndim == 2
pd.testing.assert_index_equal(
ma_grouped[['g1']].wrapper.grouper.group_by,
pd.Index(['g1', 'g1'], dtype='object')
)
pd.testing.assert_index_equal(
ma_grouped[['g1', 'g2']].wrapper.columns,
pd.Index(['a', 'b', 'c', 'd'], dtype='object')
)
assert ma_grouped[['g1', 'g2']].wrapper.ndim == 2
assert ma_grouped[['g1', 'g2']].wrapper.grouped_ndim == 2
pd.testing.assert_index_equal(
ma_grouped[['g1', 'g2']].wrapper.grouper.group_by,
pd.Index(['g1', 'g1', 'g2', 'g2'], dtype='object')
)
def test_magic(self):
a = vbt.MappedArray(
wrapper,
records_arr['some_field1'],
records_arr['col'],
id_arr=records_arr['id'],
idx_arr=records_arr['idx']
)
a_inv = vbt.MappedArray(
wrapper,
records_arr['some_field1'][::-1],
records_arr['col'][::-1],
id_arr=records_arr['id'][::-1],
idx_arr=records_arr['idx'][::-1]
)
b = records_arr['some_field2']
a_bool = vbt.MappedArray(
wrapper,
records_arr['some_field1'] > np.mean(records_arr['some_field1']),
records_arr['col'],
id_arr=records_arr['id'],
idx_arr=records_arr['idx']
)
b_bool = records_arr['some_field2'] > np.mean(records_arr['some_field2'])
assert a ** a == a ** 2
with pytest.raises(Exception):
_ = a * a_inv
# binary ops
# comparison ops
np.testing.assert_array_equal((a == b).values, a.values == b)
np.testing.assert_array_equal((a != b).values, a.values != b)
np.testing.assert_array_equal((a < b).values, a.values < b)
np.testing.assert_array_equal((a > b).values, a.values > b)
np.testing.assert_array_equal((a <= b).values, a.values <= b)
np.testing.assert_array_equal((a >= b).values, a.values >= b)
# arithmetic ops
np.testing.assert_array_equal((a + b).values, a.values + b)
np.testing.assert_array_equal((a - b).values, a.values - b)
np.testing.assert_array_equal((a * b).values, a.values * b)
np.testing.assert_array_equal((a ** b).values, a.values ** b)
np.testing.assert_array_equal((a % b).values, a.values % b)
np.testing.assert_array_equal((a // b).values, a.values // b)
np.testing.assert_array_equal((a / b).values, a.values / b)
# __r*__ is only called if the left object does not have an __*__ method
np.testing.assert_array_equal((10 + a).values, 10 + a.values)
np.testing.assert_array_equal((10 - a).values, 10 - a.values)
np.testing.assert_array_equal((10 * a).values, 10 * a.values)
np.testing.assert_array_equal((10 ** a).values, 10 ** a.values)
np.testing.assert_array_equal((10 % a).values, 10 % a.values)
np.testing.assert_array_equal((10 // a).values, 10 // a.values)
np.testing.assert_array_equal((10 / a).values, 10 / a.values)
# mask ops
np.testing.assert_array_equal((a_bool & b_bool).values, a_bool.values & b_bool)
np.testing.assert_array_equal((a_bool | b_bool).values, a_bool.values | b_bool)
np.testing.assert_array_equal((a_bool ^ b_bool).values, a_bool.values ^ b_bool)
np.testing.assert_array_equal((True & a_bool).values, True & a_bool.values)
np.testing.assert_array_equal((True | a_bool).values, True | a_bool.values)
np.testing.assert_array_equal((True ^ a_bool).values, True ^ a_bool.values)
# unary ops
np.testing.assert_array_equal((-a).values, -a.values)
np.testing.assert_array_equal((+a).values, +a.values)
np.testing.assert_array_equal((abs(-a)).values, abs((-a.values)))
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Count', 'Mean', 'Std', 'Min', 'Median', 'Max', 'Min Index', 'Max Index'
], dtype='object')
pd.testing.assert_series_equal(
mapped_array.stats(),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'),
2.25, 11.777777777777779, 0.859116756396542, 11.0, 11.666666666666666, 12.666666666666666
],
index=stats_index[:-2],
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
mapped_array.stats(column='a'),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'),
3, 11.0, 1.0, 10.0, 11.0, 12.0, 'x', 'z'
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
mapped_array.stats(column='g1', group_by=group_by),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'),
6, 12.166666666666666, 1.4719601443879746, 10.0, 12.5, 14.0, 'x', 'y'
],
index=stats_index,
name='g1'
)
)
pd.testing.assert_series_equal(
mapped_array['c'].stats(),
mapped_array.stats(column='c')
)
pd.testing.assert_series_equal(
mapped_array['c'].stats(),
mapped_array.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
mapped_array_grouped['g2'].stats(),
mapped_array_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
mapped_array_grouped['g2'].stats(),
mapped_array.stats(column='g2', group_by=group_by)
)
stats_df = mapped_array.stats(agg_func=None)
assert stats_df.shape == (4, 11)
pd.testing.assert_index_equal(stats_df.index, mapped_array.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
def test_stats_mapping(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Count', 'Value Counts: test_10.0',
'Value Counts: test_11.0', 'Value Counts: test_12.0',
'Value Counts: test_13.0', 'Value Counts: test_14.0'
], dtype='object')
pd.testing.assert_series_equal(
mp_mapped_array.stats(),
pd.Series([
'x',
'z',
pd.Timedelta('3 days 00:00:00'),
2.25, 0.5, 0.5, 0.5, 0.5, 0.25
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
mp_mapped_array.stats(column='a'),
pd.Series([
'x',
'z',
pd.Timedelta('3 days 00:00:00'),
3, 1, 1, 1, 0, 0
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
mp_mapped_array.stats(column='g1', group_by=group_by),
pd.Series([
'x',
'z',
pd.Timedelta('3 days 00:00:00'),
6, 1, 1, 1, 2, 1
],
index=stats_index,
name='g1'
)
)
pd.testing.assert_series_equal(
mp_mapped_array.stats(),
mapped_array.stats(settings=dict(mapping=mapping))
)
pd.testing.assert_series_equal(
mp_mapped_array['c'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array.stats(column='c')
)
pd.testing.assert_series_equal(
mp_mapped_array['c'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
mp_mapped_array_grouped['g2'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
mp_mapped_array_grouped['g2'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array.stats(column='g2', group_by=group_by)
)
stats_df = mp_mapped_array.stats(agg_func=None)
assert stats_df.shape == (4, 9)
pd.testing.assert_index_equal(stats_df.index, mp_mapped_array.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
# ############# base.py ############# #
class TestRecords:
def test_config(self, tmp_path):
assert vbt.Records.loads(records['a'].dumps()) == records['a']
assert vbt.Records.loads(records.dumps()) == records
records.save(tmp_path / 'records')
assert vbt.Records.load(tmp_path / 'records') == records
def test_records(self):
pd.testing.assert_frame_equal(
records.records,
pd.DataFrame.from_records(records_arr)
)
def test_recarray(self):
np.testing.assert_array_equal(records['a'].recarray.some_field1, records['a'].values['some_field1'])
np.testing.assert_array_equal(records.recarray.some_field1, records.values['some_field1'])
def test_records_readable(self):
pd.testing.assert_frame_equal(
records.records_readable,
pd.DataFrame([
[0, 'a', 'x', 10.0, 21.0], [1, 'a', 'y', 11.0, 20.0], [2, 'a', 'z', 12.0, 19.0],
[3, 'b', 'x', 13.0, 18.0], [4, 'b', 'y', 14.0, 17.0], [5, 'b', 'z', 13.0, 18.0],
[6, 'c', 'x', 12.0, 19.0], [7, 'c', 'y', 11.0, 20.0], [8, 'c', 'z', 10.0, 21.0]
], columns=pd.Index(['Id', 'Column', 'Timestamp', 'some_field1', 'some_field2'], dtype='object'))
)
def test_is_sorted(self):
assert records.is_sorted()
assert records.is_sorted(incl_id=True)
assert not records_nosort.is_sorted()
assert not records_nosort.is_sorted(incl_id=True)
def test_sort(self):
assert records.sort().is_sorted()
assert records.sort().is_sorted(incl_id=True)
assert records.sort(incl_id=True).is_sorted(incl_id=True)
assert records_nosort.sort().is_sorted()
assert records_nosort.sort().is_sorted(incl_id=True)
assert records_nosort.sort(incl_id=True).is_sorted(incl_id=True)
def test_apply_mask(self):
mask_a = records['a'].values['some_field1'] >= records['a'].values['some_field1'].mean()
record_arrays_close(
records['a'].apply_mask(mask_a).values,
np.array([
(1, 0, 1, 11., 20.), (2, 0, 2, 12., 19.)
], dtype=example_dt)
)
mask = records.values['some_field1'] >= records.values['some_field1'].mean()
filtered = records.apply_mask(mask)
record_arrays_close(
filtered.values,
np.array([
(2, 0, 2, 12., 19.), (3, 1, 0, 13., 18.), (4, 1, 1, 14., 17.),
(5, 1, 2, 13., 18.), (6, 2, 0, 12., 19.)
], dtype=example_dt)
)
assert records_grouped.apply_mask(mask).wrapper == records_grouped.wrapper
def test_map_field(self):
np.testing.assert_array_equal(
records['a'].map_field('some_field1').values,
np.array([10., 11., 12.])
)
np.testing.assert_array_equal(
records.map_field('some_field1').values,
np.array([10., 11., 12., 13., 14., 13., 12., 11., 10.])
)
assert records_grouped.map_field('some_field1').wrapper == \
records.map_field('some_field1', group_by=group_by).wrapper
assert records_grouped.map_field('some_field1', group_by=False).wrapper.grouper.group_by is None
def test_map(self):
@njit
def map_func_nb(record):
return record['some_field1'] + record['some_field2']
np.testing.assert_array_equal(
records['a'].map(map_func_nb).values,
np.array([31., 31., 31.])
)
np.testing.assert_array_equal(
records.map(map_func_nb).values,
np.array([31., 31., 31., 31., 31., 31., 31., 31., 31.])
)
assert records_grouped.map(map_func_nb).wrapper == \
records.map(map_func_nb, group_by=group_by).wrapper
assert records_grouped.map(map_func_nb, group_by=False).wrapper.grouper.group_by is None
def test_map_array(self):
arr = records_arr['some_field1'] + records_arr['some_field2']
np.testing.assert_array_equal(
records['a'].map_array(arr[:3]).values,
np.array([31., 31., 31.])
)
np.testing.assert_array_equal(
records.map_array(arr).values,
np.array([31., 31., 31., 31., 31., 31., 31., 31., 31.])
)
assert records_grouped.map_array(arr).wrapper == \
records.map_array(arr, group_by=group_by).wrapper
assert records_grouped.map_array(arr, group_by=False).wrapper.grouper.group_by is None
def test_apply(self):
@njit
def cumsum_apply_nb(records):
return np.cumsum(records['some_field1'])
np.testing.assert_array_equal(
records['a'].apply(cumsum_apply_nb).values,
np.array([10., 21., 33.])
)
np.testing.assert_array_equal(
records.apply(cumsum_apply_nb).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
records_grouped.apply(cumsum_apply_nb, apply_per_group=False).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
records_grouped.apply(cumsum_apply_nb, apply_per_group=True).values,
np.array([10., 21., 33., 46., 60., 73., 12., 23., 33.])
)
assert records_grouped.apply(cumsum_apply_nb).wrapper == \
records.apply(cumsum_apply_nb, group_by=group_by).wrapper
assert records_grouped.apply(cumsum_apply_nb, group_by=False).wrapper.grouper.group_by is None
def test_count(self):
assert records['a'].count() == 3
pd.testing.assert_series_equal(
records.count(),
pd.Series(
np.array([3, 3, 3, 0]),
index=wrapper.columns
).rename('count')
)
assert records_grouped['g1'].count() == 6
pd.testing.assert_series_equal(
records_grouped.count(),
pd.Series(
np.array([6, 3]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('count')
)
@pytest.mark.parametrize(
"test_nosort",
[False, True],
)
def test_indexing(self, test_nosort):
if test_nosort:
r = records_nosort
r_grouped = records_nosort_grouped
else:
r = records
r_grouped = records_grouped
record_arrays_close(
r['a'].values,
np.array([
(0, 0, 0, 10., 21.), (1, 0, 1, 11., 20.), (2, 0, 2, 12., 19.)
], dtype=example_dt)
)
pd.testing.assert_index_equal(
r['a'].wrapper.columns,
pd.Index(['a'], dtype='object')
)
pd.testing.assert_index_equal(
r['b'].wrapper.columns,
pd.Index(['b'], dtype='object')
)
record_arrays_close(
r[['a', 'a']].values,
np.array([
(0, 0, 0, 10., 21.), (1, 0, 1, 11., 20.), (2, 0, 2, 12., 19.),
(0, 1, 0, 10., 21.), (1, 1, 1, 11., 20.), (2, 1, 2, 12., 19.)
], dtype=example_dt)
)
pd.testing.assert_index_equal(
r[['a', 'a']].wrapper.columns,
pd.Index(['a', 'a'], dtype='object')
)
record_arrays_close(
r[['a', 'b']].values,
np.array([
(0, 0, 0, 10., 21.), (1, 0, 1, 11., 20.), (2, 0, 2, 12., 19.),
(3, 1, 0, 13., 18.), (4, 1, 1, 14., 17.), (5, 1, 2, 13., 18.)
], dtype=example_dt)
)
pd.testing.assert_index_equal(
r[['a', 'b']].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
with pytest.raises(Exception):
_ = r.iloc[::2, :] # changing time not supported
pd.testing.assert_index_equal(
r_grouped['g1'].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
assert r_grouped['g1'].wrapper.ndim == 2
assert r_grouped['g1'].wrapper.grouped_ndim == 1
pd.testing.assert_index_equal(
r_grouped['g1'].wrapper.grouper.group_by,
pd.Index(['g1', 'g1'], dtype='object')
)
pd.testing.assert_index_equal(
r_grouped['g2'].wrapper.columns,
pd.Index(['c', 'd'], dtype='object')
)
assert r_grouped['g2'].wrapper.ndim == 2
assert r_grouped['g2'].wrapper.grouped_ndim == 1
pd.testing.assert_index_equal(
r_grouped['g2'].wrapper.grouper.group_by,
pd.Index(['g2', 'g2'], dtype='object')
)
pd.testing.assert_index_equal(
r_grouped[['g1']].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
assert r_grouped[['g1']].wrapper.ndim == 2
assert r_grouped[['g1']].wrapper.grouped_ndim == 2
pd.testing.assert_index_equal(
r_grouped[['g1']].wrapper.grouper.group_by,
pd.Index(['g1', 'g1'], dtype='object')
)
pd.testing.assert_index_equal(
r_grouped[['g1', 'g2']].wrapper.columns,
pd.Index(['a', 'b', 'c', 'd'], dtype='object')
)
assert r_grouped[['g1', 'g2']].wrapper.ndim == 2
assert r_grouped[['g1', 'g2']].wrapper.grouped_ndim == 2
pd.testing.assert_index_equal(
r_grouped[['g1', 'g2']].wrapper.grouper.group_by,
pd.Index(['g1', 'g1', 'g2', 'g2'], dtype='object')
)
def test_filtering(self):
filtered_records = vbt.Records(wrapper, records_arr[[0, -1]])
record_arrays_close(
filtered_records.values,
np.array([(0, 0, 0, 10., 21.), (8, 2, 2, 10., 21.)], dtype=example_dt)
)
# a
record_arrays_close(
filtered_records['a'].values,
np.array([(0, 0, 0, 10., 21.)], dtype=example_dt)
)
np.testing.assert_array_equal(
filtered_records['a'].map_field('some_field1').id_arr,
np.array([0])
)
assert filtered_records['a'].map_field('some_field1').min() == 10.
assert filtered_records['a'].count() == 1.
# b
record_arrays_close(
filtered_records['b'].values,
np.array([], dtype=example_dt)
)
np.testing.assert_array_equal(
filtered_records['b'].map_field('some_field1').id_arr,
np.array([])
)
assert np.isnan(filtered_records['b'].map_field('some_field1').min())
assert filtered_records['b'].count() == 0.
# c
record_arrays_close(
filtered_records['c'].values,
np.array([(8, 0, 2, 10., 21.)], dtype=example_dt)
)
np.testing.assert_array_equal(
filtered_records['c'].map_field('some_field1').id_arr,
np.array([8])
)
assert filtered_records['c'].map_field('some_field1').min() == 10.
assert filtered_records['c'].count() == 1.
# d
record_arrays_close(
filtered_records['d'].values,
np.array([], dtype=example_dt)
)
np.testing.assert_array_equal(
filtered_records['d'].map_field('some_field1').id_arr,
np.array([])
)
assert np.isnan(filtered_records['d'].map_field('some_field1').min())
assert filtered_records['d'].count() == 0.
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Count'
], dtype='object')
pd.testing.assert_series_equal(
records.stats(),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'), 2.25
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
records.stats(column='a'),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'), 3
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
records.stats(column='g1', group_by=group_by),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'), 6
],
index=stats_index,
name='g1'
)
)
pd.testing.assert_series_equal(
records['c'].stats(),
records.stats(column='c')
)
pd.testing.assert_series_equal(
records['c'].stats(),
records.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
records_grouped['g2'].stats(),
records_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
records_grouped['g2'].stats(),
records.stats(column='g2', group_by=group_by)
)
stats_df = records.stats(agg_func=None)
assert stats_df.shape == (4, 4)
pd.testing.assert_index_equal(stats_df.index, records.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
# ############# ranges.py ############# #
ts = pd.DataFrame({
'a': [1, -1, 3, -1, 5, -1],
'b': [-1, -1, -1, 4, 5, 6],
'c': [1, 2, 3, -1, -1, -1],
'd': [-1, -1, -1, -1, -1, -1]
}, index=[
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
datetime(2020, 1, 4),
datetime(2020, 1, 5),
datetime(2020, 1, 6)
])
ranges = vbt.Ranges.from_ts(ts, wrapper_kwargs=dict(freq='1 days'))
ranges_grouped = vbt.Ranges.from_ts(ts, wrapper_kwargs=dict(freq='1 days', group_by=group_by))
class TestRanges:
def test_mapped_fields(self):
for name in range_dt.names:
np.testing.assert_array_equal(
getattr(ranges, name).values,
ranges.values[name]
)
def test_from_ts(self):
record_arrays_close(
ranges.values,
np.array([
(0, 0, 0, 1, 1), (1, 0, 2, 3, 1), (2, 0, 4, 5, 1), (3, 1, 3, 5, 0), (4, 2, 0, 3, 1)
], dtype=range_dt)
)
assert ranges.wrapper.freq == day_dt
pd.testing.assert_index_equal(
ranges_grouped.wrapper.grouper.group_by,
group_by
)
def test_records_readable(self):
records_readable = ranges.records_readable
np.testing.assert_array_equal(
records_readable['Range Id'].values,
np.array([
0, 1, 2, 3, 4
])
)
np.testing.assert_array_equal(
records_readable['Column'].values,
np.array([
'a', 'a', 'a', 'b', 'c'
])
)
np.testing.assert_array_equal(
records_readable['Start Timestamp'].values,
np.array([
'2020-01-01T00:00:00.000000000', '2020-01-03T00:00:00.000000000',
'2020-01-05T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-01T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['End Timestamp'].values,
np.array([
'2020-01-02T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-06T00:00:00.000000000',
'2020-01-04T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['Status'].values,
np.array([
'Closed', 'Closed', 'Closed', 'Open', 'Closed'
])
)
def test_to_mask(self):
pd.testing.assert_series_equal(
ranges['a'].to_mask(),
ts['a'] != -1
)
pd.testing.assert_frame_equal(
ranges.to_mask(),
ts != -1
)
pd.testing.assert_frame_equal(
ranges_grouped.to_mask(),
pd.DataFrame(
[
[True, True],
[False, True],
[True, True],
[True, False],
[True, False],
[True, False]
],
index=ts.index,
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
def test_duration(self):
np.testing.assert_array_equal(
ranges['a'].duration.values,
np.array([1, 1, 1])
)
np.testing.assert_array_equal(
ranges.duration.values,
np.array([1, 1, 1, 3, 3])
)
def test_avg_duration(self):
assert ranges['a'].avg_duration() == pd.Timedelta('1 days 00:00:00')
pd.testing.assert_series_equal(
ranges.avg_duration(),
pd.Series(
np.array([86400000000000, 259200000000000, 259200000000000, 'NaT'], dtype='timedelta64[ns]'),
index=wrapper.columns
).rename('avg_duration')
)
pd.testing.assert_series_equal(
ranges_grouped.avg_duration(),
pd.Series(
np.array([129600000000000, 259200000000000], dtype='timedelta64[ns]'),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('avg_duration')
)
def test_max_duration(self):
assert ranges['a'].max_duration() == pd.Timedelta('1 days 00:00:00')
pd.testing.assert_series_equal(
ranges.max_duration(),
pd.Series(
np.array([86400000000000, 259200000000000, 259200000000000, 'NaT'], dtype='timedelta64[ns]'),
index=wrapper.columns
).rename('max_duration')
)
pd.testing.assert_series_equal(
ranges_grouped.max_duration(),
pd.Series(
np.array([259200000000000, 259200000000000], dtype='timedelta64[ns]'),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('max_duration')
)
def test_coverage(self):
assert ranges['a'].coverage() == 0.5
pd.testing.assert_series_equal(
ranges.coverage(),
pd.Series(
np.array([0.5, 0.5, 0.5, np.nan]),
index=ts2.columns
).rename('coverage')
)
pd.testing.assert_series_equal(
ranges.coverage(),
ranges.replace(records_arr=np.repeat(ranges.values, 2)).coverage()
)
pd.testing.assert_series_equal(
ranges.replace(records_arr=np.repeat(ranges.values, 2)).coverage(overlapping=True),
pd.Series(
np.array([1.0, 1.0, 1.0, np.nan]),
index=ts2.columns
).rename('coverage')
)
pd.testing.assert_series_equal(
ranges.coverage(normalize=False),
pd.Series(
np.array([3.0, 3.0, 3.0, np.nan]),
index=ts2.columns
).rename('coverage')
)
pd.testing.assert_series_equal(
ranges.replace(records_arr=np.repeat(ranges.values, 2)).coverage(overlapping=True, normalize=False),
pd.Series(
np.array([3.0, 3.0, 3.0, np.nan]),
index=ts2.columns
).rename('coverage')
)
pd.testing.assert_series_equal(
ranges_grouped.coverage(),
pd.Series(
np.array([0.4166666666666667, 0.25]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('coverage')
)
pd.testing.assert_series_equal(
ranges_grouped.coverage(),
ranges_grouped.replace(records_arr=np.repeat(ranges_grouped.values, 2)).coverage()
)
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Coverage', 'Overlap Coverage',
'Total Records', 'Duration: Min', 'Duration: Median', 'Duration: Max',
'Duration: Mean', 'Duration: Std'
], dtype='object')
pd.testing.assert_series_equal(
ranges.stats(),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-06 00:00:00'),
pd.Timedelta('6 days 00:00:00'), pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('0 days 00:00:00'), 1.25, pd.Timedelta('2 days 08:00:00'),
pd.Timedelta('2 days 08:00:00'), pd.Timedelta('2 days 08:00:00'),
pd.Timedelta('2 days 08:00:00'), pd.Timedelta('0 days 00:00:00')
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
ranges.stats(column='a'),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-06 00:00:00'),
pd.Timedelta('6 days 00:00:00'), pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('0 days 00:00:00'), 3, pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('0 days 00:00:00')
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
ranges.stats(column='g1', group_by=group_by),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-06 00:00:00'),
pd.Timedelta('6 days 00:00:00'), pd.Timedelta('5 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'), 4, pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('1 days 12:00:00'), pd.Timedelta('1 days 00:00:00')
],
index=stats_index,
name='g1'
)
)
pd.testing.assert_series_equal(
ranges['c'].stats(),
ranges.stats(column='c')
)
pd.testing.assert_series_equal(
ranges['c'].stats(),
ranges.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
ranges_grouped['g2'].stats(),
ranges_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
ranges_grouped['g2'].stats(),
ranges.stats(column='g2', group_by=group_by)
)
stats_df = ranges.stats(agg_func=None)
assert stats_df.shape == (4, 11)
pd.testing.assert_index_equal(stats_df.index, ranges.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
# ############# drawdowns.py ############# #
ts2 = pd.DataFrame({
'a': [2, 1, 3, 1, 4, 1],
'b': [1, 2, 1, 3, 1, 4],
'c': [1, 2, 3, 2, 1, 2],
'd': [1, 2, 3, 4, 5, 6]
}, index=[
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
datetime(2020, 1, 4),
datetime(2020, 1, 5),
datetime(2020, 1, 6)
])
drawdowns = vbt.Drawdowns.from_ts(ts2, wrapper_kwargs=dict(freq='1 days'))
drawdowns_grouped = vbt.Drawdowns.from_ts(ts2, wrapper_kwargs=dict(freq='1 days', group_by=group_by))
class TestDrawdowns:
def test_mapped_fields(self):
for name in drawdown_dt.names:
np.testing.assert_array_equal(
getattr(drawdowns, name).values,
drawdowns.values[name]
)
def test_ts(self):
pd.testing.assert_frame_equal(
drawdowns.ts,
ts2
)
pd.testing.assert_series_equal(
drawdowns['a'].ts,
ts2['a']
)
pd.testing.assert_frame_equal(
drawdowns_grouped['g1'].ts,
ts2[['a', 'b']]
)
assert drawdowns.replace(ts=None)['a'].ts is None
def test_from_ts(self):
record_arrays_close(
drawdowns.values,
np.array([
(0, 0, 0, 1, 1, 2, 2.0, 1.0, 3.0, 1), (1, 0, 2, 3, 3, 4, 3.0, 1.0, 4.0, 1),
(2, 0, 4, 5, 5, 5, 4.0, 1.0, 1.0, 0), (3, 1, 1, 2, 2, 3, 2.0, 1.0, 3.0, 1),
(4, 1, 3, 4, 4, 5, 3.0, 1.0, 4.0, 1), (5, 2, 2, 3, 4, 5, 3.0, 1.0, 2.0, 0)
], dtype=drawdown_dt)
)
assert drawdowns.wrapper.freq == day_dt
pd.testing.assert_index_equal(
drawdowns_grouped.wrapper.grouper.group_by,
group_by
)
def test_records_readable(self):
records_readable = drawdowns.records_readable
np.testing.assert_array_equal(
records_readable['Drawdown Id'].values,
np.array([
0, 1, 2, 3, 4, 5
])
)
np.testing.assert_array_equal(
records_readable['Column'].values,
np.array([
'a', 'a', 'a', 'b', 'b', 'c'
])
)
np.testing.assert_array_equal(
records_readable['Peak Timestamp'].values,
np.array([
'2020-01-01T00:00:00.000000000', '2020-01-03T00:00:00.000000000',
'2020-01-05T00:00:00.000000000', '2020-01-02T00:00:00.000000000',
'2020-01-04T00:00:00.000000000', '2020-01-03T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['Start Timestamp'].values,
np.array([
'2020-01-02T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-03T00:00:00.000000000',
'2020-01-05T00:00:00.000000000', '2020-01-04T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['Valley Timestamp'].values,
np.array([
'2020-01-02T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-03T00:00:00.000000000',
'2020-01-05T00:00:00.000000000', '2020-01-05T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['End Timestamp'].values,
np.array([
'2020-01-03T00:00:00.000000000', '2020-01-05T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-06T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['Peak Value'].values,
np.array([
2., 3., 4., 2., 3., 3.
])
)
np.testing.assert_array_equal(
records_readable['Valley Value'].values,
np.array([
1., 1., 1., 1., 1., 1.
])
)
np.testing.assert_array_equal(
records_readable['End Value'].values,
np.array([
3., 4., 1., 3., 4., 2.
])
)
np.testing.assert_array_equal(
records_readable['Status'].values,
np.array([
'Recovered', 'Recovered', 'Active', 'Recovered', 'Recovered', 'Active'
])
)
def test_drawdown(self):
np.testing.assert_array_almost_equal(
drawdowns['a'].drawdown.values,
np.array([-0.5, -0.66666667, -0.75])
)
np.testing.assert_array_almost_equal(
drawdowns.drawdown.values,
np.array([-0.5, -0.66666667, -0.75, -0.5, -0.66666667, -0.66666667])
)
pd.testing.assert_frame_equal(
drawdowns.drawdown.to_pd(),
pd.DataFrame(
np.array([
[np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan],
[-0.5, np.nan, np.nan, np.nan],
[np.nan, -0.5, np.nan, np.nan],
[-0.66666669, np.nan, np.nan, np.nan],
[-0.75, -0.66666669, -0.66666669, np.nan]
]),
index=ts2.index,
columns=ts2.columns
)
)
def test_avg_drawdown(self):
assert drawdowns['a'].avg_drawdown() == -0.6388888888888888
pd.testing.assert_series_equal(
drawdowns.avg_drawdown(),
pd.Series(
np.array([-0.63888889, -0.58333333, -0.66666667, np.nan]),
index=wrapper.columns
).rename('avg_drawdown')
)
pd.testing.assert_series_equal(
drawdowns_grouped.avg_drawdown(),
pd.Series(
np.array([-0.6166666666666666, -0.6666666666666666]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('avg_drawdown')
)
def test_max_drawdown(self):
assert drawdowns['a'].max_drawdown() == -0.75
pd.testing.assert_series_equal(
drawdowns.max_drawdown(),
pd.Series(
np.array([-0.75, -0.66666667, -0.66666667, np.nan]),
index=wrapper.columns
).rename('max_drawdown')
)
pd.testing.assert_series_equal(
drawdowns_grouped.max_drawdown(),
pd.Series(
np.array([-0.75, -0.6666666666666666]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('max_drawdown')
)
def test_recovery_return(self):
np.testing.assert_array_almost_equal(
drawdowns['a'].recovery_return.values,
np.array([2., 3., 0.])
)
np.testing.assert_array_almost_equal(
drawdowns.recovery_return.values,
np.array([2., 3., 0., 2., 3., 1.])
)
pd.testing.assert_frame_equal(
drawdowns.recovery_return.to_pd(),
pd.DataFrame(
np.array([
[np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan],
[2.0, np.nan, np.nan, np.nan],
[np.nan, 2.0, np.nan, np.nan],
[3.0, np.nan, np.nan, np.nan],
[0.0, 3.0, 1.0, np.nan]
]),
index=ts2.index,
columns=ts2.columns
)
)
def test_avg_recovery_return(self):
assert drawdowns['a'].avg_recovery_return() == 1.6666666666666667
pd.testing.assert_series_equal(
drawdowns.avg_recovery_return(),
pd.Series(
np.array([1.6666666666666667, 2.5, 1.0, np.nan]),
index=wrapper.columns
).rename('avg_recovery_return')
)
pd.testing.assert_series_equal(
drawdowns_grouped.avg_recovery_return(),
pd.Series(
np.array([2.0, 1.0]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('avg_recovery_return')
)
def test_max_recovery_return(self):
assert drawdowns['a'].max_recovery_return() == 3.0
pd.testing.assert_series_equal(
drawdowns.max_recovery_return(),
pd.Series(
np.array([3.0, 3.0, 1.0, np.nan]),
index=wrapper.columns
).rename('max_recovery_return')
)
pd.testing.assert_series_equal(
drawdowns_grouped.max_recovery_return(),
pd.Series(
np.array([3.0, 1.0]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('max_recovery_return')
)
def test_duration(self):
np.testing.assert_array_almost_equal(
drawdowns['a'].duration.values,
np.array([1, 1, 1])
)
np.testing.assert_array_almost_equal(
drawdowns.duration.values,
np.array([1, 1, 1, 1, 1, 3])
)
def test_avg_duration(self):
assert drawdowns['a'].avg_duration() == pd.Timedelta('1 days 00:00:00')
pd.testing.assert_series_equal(
drawdowns.avg_duration(),
pd.Series(
np.array([86400000000000, 86400000000000, 259200000000000, 'NaT'], dtype='timedelta64[ns]'),
index=wrapper.columns
).rename('avg_duration')
)
pd.testing.assert_series_equal(
drawdowns_grouped.avg_duration(),
pd.Series(
np.array([86400000000000, 259200000000000], dtype='timedelta64[ns]'),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('avg_duration')
)
def test_max_duration(self):
assert drawdowns['a'].max_duration() == pd.Timedelta('1 days 00:00:00')
pd.testing.assert_series_equal(
drawdowns.max_duration(),
pd.Series(
np.array([86400000000000, 86400000000000, 259200000000000, 'NaT'], dtype='timedelta64[ns]'),
index=wrapper.columns
).rename('max_duration')
)
pd.testing.assert_series_equal(
drawdowns_grouped.max_duration(),
pd.Series(
np.array([86400000000000, 259200000000000], dtype='timedelta64[ns]'),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('max_duration')
)
def test_coverage(self):
assert drawdowns['a'].coverage() == 0.5
pd.testing.assert_series_equal(
drawdowns.coverage(),
pd.Series(
np.array([0.5, 0.3333333333333333, 0.5, np.nan]),
index=ts2.columns
).rename('coverage')
)
pd.testing.assert_series_equal(
drawdowns_grouped.coverage(),
pd.Series(
np.array([0.4166666666666667, 0.25]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('coverage')
)
def test_decline_duration(self):
np.testing.assert_array_almost_equal(
drawdowns['a'].decline_duration.values,
np.array([1., 1., 1.])
)
np.testing.assert_array_almost_equal(
drawdowns.decline_duration.values,
np.array([1., 1., 1., 1., 1., 2.])
)
def test_recovery_duration(self):
np.testing.assert_array_almost_equal(
drawdowns['a'].recovery_duration.values,
np.array([1, 1, 0])
)
np.testing.assert_array_almost_equal(
drawdowns.recovery_duration.values,
np.array([1, 1, 0, 1, 1, 1])
)
def test_recovery_duration_ratio(self):
np.testing.assert_array_almost_equal(
drawdowns['a'].recovery_duration_ratio.values,
np.array([1., 1., 0.])
)
np.testing.assert_array_almost_equal(
drawdowns.recovery_duration_ratio.values,
np.array([1., 1., 0., 1., 1., 0.5])
)
def test_active_records(self):
assert isinstance(drawdowns.active, vbt.Drawdowns)
assert drawdowns.active.wrapper == drawdowns.wrapper
record_arrays_close(
drawdowns['a'].active.values,
np.array([
(2, 0, 4, 5, 5, 5, 4., 1., 1., 0)
], dtype=drawdown_dt)
)
record_arrays_close(
drawdowns['a'].active.values,
drawdowns.active['a'].values
)
record_arrays_close(
drawdowns.active.values,
np.array([
(2, 0, 4, 5, 5, 5, 4.0, 1.0, 1.0, 0), (5, 2, 2, 3, 4, 5, 3.0, 1.0, 2.0, 0)
], dtype=drawdown_dt)
)
def test_recovered_records(self):
assert isinstance(drawdowns.recovered, vbt.Drawdowns)
assert drawdowns.recovered.wrapper == drawdowns.wrapper
record_arrays_close(
drawdowns['a'].recovered.values,
np.array([
(0, 0, 0, 1, 1, 2, 2.0, 1.0, 3.0, 1), (1, 0, 2, 3, 3, 4, 3.0, 1.0, 4.0, 1)
], dtype=drawdown_dt)
)
record_arrays_close(
drawdowns['a'].recovered.values,
drawdowns.recovered['a'].values
)
record_arrays_close(
drawdowns.recovered.values,
np.array([
(0, 0, 0, 1, 1, 2, 2.0, 1.0, 3.0, 1), (1, 0, 2, 3, 3, 4, 3.0, 1.0, 4.0, 1),
(3, 1, 1, 2, 2, 3, 2.0, 1.0, 3.0, 1), (4, 1, 3, 4, 4, 5, 3.0, 1.0, 4.0, 1)
], dtype=drawdown_dt)
)
def test_active_drawdown(self):
assert drawdowns['a'].active_drawdown() == -0.75
pd.testing.assert_series_equal(
drawdowns.active_drawdown(),
pd.Series(
np.array([-0.75, np.nan, -0.3333333333333333, np.nan]),
index=wrapper.columns
).rename('active_drawdown')
)
with pytest.raises(Exception):
drawdowns_grouped.active_drawdown()
def test_active_duration(self):
assert drawdowns['a'].active_duration() == np.timedelta64(86400000000000)
pd.testing.assert_series_equal(
drawdowns.active_duration(),
pd.Series(
np.array([86400000000000, 'NaT', 259200000000000, 'NaT'], dtype='timedelta64[ns]'),
index=wrapper.columns
).rename('active_duration')
)
with pytest.raises(Exception):
drawdowns_grouped.active_duration()
def test_active_recovery(self):
assert drawdowns['a'].active_recovery() == 0.
pd.testing.assert_series_equal(
drawdowns.active_recovery(),
pd.Series(
np.array([0., np.nan, 0.5, np.nan]),
index=wrapper.columns
).rename('active_recovery')
)
with pytest.raises(Exception):
drawdowns_grouped.active_recovery()
def test_active_recovery_return(self):
assert drawdowns['a'].active_recovery_return() == 0.
pd.testing.assert_series_equal(
drawdowns.active_recovery_return(),
pd.Series(
np.array([0., np.nan, 1., np.nan]),
index=wrapper.columns
).rename('active_recovery_return')
)
with pytest.raises(Exception):
drawdowns_grouped.active_recovery_return()
def test_active_recovery_duration(self):
assert drawdowns['a'].active_recovery_duration() == pd.Timedelta('0 days 00:00:00')
pd.testing.assert_series_equal(
drawdowns.active_recovery_duration(),
pd.Series(
np.array([0, 'NaT', 86400000000000, 'NaT'], dtype='timedelta64[ns]'),
index=wrapper.columns
).rename('active_recovery_duration')
)
with pytest.raises(Exception):
drawdowns_grouped.active_recovery_duration()
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Coverage [%]', 'Total Records',
'Total Recovered Drawdowns', 'Total Active Drawdowns',
'Active Drawdown [%]', 'Active Duration', 'Active Recovery [%]',
'Active Recovery Return [%]', 'Active Recovery Duration',
'Max Drawdown [%]', 'Avg Drawdown [%]', 'Max Drawdown Duration',
'Avg Drawdown Duration', 'Max Recovery Return [%]',
'Avg Recovery Return [%]', 'Max Recovery Duration',
'Avg Recovery Duration', 'Avg Recovery Duration Ratio'
], dtype='object')
pd.testing.assert_series_equal(
drawdowns.stats(),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-06 00:00:00'),
pd.Timedelta('6 days 00:00:00'), 44.444444444444436, 1.5, 1.0, 0.5,
54.166666666666664, pd.Timedelta('2 days 00:00:00'), 25.0, 50.0,
pd.Timedelta('0 days 12:00:00'), 66.66666666666666, 58.33333333333333,
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'), 300.0, 250.0,
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'), 1.0
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
drawdowns.stats(settings=dict(incl_active=True)),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-06 00:00:00'),
pd.Timedelta('6 days 00:00:00'), 44.444444444444436, 1.5, 1.0, 0.5,
54.166666666666664, pd.Timedelta('2 days 00:00:00'), 25.0, 50.0,
pd.Timedelta('0 days 12:00:00'), 69.44444444444444, 62.962962962962955,
                pd.Timedelta('1 days 16:00:00'),
import sys
import numpy as np
import pandas as pd
# data_dir = 'data/train_data_comp/'
# data_dir = 'data/train_data'
data_dir = 'data/'
class Student:
free_students = set()
def __init__(self, sid: int, pref_list: list, math_grade, cs_grade, utils):
self.sid = sid
self.math_grade = math_grade
self.cs_grade = cs_grade
self.pref_list = pref_list
self.max_rejects = len(pref_list) - 1
self.rejects = 0
self.project = None
self.top_pref = None
self.set_top_pref()
self.utils = utils
self.pair = None
self.main_partner = True
def assign_proj(self, project):
self.project = project
Student.free_students.discard(self.sid)
if self.pair:
self.pair.project = project
def is_free(self):
return bool(not self.project)
def get_top_pref(self):
return self.top_pref
def set_top_pref(self):
self.top_pref = self.pref_list[self.rejects]
def reject_student(self):
self.project = None
Student.free_students.add(self.sid)
if self.rejects < self.max_rejects:
self.rejects += 1
self.set_top_pref()
def test_offer(self, proj_nominee: int):
"""Returns True if the input project is preferable"""
try:
candidate_pref_index = self.pref_list.index(proj_nominee)
except ValueError:
return False
try:
cur_pref_index = self.pref_list.index(self.project.pid)
except ValueError:
cur_pref_index = self.max_rejects
return True if self.utils[candidate_pref_index] > self.utils[cur_pref_index] else False
def get_utility(self):
try:
idx = self.pref_list.index(self.project.pid)
except ValueError:
idx = self.max_rejects
return self.utils[idx]
class Project:
def __init__(self, pid):
self.pid = pid
self.grade_type = 'cs_grade' if pid % 2 else 'math_grade'
self.proposals = {}
self.main_student = None
self.partner_student = None
self.price = 0
def is_free(self):
return bool(not self.main_student)
def accept_offer(self, student):
if self.main_student:
self.main_student.reject_student()
self.main_student = student
student.assign_proj(self)
if student.pair:
self.partner_student = student.pair
def test_offer(self, nominee, student_candidate=None):
if student_candidate:
student = student_candidate
elif self.main_student:
student = self.main_student
else:
return True
if getattr(nominee, self.grade_type) > getattr(student, self.grade_type):
return True
else:
return False
def add_offer(self, student):
self.proposals[student.sid] = getattr(student, self.grade_type)
def create_students(df, grades_df):
students_dict = {}
for sid, pref_sr in df.iterrows():
desc_sr = pref_sr.sort_values(ascending=False)
students_dict[sid] = Student(sid, desc_sr.index.tolist(), grades_df.loc[sid].math_grades,
grades_df.loc[sid].cs_grades, desc_sr.tolist())
Student.free_students.add(sid)
return students_dict
def create_projects(df):
projects_dict = {}
for project_id in df.columns:
projects_dict[project_id] = Project(project_id)
return projects_dict
def make_offers(students_dict, projects_dict):
for sid in Student.free_students:
student = students_dict[sid]
project = projects_dict[student.get_top_pref()]
project.add_offer(student)
def respond_offers(students_dict, projects_dict):
for pid, project in projects_dict.items():
if not project.proposals:
continue
top_sid = max(project.proposals, key=(lambda key: project.proposals[key]))
project.proposals.pop(top_sid)
for rej_sid in project.proposals:
students_dict[rej_sid].reject_student()
project.proposals = {}
if project.is_free():
project.accept_offer(students_dict[top_sid])
elif project.test_offer(students_dict[top_sid]):
project.accept_offer(students_dict[top_sid])
else:
students_dict[top_sid].reject_student()
def deferred_acceptance(students_dict, projects_dict):
while Student.free_students:
make_offers(students_dict, projects_dict)
respond_offers(students_dict, projects_dict)
def task_one(pref_df, grades_df):
students_dict = create_students(pref_df, grades_df)
projects_dict = create_projects(pref_df)
deferred_acceptance(students_dict, projects_dict)
matching = {}
for sid, student in students_dict.items():
if student.is_free():
print('Error! missing matching')
return np.nan
matching[sid] = student.project.pid
return matching
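# Toy usage sketch for task_one (illustration only; the real inputs are preference and grade
# CSVs under data_dir, loaded as DataFrames indexed by student_id with one column per project
# id, as recon_matching below does). The project ids and grades here are made up.
def _demo_task_one():
    Student.free_students = set()  # start from a clean class-level set, as merge_students does
    pref_df = pd.DataFrame({101: [3, 1], 102: [1, 2]},
                           index=pd.Index([1, 2], name='student_id'))
    grades_df = pd.DataFrame({'math_grades': [90, 80], 'cs_grades': [70, 95]},
                             index=pd.Index([1, 2], name='student_id'))
    # Each student proposes to their top project; a project keeps the best proposer by grade.
    return task_one(pref_df, grades_df)  # expected: {1: 101, 2: 102}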
def merge_students(pref_df, grades_df, pairs_df: pd.DataFrame):
"""Will choose the leader of the pair by the average of the grades"""
students_dict = create_students(pref_df, grades_df)
Student.free_students = set()
for st_1, st_2 in pairs_df.values:
if pd.isna(st_2): # In case st_1 doesn't have a pair
Student.free_students.add(st_1)
continue
avg_grade_1 = np.mean([students_dict[st_1].math_grade, students_dict[st_1].cs_grade])
avg_grade_2 = np.mean([students_dict[st_2].math_grade, students_dict[st_2].cs_grade])
if avg_grade_1 > avg_grade_2:
students_dict[st_1].pair = students_dict[st_2]
students_dict[st_2].main_partner = False
Student.free_students.add(st_1)
else:
students_dict[st_2].pair = students_dict[st_1]
students_dict[st_1].main_partner = False
Student.free_students.add(st_2)
return students_dict
def task_two(pref_df, grades_df, pairs_df):
students_dict = merge_students(pref_df, grades_df, pairs_df)
projects_dict = create_projects(pref_df)
deferred_acceptance(students_dict, projects_dict)
matching = {}
for sid, student in students_dict.items():
if student.is_free():
print('Error! missing matching')
return np.nan
matching[sid] = student.project.pid
return matching
def test_blocking(student, suspected_blocking, students_dict):
cur_project = student.project
counter = 0
for suspect_id in suspected_blocking:
suspect = students_dict[suspect_id]
if suspect is student:
continue
if cur_project.test_offer(suspect, student) and student.test_offer(suspect.project.pid):
if suspect.test_offer(cur_project.pid) and suspect.project.test_offer(student, suspect):
print('Blocking pair (students):', student.sid, suspect.sid)
counter += 1
return counter
def find_blocking_pairs(students_dict):
suspects = set()
# check if the student got his top priority, if not, might be blocking
for sid, student in students_dict.items():
if student.pref_list[0] == student.project.pid:
continue
suspects.add(sid)
counter = 0
suspects_copy = suspects.copy()
for sid in suspects_copy:
counter += test_blocking(students_dict[sid], suspects, students_dict)
suspects.remove(sid)
return counter
def reconstruct_matching(students_dict, projects_dict, matching_df: pd.DataFrame):
for sid, pid in matching_df.itertuples():
student = students_dict[sid]
project = projects_dict[pid]
project.accept_offer(student)
def reconstruct_matching_pairs(students_dict, projects_dict, matching_df):
for sid, pid in matching_df.itertuples():
student = students_dict[sid]
project = projects_dict[pid]
if student.main_partner:
project.accept_offer(student)
else:
project.partner_student = student
student.assign_proj(project)
def recon_matching(matching_file, n):
    grades_df = pd.read_csv(f'{data_dir}/grades_{n}.csv', index_col='student_id')
# -*- coding: utf-8 -*-
# @Time : 2021-07-09 2:08
# @Author : cAMP-Cascade-DNN
# @File : idSteal.py
# @Software : Pycharm
# @Contact: qq:1071747983
# mail:<EMAIL>
# -*- Function description -*-
# Defines the spider structure: saves crawled data locally and uploads it to the database
# -*- Function description -*-
from queue import Queue
from Selenium import Selenium
from Sql import DbConnect
import os
import sys
import time
import configparser
import pandas as pd
class Spider:
    # Initialize the spider and its data lists; initialize the database object
    def __init__(self):
        # Get the project directory path, used for saving user data
        if getattr(sys, 'frozen', False):
            self.dirPath = os.path.dirname(sys.executable)
        elif __file__:
            self.dirPath = os.path.dirname(os.path.abspath(__file__))
            self.dirPath = os.path.dirname(self.dirPath)
        # Read the configuration file
        cf = configparser.ConfigParser()
        cf.read(os.path.join(self.dirPath, "config.ini"))
        # Get the seed user
        self.seed = cf.get("spider", 'seed')
        self.a = Selenium()
        # Queue of users waiting to be crawled
        self.userQueue = Queue(maxsize=300)
        self.userQueue.put(self.seed)
        # List of all users crawled so far
        self.userList = []
        # List of users collected in a single crawl
        self.childList = []
        # Number of users already crawled
        self.userCount = 0
        self.baseUrl = 'https://pubg.op.gg/user/'
        # Initialize the database object
        self.db = DbConnect()
        self.savePath = os.path.join(self.dirPath, 'username.csv')
    # Define the crawl structure; about 10k user records are collected in total
    def run(self):
        '''Resume from checkpoint
        with open(self.savePath, 'r', encoding="utf-8") as csvFile:  # Read the user-info file and store the rows in a list
            reader = csv.reader(csvFile)
            rows = [row for row in reader]
        for row_player in rows:  # Read each user from the list and add it to the crawled-user list
            self.userList.append(row_player[1])
        # Update the user count
        self.userCount = len(self.userList)
        '''
        # First expansion: crawl 400 matches starting from the seed user
url = os.path.join(self.baseUrl, self.userQueue.get())
sel = Selenium()
list = sel.run(url)
time.sleep(0.2)
self.childList = []
for i in list:
            # Keep only users not seen before; add them to the pending queue and the user list
if i not in self.userList and i != 'None' and i != self.seed:
self.userList.append(str(i))
self.childList.append(str(i))
self.userQueue.put(i)
self.userCount += 1
            print('First expansion: user #' + str(self.userCount))
        # Clean and store the data
self.dataCleaning()
self.save()
time.sleep(1)
print('111')
        # Second expansion: expand from the queue of users waiting to be crawled
length = self.userQueue.qsize()
print(length)
id = 0
for i in range(length):
id += 1
url = os.path.join(self.baseUrl, self.userQueue.get())
            # Feed in the URL to crawl; a list of users is returned
sel = Selenium()
list1 = sel.run(url)
self.childList = []
for i in list1:
                # Add valid users to the user list; duplicate data is handled later
                if i not in self.userList and i != 'None' and i != self.seed:
self.userList.append(i)
self.childList.append(i)
self.userCount += 1
                    print('Second expansion: progress ' + str(id) + "/" + str(length) +
                          ', user #' + str(self.userCount))
self.dataCleaning()
self.save()
self.db.close()
    # Data cleaning and database storage
def dataCleaning(self):
        # Remove useless usernames: '#unknown' and '0'
self.userList.remove('#unknown')
self.userList.remove('0')
self.userCount = len(self.userList)
        # Use the ordering of the data to reduce the time complexity of database inserts
self.userList.sort()
status = self.db.userInsert(self.childList)
while status:
status = self.db.userInsert(self.childList)
    # Save the data to a local CSV file
def save(self):
        userFile = pd.Series(self.userList)
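# Launch sketch (assumption: the original entry point is not part of this excerpt; Selenium
# and DbConnect are the project's own wrappers, configured via config.ini):
#
#   if __name__ == '__main__':
#       spider = Spider()
#       spider.run()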
#!/usr/bin/env python2
# -*- coding: utf-8 -*
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
import pylab
from mpl_toolkits.mplot3d import Axes3D
import seaborn as sea
import pandas as pd
from utils import mkdir_p
from feature_selection import kolmogorov_smirnov_two_sample_test
sea.set()
from utils import save_fig
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
def visualize_pca2D(X,y):
"""
Visualize the first two principal components
Keyword arguments:
X -- The feature vectors
y -- The target vector
"""
pca = PCA(n_components = 2)
principal_components = pca.fit_transform(X)
palette = sea.color_palette()
plt.scatter(principal_components[y==0, 0], principal_components[y==0, 1], marker='s',color='green',label="Paid", alpha=0.5,edgecolor='#262626', facecolor=palette[1], linewidth=0.15)
plt.scatter(principal_components[y==1, 0], principal_components[y==1, 1], marker='^',color='red',label="Default", alpha=0.5,edgecolor='#262626''', facecolor=palette[2], linewidth=0.15)
leg = plt.legend(loc='upper right', fancybox=True)
leg.get_frame().set_alpha(0.5)
plt.title("Two-Dimensional Principal Component Analysis")
    plt.tight_layout()
#save fig
output_dir='img'
save_fig(output_dir,'{}/pca2D.png'.format(output_dir))
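# Minimal self-contained demo of visualize_pca2D on synthetic two-class data (illustration
# only; the real project passes the credit-default feature matrix X and the 0/1 target y):
def _demo_visualize_pca2D():
    rng = np.random.RandomState(0)
    X_demo = np.vstack([rng.normal(0, 1, (50, 5)), rng.normal(2, 1, (50, 5))])
    y_demo = np.array([0] * 50 + [1] * 50)
    visualize_pca2D(X_demo, y_demo)  # writes img/pca2D.png through save_fig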
def visualize_pca3D(X,y):
"""
Visualize the first three principal components
Keyword arguments:
X -- The feature vectors
y -- The target vector
"""
pca = PCA(n_components = 3)
principal_components = pca.fit_transform(X)
fig = pylab.figure()
ax = Axes3D(fig)
# azm=30
# ele=30
# ax.view_init(azim=azm,elev=ele)
palette = sea.color_palette()
ax.scatter(principal_components[y==0, 0], principal_components[y==0, 1], principal_components[y==0, 2], label="Paid", alpha=0.5,
edgecolor='#262626', c=palette[1], linewidth=0.15)
ax.scatter(principal_components[y==1, 0], principal_components[y==1, 1], principal_components[y==1, 2],label="Default", alpha=0.5,
edgecolor='#262626''', c=palette[2], linewidth=0.15)
ax.legend()
plt.show()
def visualize_lda2D(X,y):
"""
Visualize the separation between classes using the two most discriminant features
Keyword arguments:
X -- The feature vectors
y -- The target vector
"""
labels=['Paid','Default']
lda = LDA(n_components = 2,solver='eigen')
# lda = LDA(n_components = 2)
discriminative_attributes = lda.fit(X, y).transform(X)
palette = sea.color_palette()
# plt.plot(discriminative_attributes[:,0][y==0],'sg',label="Paid", alpha=0.5)
# plt.plot(discriminative_attributes[:,0][y==1],'^r',label="Default", alpha=0.5)
plt.scatter(discriminative_attributes[:,0][y==0],discriminative_attributes[:,1][y==0],marker='s',color='green',label="Paid", alpha=0.5)
plt.scatter(discriminative_attributes[:,0][y==1],discriminative_attributes[:,1][y==1],marker='^',color='red',label="Default", alpha=0.5)
plt.xlabel('First Linear Discriminant')
plt.ylabel('Second Linear Discriminant')
leg = plt.legend(loc='upper right', fancybox=True)
leg.get_frame().set_alpha(0.5)
plt.title("Linear Discriminant Analysis")
    plt.tight_layout()
#save fig
output_dir='img'
save_fig(output_dir,'{}/lda.png'.format(output_dir))
def visualize_feature_hist_dist(X,y,selected_feature,features,normalize=False):
"""
Visualize the histogram distribution of a feature
Keyword arguments:
X -- The feature vectors
y -- The target vector
selected_feature -- The desired feature to obtain the histogram
features -- Vector of feature names (X1 to XN)
normalize -- Whether to normalize the histogram (Divide by total)
"""
#create data
joint_data=np.column_stack((X,y))
column_names=features
#create dataframe
df=pd.DataFrame(data=joint_data,columns=column_names)
palette = sea.hls_palette()
#find number of unique values (groups)
unique_values=pd.unique(df[[selected_feature]].values.ravel())
unique_values=map(int, unique_values)
unique_values.sort()
n_groups=len(unique_values)
fig, ax = plt.subplots()
index = np.arange(n_groups)
bar_width = 0.35
opacity = 0.4
#find values belonging to the positive class and values belonging to the negative class
positive_class_index=df[df[features[-1]] == 1].index.tolist()
negative_class_index=df[df[features[-1]] != 1].index.tolist()
positive_values=df[[selected_feature]].loc[positive_class_index].values.ravel()
positive_values=map(int, positive_values)
negative_values=df[[selected_feature]].loc[negative_class_index].values.ravel()
negative_values=map(int, negative_values)
#normalize data (divide by total)
n_positive_labels=n_negative_labels=1
if normalize==True:
n_positive_labels=len(y[y==1])
n_negative_labels=len(y[y!=1])
#count
positive_counts=[0]*len(index)
negative_counts=[0]*len(index)
for v in xrange(len(unique_values)):
positive_counts[v]=positive_values.count(v)/n_positive_labels
negative_counts[v]=negative_values.count(v)/n_negative_labels
#plot
plt.bar(index, positive_counts, bar_width,alpha=opacity,color='b',label='Default') #class 1
plt.bar(index+bar_width, negative_counts, bar_width,alpha=opacity,color='r',label='Paid') #class 0
plt.xlabel(selected_feature)
plt.ylabel('Frequency')
if normalize:
plt.ylabel('Proportion')
plt.title("Normalized Histogram Distribution of the feature '"+selected_feature+"' grouped by class")
plt.xticks(index + bar_width, map(str, unique_values) )
plt.legend()
plt.tight_layout()
# plt.show()
#save fig
output_dir = "img"
save_fig(output_dir,'{}/{}_hist_dist.png'.format(output_dir,selected_feature))
def visualize_hist_pairplot(X,y,selected_feature1,selected_feature2,features,diag_kind):
"""
    Visualize the pairwise relationships (Histograms and Density Functions) between classes and respective attributes
    Keyword arguments:
    X -- The feature vectors
    y -- The target vector
    selected_feature1 -- The first feature
    selected_feature2 -- The second feature
diag_kind -- Type of plot in the diagonal (Histogram or Density Function)
"""
#create data
joint_data=np.column_stack((X,y))
column_names=features
#create dataframe
df=pd.DataFrame(data=joint_data,columns=column_names)
#plot
palette = sea.hls_palette()
splot=sea.pairplot(df, hue="Y", palette={0:palette[2],1:palette[0]},vars=[selected_feature1,selected_feature2],diag_kind=diag_kind)
splot.fig.suptitle('Pairwise relationship: '+selected_feature1+" vs "+selected_feature2)
splot.set(xticklabels=[])
# plt.subplots_adjust(right=0.94, top=0.94)
#save fig
output_dir = "img"
save_fig(output_dir,'{}/{}_{}_hist_pairplot.png'.format(output_dir,selected_feature1,selected_feature2))
# plt.show()
def visualize_hist_pairplots(X,y):
"""
    Visualize the pairwise relationships (Histograms and Density Functions) between classes and respective attributes
Keyword arguments:
X -- The feature vectors
y -- The target vector
"""
joint_data=np.column_stack((X,y))
df=pd.DataFrame(data=joint_data,columns=["Credit","Gender","Education","Marital Status","Age","X6","X7","X8","X9","X10","X11","X12","X13","X14","X15","X16","X17","X18","X19","X20","X21","X22","X23","Default"])
palette = sea.hls_palette()
#histograms
splot=sea.pairplot(df, hue="Default", palette={0:palette[2],1:palette[0]},vars=["Credit","Gender","Education","Marital Status","Age"])
splot.fig.suptitle('Histograms Distributions and Scatter Plots: Credit, Gender, Education, Marital Status and Age')
splot.set(xticklabels=[])
plt.subplots_adjust(right=0.94, top=0.94)
plt.show()
splot=sea.pairplot(df, hue="Default", palette={0:palette[2],1:palette[0]},vars=["X6","X7","X8","X9","X10","X11"])
splot.fig.suptitle('Histograms Distributions and Scatter Plots: History of Payment')
splot.set(xticklabels=[])
plt.subplots_adjust(right=0.94, top=0.94)
plt.show()
splot=sea.pairplot(df, hue="Default", palette={0:palette[2],1:palette[0]},vars=["X12","X13","X14","X15","X16","X17"])
splot.fig.suptitle('Histograms Distributions and Scatter Plots: Amount of Bill Statements')
splot.set(xticklabels=[])
plt.subplots_adjust(right=0.94, top=0.94)
plt.show()
splot=sea.pairplot(df, hue="Default", palette={0:palette[2],1:palette[0]},vars=["X18","X19","X20","X21","X22","X23"])
splot.fig.suptitle('Histograms Distributions and Scatter Plots: Amount of Previous Payments')
splot.set(xticklabels=[])
plt.subplots_adjust(right=0.94, top=0.94)
plt.show()
#kdes
splot=sea.pairplot(df, hue="Default", palette={0:palette[2],1:palette[0]},diag_kind="kde",vars=["Credit","Gender","Education","Marital Status","Age"])
splot.fig.suptitle('Univariate Kernel Density Estimations and Scatter Plots: Credit, Gender, Education, Marital Status and Age')
splot.set(xticklabels=[])
plt.subplots_adjust(right=0.94, top=0.94)
plt.show()
splot=sea.pairplot(df, hue="Default", palette={0:palette[2],1:palette[0]},diag_kind="kde",vars=["X6","X7","X8","X9","X10","X11"])
splot.fig.suptitle('Univariate Kernel Density Estimations and Scatter Plots: History of Payment')
splot.set(xticklabels=[])
plt.subplots_adjust(right=0.94, top=0.94)
plt.show()
splot=sea.pairplot(df, hue="Default", palette={0:palette[2],1:palette[0]},diag_kind="kde",vars=["X12","X13","X14","X15","X16","X17"])
splot.fig.suptitle('Univariate Kernel Density Estimations and Scatter Plots: Amount of Bill Statements')
splot.set(xticklabels=[])
plt.subplots_adjust(right=0.94, top=0.94)
plt.show()
splot=sea.pairplot(df, hue="Default", palette={0:palette[2],1:palette[0]},diag_kind="kde",vars=["X18","X19","X20","X21","X22","X23"])
splot.fig.suptitle('Univariate Kernel Density Estimations and Scatter Plots: Amount of Previous Payments')
splot.set(xticklabels=[])
plt.subplots_adjust(right=0.94, top=0.94)
plt.show()
def visualize_feature_boxplot(X,y,selected_feature,features):
"""
Visualize the boxplot of a feature
Keyword arguments:
X -- The feature vectors
y -- The target vector
selected_feature -- The desired feature to obtain the histogram
features -- Vector of feature names (X1 to XN)
"""
#create data
joint_data=np.column_stack((X,y))
column_names=features
#create dataframe
df=pd.DataFrame(data=joint_data,columns=column_names)
# palette = sea.hls_palette()
splot=sea.boxplot(data=df,x='Y',y=selected_feature,hue="Y",palette="husl")
plt.title('BoxPlot Distribution of '+selected_feature)
#save fig
output_dir = "img"
save_fig(output_dir,'{}/{}_boxplot.png'.format(output_dir,selected_feature))
# plt.show()
def visualize_boxplots(X,y):
"""
Visualize the boxplots of the features
Keyword arguments:
X -- The feature vectors
y -- The target vector
"""
credit=X[:,0:1]
df=pd.DataFrame(data=credit,columns=["Credit"])
splot=sea.boxplot(data=df, orient="h",palette="husl")
plt.title('BoxPlot Distribution of Credit')
plt.show()
one_to_four_columns=X[:,1:4]
df=pd.DataFrame(data=one_to_four_columns,columns=["Gender","Education","Marital Status"])
splot=sea.boxplot(data=df, orient="h",palette="husl")
plt.title('BoxPlot Distribution of Features: Gender, Education and Marital Status')
plt.show()
age=X[:,4:5]
df=pd.DataFrame(data=age,columns=["Age"])
splot=sea.boxplot(data=df, orient="h",palette="husl")
plt.title('BoxPlot Distribution of Age')
plt.show()
x6_to_x11=X[:,5:11]
df=pd.DataFrame(data=x6_to_x11,columns=["X6","X7","X8","X9","X10","X11"])
splot=sea.boxplot(data=df, orient="h",palette="husl")
plt.title('BoxPlot Distribution of Features: History of Payment')
plt.show()
x12_to_x17=X[:,11:17]
df=pd.DataFrame(data=x12_to_x17,columns=["X12","X13","X14","X15","X16","X17"])
splot=sea.boxplot(data=df, orient="h",palette="husl")
plt.title('BoxPlot Distribution of Features: Amount of Bill Statements')
plt.show()
x18_to_x23=X[:,17:23]
df=pd.DataFrame(data=x18_to_x23,columns=["X18","X19","X20","X21","X22","X23"])
splot=sea.boxplot(data=df, orient="h",palette="husl")
plt.title('BoxPlot Distribution of Features: Amount of Previous Payments')
plt.show()
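# Hedged usage sketch (hypothetical file name; the actual loading code is not shown in this excerpt):
# df_raw = pd.read_csv("default_of_credit_card_clients.csv")
# X = df_raw.drop(columns=["Y"]).to_numpy()
# y = df_raw["Y"].to_numpy()
# visualize_boxplots(X, y)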
from typing import Optional
import numpy as np
import pandas as pd
from pandas import DatetimeIndex
from stateful.representable import Representable
from stateful.storage.tree import DateTree
from stateful.utils import list_of_instance, cast_output
from pandas.api.types import infer_dtype
class Stream(Representable):
def __init__(self, name, configuration: dict = None, dtype=None, tree: Optional[DateTree] = None):
Representable.__init__(self)
self.name = name
self.dtype = dtype if dtype else (configuration or {}).get("dtype")
self._tree = tree
self.configuration = configuration if configuration else {}
@property
def length(self):
return len(self._tree) if self._tree else None
@property
def tree(self):
if self._tree is None:
self._tree = DateTree(self.name, self.dtype)
return self._tree
@property
def interpolation(self):
return self.tree.interpolation
@property
def empty(self):
return self.tree.empty
@property
def start(self):
return self.tree.start
@property
def end(self):
return self.tree.end
@property
def first(self):
return self.tree.first
@property
def last(self):
return self.tree.last
def set_name(self, name):
self.name = name
self._tree.name = name
def ceil(self, date):
return self.tree.ceil(date)
def floor(self, date):
return self.tree.floor(date)
def head(self, n=5) -> pd.DataFrame:
if self.empty:
return pd.DataFrame()
index, values = [], []
iterator = iter(self._tree)
while len(index) < n:
idx, value = next(iterator)
index.append(idx)
values.append(value)
if list_of_instance(values, dict):
return pd.DataFrame(values, index=index)
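# Hedged usage sketch (the DateTree backend and its insertion API are not shown in this excerpt, so
# the construction below is an assumption rather than documented usage):
# stream = Stream("temperature", configuration={"dtype": "float"})
# stream.head()   # returns an empty DataFrame while the underlying DateTree has no observations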
# decision tree model
import pandas as pd
x = pd.read_csv("../../dataset/input_data.csv").to_numpy()
resp = pd.read_csv("../../dataset/output_data.csv").to_numpy()
##
from sklearn.decomposition import PCA
import numpy as np
pca = PCA(n_components = 1)
resp_pca = pca.fit_transform(resp)
y = (resp_pca > 0).astype("int")
y = np.ravel(y)
##
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.95, random_state = 1)
print("Train size:", y_train.shape[0], "; % trues:", np.sum(y_train)/y_train.shape[0])
print("Test size:", y_test.shape[0], "; % trues:", np.sum(y_test)/y_test.shape[0])
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import GridSearchCV
# add gridsearch
model = DecisionTreeClassifier()
params = {"criterion" : ["gini", "entropy"],
"splitter" : ["best", "random"],
"min_samples_split" : [2, 20, 200]}
# fit model
clf = GridSearchCV(model, params)
clf.fit(x_train, y_train)
# grid search results
results = pd.DataFrame.from_dict(clf.cv_results_)
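# A possible follow-up (sketch, not part of the original script): GridSearchCV refits the best
# estimator on the full training split by default (refit=True), so it can be evaluated directly
# on the held-out test data defined above.
print("Best parameters:", clf.best_params_)
print("Best cross-validated accuracy:", clf.best_score_)
y_pred = clf.best_estimator_.predict(x_test)
print("Test accuracy:", accuracy_score(y_test, y_pred))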
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 16 20:50:39 2019
@author: mwu
"""
from __future__ import absolute_import
import networkx as nx
import pandas as pd
from community import community_louvain
import snf
import numpy as np
import warnings
import networkx.algorithms.traversal as nextra
from ._random_walk import greedy_walk, supervised_random_walk
from ._ensemble_classifier import _generate_x_y, ensemble_classifier
def __init__():
warnings.simplefilter("ignore")
def __linkage_to_adjlink(linkage_table, node_list):
"""
convert linkage table to weighted adjacency matrix
"""
adjlink_matrix = pd.DataFrame(0, columns=node_list, index=node_list, dtype=float)
# source, target, score = list(linkage_table.columns)
for i in range(0, len(linkage_table)):
if (linkage_table['source'][i] in node_list) & (linkage_table['target'][i] in node_list):
adjlink_matrix.loc[linkage_table['source'][i]][linkage_table['target'][i]] = linkage_table['weight'][i]
adjlink_matrix.loc[linkage_table['target'][i]][linkage_table['source'][i]] = linkage_table['weight'][i]
else:
break
return np.array(adjlink_matrix)
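# Hedged illustration (constructed here, not taken from the original module): the expected linkage
# table is an edge list with 'source', 'target' and 'weight' columns, e.g.
# pd.DataFrame({'source': ['g1', 'g1'], 'target': ['g2', 'g3'], 'weight': [0.8, 0.3]}),
# which this helper converts into a symmetric |nodes| x |nodes| weight matrix.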
def __snf_based_merge(link_1, link_2):
"""
snf network merge based on Wang, Bo, et al. Nature methods 11.3 (2014): 333.
"""
warnings.simplefilter("ignore")
node_list = list(set(link_1['source']) & set(link_1['target']) & set(link_2['source']) & set(link_2['target']))
adjlinks = list()
adjlinks.append(__linkage_to_adjlink(link_1, node_list))
adjlinks.append(__linkage_to_adjlink(link_2, node_list))
affinity_matrix = snf.make_affinity(adjlinks)
fused_network = snf.snf(affinity_matrix)
Graph = nx.from_pandas_adjacency(pd.DataFrame(fused_network, index=node_list, columns=node_list))
return pd.DataFrame(Graph.edges, columns=['source', 'target'])
def buildnet(gnetdata, key_links, top=None):
"""
Given a linkage table, build the gene correlation graph
----------------------------------------------------
:param gnetdata: Gnetdata object.
:param key_links: str, key referring to which linkage table to use for building the graph
:param top: int, default None. Keep only the top ranked links
:return: Gnetdata object with graph added into NetAttrs
"""
top = gnetdata.NetAttrs[key_links].shape[0] if top is None else top
links_filter = gnetdata.NetAttrs[key_links].sort_values('weight', ascending=False).head(top)
G = nx.from_pandas_edgelist(links_filter,
source="source",
target="target",
edge_attr='weight')
gnetdata._add_netattr('graph', G)
gnetdata._add_netattr_para('top', str(top))
print('graph added into NetAttrs')
return gnetdata
def get_centrality(gnetdata):
"""
Measure node centrality in the network.
---------------------------------------
:param gnetdata: Gnetdata object.
:return: gnetData object with 'centralities' added into NetAttrs
"""
G = gnetdata.NetAttrs['graph']
centralities = pd.DataFrame(list(G.nodes), columns=['node'])
centralities['betweenness'] = pd.DataFrame.from_dict(list(nx.betweenness_centrality(G).items()))[1]
centralities['closeness'] = pd.DataFrame.from_dict(list(nx.closeness_centrality(G).items()))[1]
centralities['degree'] = pd.DataFrame.from_dict(list(nx.degree_centrality(G).items()))[1]
centralities['pageRank'] = pd.DataFrame.from_dict(list(nx.pagerank(G).items()))[1]
gnetdata.NetAttrs['centralities'] = centralities
print('node centralities added into NetAttrs.')
return gnetdata
def detect_community(gnetdata, **kwargs):
"""
Detect gene modules via the Louvain community detection algorithm.
-------------------------------------------------------------
:param gnetdata: Gnetdata object.
:param kwargs: additional parameters passed to community_louvain.best_partition()
:return: Gnetdata object with 'communities' added into NetAttrs
"""
G = gnetdata.NetAttrs['graph']
partition = community_louvain.best_partition(G, **kwargs)
communities = pd.DataFrame.from_dict(list(partition.items()))
communities.columns = ['node', 'group']
gnetdata.NetAttrs['communities'] = communities
print('gene communities added into NetAttrs')
return gnetdata
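# Hedged end-to-end sketch (assumes `gnetdata` already carries a linkage table under
# NetAttrs['links']; the key name and parameter values are illustrative):
# gnetdata = buildnet(gnetdata, key_links='links', top=500)
# gnetdata = get_centrality(gnetdata)
# gnetdata = detect_community(gnetdata, resolution=1.0)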
def find_consensus_graph(gnetdata, link_key='all', method='intersection',
top_rank=100, set_train=[0.05, 0.5], **kwargs):
"""
Given multiple linkage tables, it predicts consensus links.
------------------------------------------------------------
:param gnetdata: Gnetdata object.
:param link_key: list, default all. key referring to linkage table.
:param method: str, default intersection. Method for detecting consensus links. Note: intersection is recommended when there are fewer than 3 linkage tables.
:param top_rank: int, default 100. top ranked edges for intersection method.
:param set_train: list, default [0.05, 0.5]. Edges are ranked by the weight obtained from each individual method. To train the classification model, the top 5% of edges are labelled 'consensus edges (1)' and the bottom 50% 'non-consensus edges (0)' for each individual method.
:return: Gnetdata object with consensus links added into NetAttrs.
"""
assert method in ['intersection', 'snf',
'ensemble'], 'only following methods acceptable : intersection, snf, ensemble'
keys = list(filter(lambda x: 'links' in x, gnetdata.NetAttrs.keys())) if link_key == 'all' else link_key
if method == 'intersection':
merged_links = gnetdata.NetAttrs[keys[0]].sort_values('weight', ascending=False, ignore_index=True).head(top_rank)
for i in range(1, len(keys)):
sorted_links = gnetdata.NetAttrs[keys[i]].sort_values('weight',
ascending=False, ignore_index=True).head(top_rank)
merged_links = graph_merge(merged_links, sorted_links, method=method)
elif method == 'ensemble':
links_dict = dict(filter(lambda i: i[0] in keys, gnetdata.NetAttrs.items()))
X, Y, df_final = _generate_x_y(links_dict, top_rank=top_rank, set_train=set_train)
merged_links = ensemble_classifier(X, Y, df_final, **kwargs)
print('there are {} consensus edges found!'.format(merged_links.shape[0]))
gnetdata._add_netattr('consensus_links', merged_links)
return gnetdata
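# Hedged usage sketch (link keys are illustrative, not from this module): with two or three linkage
# tables stored under keys containing 'links', consensus edges could be derived via
# gnetdata = find_consensus_graph(gnetdata, link_key=['genie3_links', 'pidc_links'],
#                                 method='intersection', top_rank=200)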
def graph_merge(link_1, link_2, method='union'):
"""
Given two graphs, it returns merged graph.
----------------------------------------------
:param link_1: dataframe. linkage table of graph_1
:param link_2: dataframe. linkage table of graph_2
:param method: str, default union. methods:[union, intersection, snf]. snf refers to similarity network fusion.
:return: dataframe, merged linkage
"""
assert method in ['union', 'intersection', 'snf'], 'valid method parameter: union, intersection, snf!'
if method in ['union', 'intersection']:
link_1['edge'] = ["_".join(sorted([link_1.source[i], link_1.target[i]])) for i in range(link_1.shape[0])]
link_2['edge'] = ["_".join(sorted([link_2.source[i], link_2.target[i]])) for i in range(link_2.shape[0])]
mergedlinks = pd.merge(link_1, link_2, on=['edge'], how='outer' if method == 'union' else 'inner')
"""Module with the tests for the pileup creation realted tasks."""
import os
import unittest
from unittest.mock import patch
from unittest.mock import MagicMock, PropertyMock
import pandas as pd
from pandas.testing import assert_frame_equal
import numpy as np
from hicognition.test_helpers import LoginTestCase, TempDirTestCase
from hicognition.utils import get_optimal_binsize
# add path to import app
# import sys
# sys.path.append("./")
from app import db
from app.models import Dataset, Intervals, Assembly, Task, ObsExp
from app.tasks import pipeline_pileup
from app.pipeline_steps import pileup_pipeline_step
from app.pipeline_worker_functions import (
_do_pileup_fixed_size,
_do_pileup_variable_size,
)
class TestPipelinePileup(LoginTestCase, TempDirTestCase):
"""Tests whether pipelin_pileup task calls
the pipeline steps correctly"""
def setUp(self):
"""Add test dataset"""
# call setUp of LoginTestCase to initialize app
super(TestPipelinePileup, self).setUp()
# add assembly
self.hg19 = Assembly(
id=1,
name="hg19",
chrom_sizes=self.app.config["CHROM_SIZES"],
chrom_arms=self.app.config["CHROM_ARMS"],
)
db.session.add(self.hg19)
db.session.commit()
# add dataset
self.bedfile = Dataset(id=1, filetype="bedfile", user_id=1, assembly=1)
self.coolerfile = Dataset(id=2, filetype="cooler", user_id=1, assembly=1)
# add intervals
self.intervals1 = Intervals(
id=1, name="testRegion1", dataset_id=1, windowsize=200000
)
self.intervals2 = Intervals(
id=2, name="testRegion2", dataset_id=1, windowsize=200000
)
# make tasks
self.finished_task1 = Task(
id="test1", dataset_id=2, intervals_id=1, complete=True
)
self.unfinished_task1 = Task(
id="test1", dataset_id=2, intervals_id=1, complete=False
)
@staticmethod
def get_call_args_without_index(mock, remove_index):
"""extracts call args from magic mock object and removes
object at index"""
call_args = []
for call in mock.call_args_list:
current_call_list = []
for index in range(len(call[0])):
if index == remove_index:
# remove chromosome arms dataframe
continue
current_call_list.append(call[0][index])
call_args.append(current_call_list)
return call_args
@patch("app.pipeline_steps.set_task_progress")
@patch("app.pipeline_steps.pileup_pipeline_step")
def test_pipeline_pileup_calls_steps_correctly(
self, mock_pileup_pipeline_step, mock_set_progress
):
"""Tests whether the functions that execute the different pipeline steps are called
correctly."""
# add datasets
db.session.add_all(
[self.coolerfile, self.bedfile, self.intervals1, self.intervals2]
)
db.session.commit()
# launch task
binsize = 10000
dataset_id = 2
intervals_id = 2
pileup_types = ["ICCF", "Obs/Exp"]
pipeline_pileup(dataset_id, intervals_id, binsize)
# construct call arguments; a pd.DataFrame argument breaks MagicMock's interval methods
call_args = self.get_call_args_without_index(mock_pileup_pipeline_step, 3)
# compare expected call arguments with actual call arguments
for pileup_type in pileup_types:
# check whether the current combination is in call args list
expected_call_args = [dataset_id, intervals_id, binsize, pileup_type]
self.assertTrue(expected_call_args in call_args)
# check whether number of calls was as expected
self.assertEqual(len(call_args), len(pileup_types))
# check whether last call to set task progress was 100
mock_set_progress.assert_called_with(100)
@patch("app.pipeline_steps.pd.read_csv")
@patch("app.pipeline_steps.set_task_progress")
@patch("app.pipeline_steps.pileup_pipeline_step")
def test_dataset_state_not_changed_if_not_last(
self, mock_pileup, mock_set_progress, mock_read_csv
):
"""tests whether dataset state is left unchanged if it is not the last task for
this dataset/intervals combination."""
# set up database
self.bedfile.processing_features = [self.coolerfile]
db.session.add_all(
[self.bedfile, self.coolerfile, self.intervals1, self.unfinished_task1]
)
# call pipeline
pipeline_pileup(2, 1, 10000)
# check whether processing has finished
self.assertEqual(self.bedfile.processing_features, [self.coolerfile])
@patch("app.pipeline_steps.pd.read_csv")
@patch("app.pipeline_steps.set_task_progress")
@patch("app.pipeline_steps.pileup_pipeline_step")
def test_dataset_set_finished_if_last(
self, mock_pileup, mock_set_progress, mock_read_csv
):
"""tests whether dataset is set finished correctly if it is the last task for
this dataset/intervals combination."""
# set up database
self.bedfile.processing_features = [self.coolerfile]
db.session.add_all(
[self.bedfile, self.coolerfile, self.intervals1, self.finished_task1]
)
# call pipeline
pipeline_pileup(2, 1, 10000)
# check whether processing has finished
self.assertEqual(self.bedfile.processing_features, [])
@patch("app.pipeline_steps.log.error")
@patch("app.pipeline_steps.pd.read_csv")
@patch("app.pipeline_steps.set_task_progress")
@patch("app.pipeline_steps.pileup_pipeline_step")
def test_dataset_set_failed_if_failed(
self, mock_pileup, mock_set_progress, mock_read_csv, mock_log
):
"""tests whether dataset is set as faild if problem arises."""
# set up exception raising
mock_pileup.side_effect = ValueError("Test")
# set up database
self.bedfile.processing_features = [self.coolerfile]
db.session.add_all(
[self.bedfile, self.coolerfile, self.intervals1, self.unfinished_task1]
)
# call pipeline
pipeline_pileup(2, 1, 10000)
# check whether processing has finished
self.assertEqual(self.bedfile.failed_features, [self.coolerfile])
self.assertEqual(self.bedfile.processing_features, [])
assert mock_log.called
class TestPileupPipelineStep(LoginTestCase, TempDirTestCase):
"""Test pileup worker functions for point and interval features."""
def setUp(self):
"""Add test dataset"""
# call setUp of LoginTestCase to initialize app
super().setUp()
# add assembly
self.hg19 = Assembly(
id=1,
name="hg19",
chrom_sizes=self.app.config["CHROM_SIZES"],
chrom_arms=self.app.config["CHROM_ARMS"],
)
db.session.add(self.hg19)
db.session.commit()
# add dataset
self.dataset = Dataset(
dataset_name="test3",
file_path="/test/path/test3.mcool",
filetype="cooler",
processing_state="finished",
user_id=1,
assembly=1,
)
self.dataset2 = Dataset(
dataset_name="test4",
file_path="/test/path/test4.mcool",
filetype="cooler",
processing_state="finished",
user_id=1,
assembly=1,
)
# add intervals
self.intervals1 = Intervals(name="testRegion1", dataset_id=1, windowsize=200000)
self.intervals2 = Intervals(name="testRegion2", dataset_id=1, windowsize=300000)
self.intervals3 = Intervals(name="testRegion2", dataset_id=1, windowsize=None)
db.session.add(self.dataset)
db.session.add(self.dataset2)
db.session.add(self.intervals1)
db.session.add(self.intervals2)
db.session.add(self.intervals3)
db.session.commit()
@patch("app.pipeline_steps.worker_funcs._do_pileup_fixed_size")
@patch("app.pipeline_steps.worker_funcs._do_pileup_variable_size")
def test_correct_pileup_worker_function_used_point_feature(
self, mock_pileup_variable_size, mock_pileup_fixed_size
):
"""Tests whether correct worker function for pileup is used
when intervals has fixed windowsizes"""
# add return values
mock_pileup_fixed_size.return_value = np.full((2, 2, 2), np.nan)
# dispatch call
dataset_id = 1
intervals_id = 1
arms = pd.read_csv(self.app.config["CHROM_ARMS"])
pileup_pipeline_step(dataset_id, intervals_id, 10000, arms, "ICCF")
# check whether pileup with fixed size was called and with variable size was not called
mock_pileup_fixed_size.assert_called_once()
mock_pileup_variable_size.assert_not_called()
@patch("app.pipeline_steps.worker_funcs._do_pileup_fixed_size")
@patch("app.pipeline_steps.worker_funcs._do_pileup_variable_size")
def test_correct_pileup_worker_function_used_interval_feature(
self, mock_pileup_variable_size, mock_pileup_fixed_size
):
"""Tests whether correct worker function for pileup is used
when intervals has variable windowsizes"""
# add return values
mock_pileup_variable_size.return_value = np.full((2, 2, 2), np.nan)
# dispatch call
dataset_id = 1
intervals_id = 3
arms = pd.read_csv(self.app.config["CHROM_ARMS"])
pileup_pipeline_step(dataset_id, intervals_id, 10000, arms, "ICCF")
# check whether pileup with fixed size was called and with variable size was not called
mock_pileup_variable_size.assert_called_once()
mock_pileup_fixed_size.assert_not_called()
@patch("app.pipeline_steps.uuid.uuid4")
@patch("app.pipeline_steps.worker_funcs._add_embedding_2d_to_db")
@patch("app.pipeline_steps.worker_funcs._add_pileup_db")
@patch("app.pipeline_steps.worker_funcs._do_pileup_fixed_size")
@patch("app.pipeline_steps.worker_funcs._do_pileup_variable_size")
def test_adding_to_db_called_correctly(
self,
mock_pileup_variable_size,
mock_pileup_fixed_size,
mock_add_pileup_db,
mock_add_embedding_db,
mock_uuid,
):
"""Tests whether function to add result to database is called correctly."""
# add return values
mock_pileup_fixed_size.return_value = np.full((2, 2, 2), np.nan)
# hack in return value of uuid4().hex to be asdf
uuid4 = MagicMock()
type(uuid4).hex = PropertyMock(return_value="asdf")
mock_uuid.return_value = uuid4
# construct call args
dataset_id = 1
intervals_id = 1
arms = pd.read_csv(self.app.config["CHROM_ARMS"])
pileup_pipeline_step(dataset_id, intervals_id, 10000, arms, "ICCF")
# check whether adding to pileup db is called correctly
mock_add_pileup_db.assert_called_with(
self.app.config["UPLOAD_DIR"] + "/asdf.npy",
10000,
self.intervals1.id,
self.dataset.id,
"ICCF",
)
# check whether adding embedding to db is called correctly
mock_add_embedding_db.assert_any_call(
{
"embedding": self.app.config["UPLOAD_DIR"] + "/asdf_embedding.npy",
"cluster_ids": self.app.config["UPLOAD_DIR"]
+ "/asdf_cluster_ids_small.npy",
"thumbnails": self.app.config["UPLOAD_DIR"]
+ "/asdf_thumbnails_small.npy",
},
10000,
self.intervals1.id,
self.dataset.id,
"ICCF",
"small",
)
mock_add_embedding_db.assert_any_call(
{
"embedding": self.app.config["UPLOAD_DIR"] + "/asdf_embedding.npy",
"cluster_ids": self.app.config["UPLOAD_DIR"]
+ "/asdf_cluster_ids_large.npy",
"thumbnails": self.app.config["UPLOAD_DIR"]
+ "/asdf_thumbnails_large.npy",
},
10000,
self.intervals1.id,
self.dataset.id,
"ICCF",
"large",
)
class TestPileupWorkerFunctionsFixedSize(LoginTestCase, TempDirTestCase):
"""Test pileup worker functions for fixed sized intervals."""
def setUp(self):
"""Add test dataset"""
# call setUp of LoginTestCase to initialize app
super().setUp()
# add assembly
self.hg19 = Assembly(
id=1,
name="hg19",
chrom_sizes=self.app.config["CHROM_SIZES"],
chrom_arms=self.app.config["CHROM_ARMS"],
)
db.session.add(self.hg19)
db.session.commit()
# add dataset
self.cooler = Dataset(
dataset_name="test3",
file_path="./tests/testfiles/test.mcool",
filetype="cooler",
processing_state="finished",
user_id=1,
assembly=1,
)
db.session.add(self.cooler)
db.session.commit()
@patch("app.pipeline_worker_functions.HT.do_pileup_iccf")
@patch("app.pipeline_worker_functions.HT.do_pileup_obs_exp")
@patch("app.pipeline_worker_functions.HT.get_expected")
@patch("app.pipeline_worker_functions.HT.assign_regions")
@patch("app.pipeline_worker_functions.cooler.Cooler")
@patch("app.pipeline_worker_functions.pd.read_csv")
def test_correct_functions_called_obs_exp(
self,
mock_read_csv,
mock_cooler,
mock_assign_regions,
mock_get_expected,
mock_pileup_obs_exp,
mock_pileup_iccf,
):
"""Tests whether correct pileup function is called when obs/exp pileup is dispatched"""
test_df_interval = pd.DataFrame(
{0: ["chr1", "chr1"], 1: [0, 1000], 2: [1000, 2000]}
)
mock_read_csv.return_value = test_df_interval
mock_cooler.return_value = "mock_cooler"
returned_regions = MagicMock()
mock_assign_regions.return_value = returned_regions
mock_get_expected.return_value = pd.DataFrame()
# dispatch call
arms = pd.read_csv(self.app.config["CHROM_ARMS"])
_do_pileup_fixed_size(self.cooler, 100000, 10000, "testpath", arms, "Obs/Exp")
# check whether get_expected was called
mock_get_expected.assert_called()
mock_pileup_obs_exp.assert_called()
# check whether iccf pileup is not called
mock_pileup_iccf.assert_not_called()
@patch("app.pipeline_worker_functions.HT.do_pileup_iccf")
@patch("app.pipeline_worker_functions.HT.do_pileup_obs_exp")
@patch("app.pipeline_worker_functions.HT.get_expected")
@patch("app.pipeline_worker_functions.HT.assign_regions")
@patch("app.pipeline_worker_functions.cooler.Cooler")
@patch("app.pipeline_worker_functions.pd.read_csv")
def test_correct_functions_called_iccf(
self,
mock_read_csv,
mock_cooler,
mock_assign_regions,
mock_get_expected,
mock_pileup_obs_exp,
mock_pileup_iccf,
):
"""Tests whether correct pileup function is called when iccf pileup is dispatched"""
test_df_interval = pd.DataFrame(
{0: ["chr1", "chr1"], 1: [0, 1000], 2: [1000, 2000]}
)
mock_read_csv.return_value = test_df_interval
mock_cooler.return_value = "mock_cooler"
returned_regions = MagicMock()
mock_assign_regions.return_value = returned_regions
mock_get_expected.return_value = "expected"
# dispatch call
arms = pd.read_csv(self.app.config["CHROM_ARMS"])
_do_pileup_fixed_size(self.cooler, 100000, 10000, "testpath", arms, "ICCF")
# check whether get_expected was called
mock_get_expected.assert_not_called()
expected_pileup_call = ["mock_cooler", returned_regions.dropna()]
mock_pileup_iccf.assert_called_with(
*expected_pileup_call, proc=2, collapse=True
)
# check whether iccf pileup is not called
mock_pileup_obs_exp.assert_not_called()
def test_regions_with_bad_chromosomes_filled_with_nan(self):
"""Checks whether regions with bad chromosomes are filled with nans"""
arms = pd.read_csv(self.app.config["CHROM_ARMS"])
with patch("app.pipeline_worker_functions.pd.read_csv") as mock_read_csv:
test_df_interval = pd.DataFrame(
{
0: ["chr1", "chrASDF", "chr1", "chrASDF"],
1: [60000000, 10, 50000000, 100],
2: [60000000, 10, 50000000, 150],
}
)
mock_read_csv.return_value = test_df_interval
# dispatch call
result = _do_pileup_fixed_size(
self.cooler, 10000000, 5000000, "testpath", arms, "ICCF", collapse=False
)
self.assertEqual(result.shape[2], 4)
test_array_one = np.array(
[
[0.13717751, 0.0265284, 0.01462106, 0.009942, 0.00682112],
[0.0265284, 0.18850834, 0.06237434, 0.0145492, 0.01485787],
[0.01462106, 0.06237434, 0.119365, 0.04225391, 0.01654861],
[0.009942, 0.0145492, 0.04225391, 0.12408607, 0.05381814],
[0.00682112, 0.01485787, 0.01654861, 0.05381814, 0.14363506],
]
)
test_array_two = np.array(
[
[0.23130276, 0.02327701, 0.0126868, 0.00436247, 0.00401918],
[0.02327701, 0.15173886, 0.07788348, 0.01425616, 0.01083477],
[0.0126868, 0.07788348, 0.13717751, 0.0265284, 0.01462106],
[0.00436247, 0.01425616, 0.0265284, 0.18850834, 0.06237434],
[0.00401918, 0.01083477, 0.01462106, 0.06237434, 0.119365],
]
)
self.assertTrue(np.allclose(result[..., 0], test_array_one))
self.assertTrue(np.all(np.isnan(result[..., 1])))
self.assertTrue(np.allclose(result[..., 2], test_array_two))
self.assertTrue(np.all(np.isnan(result[..., 3])))
def test_cooler_w_missing_resolutions_return_nans_w_collapse(self):
"""Tests whether calling pileup on a cooler with
a binsize that is not available returns an array of nans
with the right shape according to windowsize and binsize"""
arms = pd.read_csv(self.app.config["CHROM_ARMS"])
with patch("app.pipeline_worker_functions.pd.read_csv") as mock_read_csv:
test_df_interval = pd.DataFrame(
{
0: ["chr1"],
1: [60000000],
2: [60000000],
}
)
mock_read_csv.return_value = test_df_interval
# dispatch call
result = _do_pileup_fixed_size(
self.cooler, 200000, 10000, "testpath", arms, "ICCF", collapse=True
)
self.assertEqual(result.shape, (40, 40))
self.assertTrue(np.all(np.isnan(result)))
def test_cooler_w_missing_resolutions_return_nans_wo_collapse(self):
"""Tests whether calling pileup on a cooler with
a binsize that is not available returns an array of nans
with the right shape according to windowsize and binsize"""
arms = pd.read_csv(self.app.config["CHROM_ARMS"])
with patch("app.pipeline_worker_functions.pd.read_csv") as mock_read_csv:
test_df_interval = pd.DataFrame(
{
0: ["chr1", "chr1"],
1: [60000000, 10000],
2: [60000000, 10000],
}
)
mock_read_csv.return_value = test_df_interval
# dispatch call
result = _do_pileup_fixed_size(
self.cooler, 200000, 10000, "testpath", arms, "ICCF", collapse=False
)
self.assertEqual(result.shape, (40, 40, 2))
self.assertTrue(np.all(np.isnan(result)))
@patch("app.pipeline_worker_functions.HT.get_expected")
def test_cached_obs_exp_used(self, mock_expected):
"""Tests whether cached obs/exp dataset is used"""
arms = pd.read_csv(self.app.config["CHROM_ARMS"])
obs_exp = ObsExp(
dataset_id=self.cooler.id,
binsize=5000000,
filepath=os.path.join("./tests/testfiles", "expected.csv"),
)
db.session.add(obs_exp)
db.session.commit()
# create mock regions
test_df_interval = pd.DataFrame(
{
0: ["chr1", "chr1"],
1: [60000000, 10000],
2: [60000000, 10000],
}
)
mock_path = os.path.join(self.app.config["UPLOAD_DIR"], "mock_regions.csv")
test_df_interval.to_csv(mock_path, index=False, header=None, sep="\t")
# dispatch call
_do_pileup_fixed_size(
self.cooler, 10000000, 5000000, mock_path, arms, "Obs/Exp", collapse=False
)
mock_expected.assert_not_called()
def test_calculated_obs_exp_cached(self):
"""Tests whether cached obs/exp dataset is created if it does not exist already"""
arms = pd.read_csv(self.app.config["CHROM_ARMS"])
# create mock regions
test_df_interval = pd.DataFrame(
{
0: ["chr1", "chr1"],
1: [60000000, 10000],
2: [60000000, 10000],
}
)
mock_path = os.path.join(self.app.config["UPLOAD_DIR"], "mock_regions.csv")
test_df_interval.to_csv(mock_path, index=False, header=None, sep="\t")
# dispatch call
_do_pileup_fixed_size(
self.cooler, 10000000, 5000000, mock_path, arms, "Obs/Exp", collapse=False
)
self.assertEqual(1, len(ObsExp.query.all()))
dataset = ObsExp.query.first()
self.assertEqual(dataset.dataset_id, self.cooler.id)
self.assertEqual(dataset.binsize, 5000000)
# load datset
expected = pd.read_csv(os.path.join("./tests/testfiles", "expected.csv"))
calculated = pd.read_csv(dataset.filepath)
assert_frame_equal(expected, calculated)
import ast
import json
import os
import sys
import uuid
import lxml
import networkx as nx
import pandas as pd
import geopandas as gpd
import pytest
from pandas.testing import assert_frame_equal, assert_series_equal
from shapely.geometry import LineString, Polygon, Point
from genet.core import Network
from genet.inputs_handler import matsim_reader
from tests.test_outputs_handler_matsim_xml_writer import network_dtd, schedule_dtd
from genet.schedule_elements import Route, Service, Schedule
from genet.utils import plot, spatial
from genet.inputs_handler import read
from tests.fixtures import assert_semantically_equal, route, stop_epsg_27700, network_object_from_test_data, \
full_fat_default_config_path, correct_schedule, vehicle_definitions_config_path
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
pt2matsim_network_test_file = os.path.abspath(
os.path.join(os.path.dirname(__file__), "test_data", "matsim", "network.xml"))
pt2matsim_schedule_file = os.path.abspath(
os.path.join(os.path.dirname(__file__), "test_data", "matsim", "schedule.xml"))
puma_network_test_file = os.path.abspath(
os.path.join(os.path.dirname(__file__), "test_data", "puma", "network.xml"))
puma_schedule_test_file = os.path.abspath(
os.path.join(os.path.dirname(__file__), "test_data", "puma", "schedule.xml"))
simplified_network = os.path.abspath(
os.path.join(os.path.dirname(__file__), "test_data", "simplified_network", "network.xml"))
simplified_schedule = os.path.abspath(
os.path.join(os.path.dirname(__file__), "test_data", "simplified_network", "schedule.xml"))
network_link_attrib_text_missing = os.path.abspath(
os.path.join(os.path.dirname(__file__), "test_data", "matsim", "network_link_attrib_text_missing.xml"))
@pytest.fixture()
def network1():
n1 = Network('epsg:27700')
n1.add_node('101982',
{'id': '101982',
'x': '528704.1425925883',
'y': '182068.78193707118',
'lon': -0.14625948709424305,
'lat': 51.52287873323954,
's2_id': 5221390329378179879})
n1.add_node('101986',
{'id': '101986',
'x': '528835.203274008',
'y': '182006.27331298392',
'lon': -0.14439428709377497,
'lat': 51.52228713323965,
's2_id': 5221390328605860387})
n1.add_link('0', '101982', '101986',
attribs={'id': '0',
'from': '101982',
'to': '101986',
'freespeed': 4.166666666666667,
'capacity': 600.0,
'permlanes': 1.0,
'oneway': '1',
'modes': ['car'],
's2_from': 5221390329378179879,
's2_to': 5221390328605860387,
'length': 52.765151087870265,
'attributes': {'osm:way:access': {'name': 'osm:way:access',
'class': 'java.lang.String',
'text': 'permissive'},
'osm:way:highway': {'name': 'osm:way:highway',
'class': 'java.lang.String',
'text': 'unclassified'},
'osm:way:id': {'name': 'osm:way:id',
'class': 'java.lang.Long',
'text': '26997928'},
'osm:way:name': {'name': 'osm:way:name',
'class': 'java.lang.String',
'text': 'Brunswick Place'}}})
return n1
@pytest.fixture()
def network2():
n2 = Network('epsg:4326')
n2.add_node('101982',
{'id': '101982',
'x': -0.14625948709424305,
'y': 51.52287873323954,
'lon': -0.14625948709424305,
'lat': 51.52287873323954,
's2_id': 5221390329378179879})
n2.add_node('101990',
{'id': '101990',
'x': -0.14770188709624754,
'y': 51.5205729332399,
'lon': -0.14770188709624754,
'lat': 51.5205729332399,
's2_id': 5221390304444511271})
n2.add_link('0', '101982', '101990',
attribs={'id': '0',
'from': '101982',
'to': '101990',
'freespeed': 4.166666666666667,
'capacity': 600.0,
'permlanes': 1.0,
'oneway': '1',
'modes': ['car'],
's2_from': 5221390329378179879,
's2_to': 5221390304444511271,
'length': 52.765151087870265,
'attributes': {'osm:way:access': {'name': 'osm:way:access',
'class': 'java.lang.String',
'text': 'permissive'},
'osm:way:highway': {'name': 'osm:way:highway',
'class': 'java.lang.String',
'text': 'unclassified'},
'osm:way:id': {'name': 'osm:way:id',
'class': 'java.lang.Long',
'text': '26997928'},
'osm:way:name': {'name': 'osm:way:name',
'class': 'java.lang.String',
'text': 'Brunswick Place'}}})
return n2
def test_network_graph_initiates_as_not_simplified():
n = Network('epsg:27700')
assert not n.graph.graph['simplified']
def test__repr__shows_graph_info_and_schedule_info():
n = Network('epsg:4326')
assert 'instance at' in n.__repr__()
assert 'graph' in n.__repr__()
assert 'schedule' in n.__repr__()
def test__str__shows_info():
n = Network('epsg:4326')
assert 'Graph info' in n.__str__()
assert 'Schedule info' in n.__str__()
def test_reproject_changes_x_y_values_for_all_nodes(network1):
network1.reproject('epsg:4326')
nodes = dict(network1.nodes())
correct_nodes = {
'101982': {'id': '101982', 'x': -0.14625948709424305, 'y': 51.52287873323954, 'lon': -0.14625948709424305,
'lat': 51.52287873323954, 's2_id': 5221390329378179879},
'101986': {'id': '101986', 'x': -0.14439428709377497, 'y': 51.52228713323965, 'lon': -0.14439428709377497,
'lat': 51.52228713323965, 's2_id': 5221390328605860387}}
target_change_log = pd.DataFrame(
{'timestamp': {3: '2020-07-09 19:50:51', 4: '2020-07-09 19:50:51'}, 'change_event': {3: 'modify', 4: 'modify'},
'object_type': {3: 'node', 4: 'node'}, 'old_id': {3: '101982', 4: '101986'},
'new_id': {3: '101982', 4: '101986'}, 'old_attributes': {
3: "{'id': '101982', 'x': '528704.1425925883', 'y': '182068.78193707118', 'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879}",
4: "{'id': '101986', 'x': '528835.203274008', 'y': '182006.27331298392', 'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387}"},
'new_attributes': {
3: "{'id': '101982', 'x': -0.14625948709424305, 'y': 51.52287873323954, 'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879}",
4: "{'id': '101986', 'x': -0.14439428709377497, 'y': 51.52228713323965, 'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387}"},
'diff': {3: [('change', 'x', ('528704.1425925883', -0.14625948709424305)),
('change', 'y', ('182068.78193707118', 51.52287873323954))],
4: [('change', 'x', ('528835.203274008', -0.14439428709377497)),
('change', 'y', ('182006.27331298392', 51.52228713323965))]}}
)
assert_semantically_equal(nodes, correct_nodes)
for i in [3, 4]:
assert_semantically_equal(ast.literal_eval(target_change_log.loc[i, 'old_attributes']),
ast.literal_eval(network1.change_log.loc[i, 'old_attributes']))
assert_semantically_equal(ast.literal_eval(target_change_log.loc[i, 'new_attributes']),
ast.literal_eval(network1.change_log.loc[i, 'new_attributes']))
cols_to_compare = ['change_event', 'object_type', 'old_id', 'new_id', 'diff']
assert_frame_equal(network1.change_log[cols_to_compare].tail(2), target_change_log[cols_to_compare],
check_dtype=False)
def test_reproject_delegates_reprojection_to_schedules_own_method(network1, route, mocker):
mocker.patch.object(Schedule, 'reproject')
network1.schedule = Schedule(epsg='epsg:27700', services=[Service(id='id', routes=[route])])
network1.reproject('epsg:4326')
network1.schedule.reproject.assert_called_once_with('epsg:4326', 1)
def test_reproject_updates_graph_crs(network1):
network1.reproject('epsg:4326')
assert network1.graph.graph['crs'] == {'init': 'epsg:4326'}
def test_reprojecting_links_with_geometries():
n = Network('epsg:27700')
n.add_nodes({'A': {'x': -82514.72274, 'y': 220772.02798},
'B': {'x': -82769.25894, 'y': 220773.0637}})
n.add_links({'1': {'from': 'A', 'to': 'B',
'geometry': LineString([(-82514.72274, 220772.02798),
(-82546.23894, 220772.88254),
(-82571.87107, 220772.53339),
(-82594.92709, 220770.68385),
(-82625.33255, 220770.45579),
(-82631.26842, 220770.40158),
(-82669.7309, 220770.04349),
(-82727.94946, 220770.79793),
(-82757.38528, 220771.75412),
(-82761.82425, 220771.95614),
(-82769.25894, 220773.0637)])}})
n.reproject('epsg:2157')
geometry_coords = list(n.link('1')['geometry'].coords)
assert round(geometry_coords[0][0], 7) == 532006.5605980
assert round(geometry_coords[0][1], 7) == 547653.3751768
assert round(geometry_coords[-1][0], 7) == 531753.4315189
assert round(geometry_coords[-1][1], 7) == 547633.5224837
def test_adding_the_same_networks():
n_left = Network('epsg:27700')
n_left.add_node('1', {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118,
'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879})
n_left.add_node('2', {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392,
'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387})
n_left.add_link('1', '1', '2', attribs={'modes': ['walk']})
n_right = Network('epsg:27700')
n_right.add_node('1', {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118,
'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879})
n_right.add_node('2', {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392,
'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387})
n_right.add_link('1', '1', '2', attribs={'modes': ['walk']})
n_left.add(n_right)
assert_semantically_equal(dict(n_left.nodes()), {
'1': {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118, 'lon': -0.14625948709424305,
'lat': 51.52287873323954, 's2_id': 5221390329378179879},
'2': {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392, 'lon': -0.14439428709377497,
'lat': 51.52228713323965, 's2_id': 5221390328605860387}})
assert_semantically_equal(dict(n_left.links()), {'1': {'modes': ['walk'], 'from': '1', 'to': '2', 'id': '1'}})
def test_adding_the_same_networks_but_with_differing_projections():
n_left = Network('epsg:27700')
n_left.add_node('1', {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118,
'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879})
n_left.add_node('2', {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392,
'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387})
n_left.add_link('1', '1', '2', attribs={'modes': ['walk']})
n_right = Network('epsg:27700')
n_right.add_node('1', {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118,
'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879})
n_right.add_node('2', {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392,
'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387})
n_right.add_link('1', '1', '2', attribs={'modes': ['walk']})
n_right.reproject('epsg:4326')
n_left.add(n_right)
assert_semantically_equal(dict(n_left.nodes()), {
'1': {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118, 'lon': -0.14625948709424305,
'lat': 51.52287873323954, 's2_id': 5221390329378179879},
'2': {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392, 'lon': -0.14439428709377497,
'lat': 51.52228713323965, 's2_id': 5221390328605860387}})
assert_semantically_equal(dict(n_left.links()), {'1': {'modes': ['walk'], 'from': '1', 'to': '2', 'id': '1'}})
def test_adding_networks_with_clashing_node_ids():
n_left = Network('epsg:27700')
n_left.add_node('1', {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118,
'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879})
n_left.add_node('2', {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392,
'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387})
n_left.add_link('1', '1', '2', attribs={'modes': ['walk']})
n_right = Network('epsg:27700')
n_right.add_node('10', {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118,
'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879})
n_right.add_node('20', {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392,
'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387})
n_right.add_link('1', '10', '20', attribs={'modes': ['walk']})
n_left.add(n_right)
assert_semantically_equal(dict(n_left.nodes()), {
'1': {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118, 'lon': -0.14625948709424305,
'lat': 51.52287873323954, 's2_id': 5221390329378179879},
'2': {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392, 'lon': -0.14439428709377497,
'lat': 51.52228713323965, 's2_id': 5221390328605860387}})
assert_semantically_equal(dict(n_left.links()), {'1': {'modes': ['walk'], 'from': '1', 'to': '2', 'id': '1'}})
def test_adding_networks_with_clashing_link_ids():
n_left = Network('epsg:27700')
n_left.add_node('1', {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118,
'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879})
n_left.add_node('2', {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392,
'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387})
n_left.add_link('1', '1', '2', attribs={'modes': ['walk']})
n_right = Network('epsg:27700')
n_right.add_node('1', {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118,
'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879})
n_right.add_node('2', {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392,
'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387})
n_right.add_link('10', '1', '2', attribs={'modes': ['walk']})
n_left.add(n_right)
assert_semantically_equal(dict(n_left.nodes()), {
'1': {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118, 'lon': -0.14625948709424305,
'lat': 51.52287873323954, 's2_id': 5221390329378179879},
'2': {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392, 'lon': -0.14439428709377497,
'lat': 51.52228713323965, 's2_id': 5221390328605860387}})
assert_semantically_equal(dict(n_left.links()), {'1': {'modes': ['walk'], 'from': '1', 'to': '2', 'id': '1'}})
def test_adding_networks_with_clashing_multiindices():
n_left = Network('epsg:27700')
n_left.add_node('1', {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118,
'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879})
n_left.add_node('2', {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392,
'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387})
n_left.add_link('1', '1', '2', 0, attribs={'modes': ['walk']})
n_right = Network('epsg:27700')
n_left.add_node('1', {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118,
'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879})
n_left.add_node('2', {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392,
'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387})
n_left.add_link('1', '1', '2', 0, attribs={'modes': ['walk', 'bike']})
n_left.add(n_right)
assert len(list(n_left.nodes())) == 2
assert n_left.node('1') == {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118,
'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879}
assert n_left.node('2') == {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392,
'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387}
assert len(n_left.link_id_mapping) == 2
assert n_left.link('1') == {'modes': ['walk'], 'from': '1', 'to': '2', 'id': '1'}
assert n_left.graph['1']['2'][0] == {'modes': ['walk'], 'from': '1', 'to': '2', 'id': '1'}
def test_adding_disjoint_networks_with_unique_ids():
n_left = Network('epsg:27700')
n_left.add_node('1', {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118,
'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879})
n_left.add_node('2', {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392,
'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387})
n_left.add_link('1', '1', '2', attribs={'modes': ['walk']})
n_right = Network('epsg:27700')
n_right.add_node('10', {'id': '1', 'x': 1, 'y': 1,
'lon': 1, 'lat': 1, 's2_id': 1})
n_right.add_node('20', {'id': '2', 'x': 1, 'y': 1,
'lon': 1, 'lat': 1, 's2_id': 2})
n_right.add_link('100', '10', '20', attribs={'modes': ['walk']})
n_left.add(n_right)
assert_semantically_equal(dict(n_left.nodes()), {'10': {'id': '1', 'x': 1, 'y': 1, 'lon': 1, 'lat': 1, 's2_id': 1},
'20': {'id': '2', 'x': 1, 'y': 1, 'lon': 1, 'lat': 1, 's2_id': 2},
'1': {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118,
'lon': -0.14625948709424305, 'lat': 51.52287873323954,
's2_id': 5221390329378179879},
'2': {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392,
'lon': -0.14439428709377497, 'lat': 51.52228713323965,
's2_id': 5221390328605860387}})
assert_semantically_equal(dict(n_left.links()), {'100': {'modes': ['walk'], 'from': '10', 'to': '20', 'id': '100'},
'1': {'modes': ['walk'], 'from': '1', 'to': '2', 'id': '1'}})
def test_adding_disjoint_networks_with_clashing_ids():
n_left = Network('epsg:27700')
n_left.add_node('1', {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118,
'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879})
n_left.add_node('2', {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392,
'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387})
n_left.add_link('1', '1', '2', attribs={'modes': ['walk']})
n_right = Network('epsg:27700')
n_right.add_node('1', {'id': '1', 'x': 1, 'y': 1,
'lon': 1, 'lat': 1, 's2_id': 1})
n_right.add_node('2', {'id': '2', 'x': 1, 'y': 1,
'lon': 1, 'lat': 1, 's2_id': 2})
n_right.add_link('1', '1', '2', attribs={'modes': ['walk']})
n_left.add(n_right)
assert len(list(n_left.nodes())) == 4
assert n_left.node('1') == {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118,
'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879}
assert n_left.node('2') == {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392,
'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387}
assert len(n_left.link_id_mapping) == 2
assert n_left.link('1') == {'modes': ['walk'], 'from': '1', 'to': '2', 'id': '1'}
def test_adding_simplified_network_to_unsimplified_network_throws_error():
n = Network('epsg:2770')
m = Network('epsg:2770')
m.graph.graph['simplified'] = True
with pytest.raises(RuntimeError) as error_info:
n.add(m)
assert "cannot add" in str(error_info.value)
def test_print_shows_info(mocker):
mocker.patch.object(Network, 'info')
n = Network('epsg:27700')
n.print()
n.info.assert_called_once()
def test_plot_delegates_to_util_plot_plot_graph_routes(mocker):
mocker.patch.object(plot, 'plot_graph_routes')
n = Network('epsg:27700')
n.plot()
plot.plot_graph_routes.assert_called_once()
def test_plot_graph_delegates_to_util_plot_plot_graph(mocker):
mocker.patch.object(plot, 'plot_graph')
n = Network('epsg:27700')
n.plot_graph()
plot.plot_graph.assert_called_once()
def test_plot_schedule_delegates_to_util_plot_plot_non_routed_schedule_graph(mocker, network_object_from_test_data):
mocker.patch.object(plot, 'plot_non_routed_schedule_graph')
n = network_object_from_test_data
n.plot_schedule()
plot.plot_non_routed_schedule_graph.assert_called_once()
def test_attempt_to_simplify_already_simplified_network_throws_error():
n = Network('epsg:27700')
n.graph.graph["simplified"] = True
with pytest.raises(RuntimeError) as error_info:
n.simplify()
assert "cannot simplify" in str(error_info.value)
def test_simplifying_puma_network_results_in_correct_record_of_removed_links_and_expected_graph_data():
n = read.read_matsim(path_to_network=puma_network_test_file, epsg='epsg:27700',
path_to_schedule=puma_schedule_test_file)
link_ids_pre_simplify = set(dict(n.links()).keys())
n.simplify()
assert n.is_simplified()
link_ids_post_simplify = set(dict(n.links()).keys())
assert link_ids_post_simplify & link_ids_pre_simplify
new_links = link_ids_post_simplify - link_ids_pre_simplify
deleted_links = link_ids_pre_simplify - link_ids_post_simplify
assert set(n.link_simplification_map.keys()) == deleted_links
assert set(n.link_simplification_map.values()) == new_links
assert (set(n.link_id_mapping.keys()) & new_links) == new_links
report = n.generate_validation_report()
assert report['routing']['services_have_routes_in_the_graph']
assert report['schedule']['schedule_level']['is_valid_schedule']
def test_simplified_network_saves_to_correct_dtds(tmpdir, network_dtd, schedule_dtd):
n = read.read_matsim(path_to_network=puma_network_test_file, epsg='epsg:27700',
path_to_schedule=puma_schedule_test_file)
n.simplify()
n.write_to_matsim(tmpdir)
generated_network_file_path = os.path.join(tmpdir, 'network.xml')
xml_obj = lxml.etree.parse(generated_network_file_path)
assert network_dtd.validate(xml_obj), \
'Doc generated at {} is not valid against DTD due to {}'.format(generated_network_file_path,
network_dtd.error_log.filter_from_errors())
generated_schedule_file_path = os.path.join(tmpdir, 'schedule.xml')
xml_obj = lxml.etree.parse(generated_schedule_file_path)
assert schedule_dtd.validate(xml_obj), \
'Doc generated at {} is not valid against DTD due to {}'.format(generated_network_file_path,
schedule_dtd.error_log.filter_from_errors())
def test_simplifying_network_with_multi_edges_resulting_in_multi_paths():
n = Network('epsg:27700')
n.add_nodes({
'n_-1': {'x': -1, 'y': -1, 's2_id': -1},
'n_0': {'x': 0, 'y': 0, 's2_id': 0},
'n_1': {'x': 1, 'y': 1, 's2_id': 1},
'n_2': {'x': 2, 'y': 2, 's2_id': 2},
'n_3': {'x': 3, 'y': 3, 's2_id': 3},
'n_4': {'x': 4, 'y': 4, 's2_id': 4},
'n_5': {'x': 5, 'y': 5, 's2_id': 5},
'n_6': {'x': 6, 'y': 5, 's2_id': 6},
})
n.add_links({
'l_-1': {'from': 'n_-1', 'to': 'n_1', 'freespeed': 1, 'capacity': 1, 'permlanes': 1, 'length': 1,
'modes': {'car'}},
'l_0': {'from': 'n_0', 'to': 'n_1', 'freespeed': 1, 'capacity': 1, 'permlanes': 1, 'length': 1,
'modes': {'car'}},
'l_1': {'from': 'n_1', 'to': 'n_2', 'freespeed': 1, 'capacity': 1, 'permlanes': 1, 'length': 1,
'modes': {'car'}},
'l_2': {'from': 'n_1', 'to': 'n_2', 'freespeed': 1, 'capacity': 1, 'permlanes': 1, 'length': 1,
'modes': {'car'}},
'l_3': {'from': 'n_2', 'to': 'n_3', 'freespeed': 1, 'capacity': 1, 'permlanes': 1, 'length': 1,
'modes': {'car'}},
'l_4': {'from': 'n_2', 'to': 'n_3', 'freespeed': 1, 'capacity': 1, 'permlanes': 1, 'length': 1,
'modes': {'car'}},
'l_5': {'from': 'n_3', 'to': 'n_4', 'freespeed': 1, 'capacity': 1, 'permlanes': 1, 'length': 1,
'modes': {'car'}},
'l_6': {'from': 'n_3', 'to': 'n_4', 'freespeed': 1, 'capacity': 1, 'permlanes': 1, 'length': 1,
'modes': {'car'}},
'l_7': {'from': 'n_4', 'to': 'n_5', 'freespeed': 1, 'capacity': 1, 'permlanes': 1, 'length': 1,
'modes': {'car'}},
'l_8': {'from': 'n_4', 'to': 'n_6', 'freespeed': 1, 'capacity': 1, 'permlanes': 1, 'length': 1,
'modes': {'car'}}
})
n.simplify()
assert set(n.link_simplification_map) == {'l_4', 'l_1', 'l_5', 'l_3', 'l_6', 'l_2'}
def test_reading_back_simplified_network():
# simplified networks have additional geometry attribute and some of their attributes are composite, e.g. links
# now refer to a number of osm ways each with a unique id
n = read.read_matsim(path_to_network=simplified_network, epsg='epsg:27700',
path_to_schedule=simplified_schedule)
number_of_simplified_links = 659
links_with_geometry = n.extract_links_on_edge_attributes(conditions={'geometry': lambda x: True})
assert len(links_with_geometry) == number_of_simplified_links
for link in links_with_geometry:
attribs = n.link(link)
if 'attributes' in attribs:
assert not 'geometry' in attribs['attributes']
for k, v in attribs['attributes'].items():
if isinstance(v['text'], str):
assert not ',' in v['text']
def test_network_with_missing_link_attribute_elem_text_is_read_and_able_to_save_again(tmpdir):
n = read.read_matsim(path_to_network=network_link_attrib_text_missing, epsg='epsg:27700')
n.write_to_matsim(tmpdir)
def test_node_attribute_data_under_key_returns_correct_pd_series_with_nested_keys():
n = Network('epsg:27700')
n.add_node(1, {'a': {'b': 1}})
n.add_node(2, {'a': {'b': 4}})
output_series = n.node_attribute_data_under_key(key={'a': 'b'})
assert_series_equal(output_series, pd.Series({1: 1, 2: 4}))
def test_node_attribute_data_under_key_returns_correct_pd_series_with_flat_keys():
n = Network('epsg:27700')
n.add_node(1, {'b': 1})
n.add_node(2, {'b': 4})
output_series = n.node_attribute_data_under_key(key='b')
assert_series_equal(output_series, pd.Series({1: 1, 2: 4}))
def test_node_attribute_data_under_keys(network1):
df = network1.node_attribute_data_under_keys(['x', 'y'])
df_to_compare = pd.DataFrame({'x': {'101982': '528704.1425925883', '101986': '528835.203274008'},
'y': {'101982': '182068.78193707118', '101986': '182006.27331298392'}})
assert_frame_equal(df, df_to_compare)
def test_node_attribute_data_under_keys_with_named_index(network1):
df = network1.node_attribute_data_under_keys(['x', 'y'], index_name='index')
assert df.index.name == 'index'
def test_node_attribute_data_under_keys_generates_key_for_nested_data(network1):
network1.add_node('1', {'key': {'nested_value': {'more_nested': 4}}})
df = network1.node_attribute_data_under_keys([{'key': {'nested_value': 'more_nested'}}])
assert isinstance(df, pd.DataFrame)
assert 'key::nested_value::more_nested' in df.columns
def test_node_attribute_data_under_keys_returns_dataframe_with_one_col_if_passed_one_key(network1):
df = network1.node_attribute_data_under_keys(['x'], index_name='index')
assert isinstance(df, pd.DataFrame)
assert len(df.columns) == 1
def test_link_attribute_data_under_key_returns_correct_pd_series_with_nested_keys():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'a': {'b': 1}})
n.add_link('1', 1, 2, attribs={'a': {'b': 4}})
output_series = n.link_attribute_data_under_key(key={'a': 'b'})
assert_series_equal(output_series, pd.Series({'0': 1, '1': 4}))
def test_link_attribute_data_under_key_returns_correct_pd_series_with_flat_keys():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'b': 1})
n.add_link('1', 1, 2, attribs={'b': 4})
output_series = n.link_attribute_data_under_key(key='b')
assert_series_equal(output_series, pd.Series({'0': 1, '1': 4}))
def test_link_attribute_data_under_keys(network1):
df = network1.link_attribute_data_under_keys(['modes', 'freespeed', 'capacity', 'permlanes'])
df_to_compare = pd.DataFrame({'modes': {'0': ['car']}, 'freespeed': {'0': 4.166666666666667},
'capacity': {'0': 600.0}, 'permlanes': {'0': 1.0}})
assert_frame_equal(df, df_to_compare)
def test_link_attribute_data_under_keys_with_named_index(network1):
df = network1.link_attribute_data_under_keys(['modes', 'freespeed', 'capacity', 'permlanes'], index_name='index')
assert df.index.name == 'index'
def test_link_attribute_data_under_keys_returns_dataframe_with_one_col_if_passed_one_key(network1):
df = network1.link_attribute_data_under_keys(['modes'])
assert isinstance(df, pd.DataFrame)
assert len(df.columns) == 1
def test_link_attribute_data_under_keys_generates_key_for_nested_data(network1):
df = network1.link_attribute_data_under_keys([{'attributes': {'osm:way:access': 'text'}}])
assert isinstance(df, pd.DataFrame)
assert 'attributes::osm:way:access::text' in df.columns
def test_add_node_adds_node_to_graph_with_attribs():
n = Network('epsg:27700')
n.add_node(1, {'a': 1})
assert n.graph.has_node(1)
assert n.node(1) == {'a': 1}
def test_add_node_adds_node_to_graph_without_attribs():
n = Network('epsg:27700')
n.add_node(1)
assert n.node(1) == {}
assert n.graph.has_node(1)
def test_add_multiple_nodes():
n = Network('epsg:27700')
reindexing_dict, actual_nodes_added = n.add_nodes({1: {'x': 1, 'y': 2}, 2: {'x': 2, 'y': 2}})
assert n.graph.has_node(1)
assert n.node(1) == {'x': 1, 'y': 2, 'id': 1}
assert n.graph.has_node(2)
assert n.node(2) == {'x': 2, 'y': 2, 'id': 2}
assert reindexing_dict == {}
def test_add_nodes_with_clashing_ids():
n = Network('epsg:27700')
n.add_node(1, {})
reindexing_dict, actual_nodes_added = n.add_nodes({1: {'x': 1, 'y': 2}, 2: {'x': 2, 'y': 2}})
assert n.graph.has_node(1)
assert n.node(1) == {}
assert n.graph.has_node(2)
assert n.node(2) == {'x': 2, 'y': 2, 'id': 2}
assert 1 in reindexing_dict
assert n.graph.has_node(reindexing_dict[1])
assert n.node(reindexing_dict[1]) == {'x': 1, 'y': 2, 'id': reindexing_dict[1]}
def test_add_nodes_with_multiple_clashing_ids():
n = Network('epsg:27700')
n.add_node(1, {})
n.add_node(2, {})
assert n.graph.has_node(1)
assert n.node(1) == {}
assert n.graph.has_node(2)
assert n.node(2) == {}
reindexing_dict, actual_nodes_added = n.add_nodes({1: {'x': 1, 'y': 2}, 2: {'x': 2, 'y': 2}})
assert 1 in reindexing_dict
assert n.graph.has_node(reindexing_dict[1])
assert n.node(reindexing_dict[1]) == {'x': 1, 'y': 2, 'id': reindexing_dict[1]}
assert 2 in reindexing_dict
assert n.graph.has_node(reindexing_dict[2])
assert n.node(reindexing_dict[2]) == {'x': 2, 'y': 2, 'id': reindexing_dict[2]}
def test_add_edge_generates_a_link_id_and_delegated_to_add_link_id(mocker):
mocker.patch.object(Network, 'add_link')
mocker.patch.object(Network, 'generate_index_for_edge', return_value='12345')
n = Network('epsg:27700')
n.add_edge(1, 2, attribs={'a': 1})
Network.generate_index_for_edge.assert_called_once()
Network.add_link.assert_called_once_with('12345', 1, 2, None, {'a': 1}, False)
def test_add_edge_generates_a_link_id_with_specified_multiidx(mocker):
mocker.patch.object(Network, 'add_link')
mocker.patch.object(Network, 'generate_index_for_edge', return_value='12345')
n = Network('epsg:27700')
n.add_edge(1, 2, multi_edge_idx=10, attribs={'a': 1})
Network.generate_index_for_edge.assert_called_once()
Network.add_link.assert_called_once_with('12345', 1, 2, 10, {'a': 1}, False)
def test_adding_multiple_edges():
n = Network('epsg:27700')
n.add_edges([{'from': 1, 'to': 2}, {'from': 2, 'to': 3}])
assert n.graph.has_edge(1, 2)
assert n.graph.has_edge(2, 3)
assert '0' in n.link_id_mapping
assert '1' in n.link_id_mapping
if n.link_id_mapping['0'] == {'from': 1, 'to': 2, 'multi_edge_idx': 0}:
assert n.link_id_mapping['1'] == {'from': 2, 'to': 3, 'multi_edge_idx': 0}
elif n.link_id_mapping['1'] == {'from': 1, 'to': 2, 'multi_edge_idx': 0}:
assert n.link_id_mapping['0'] == {'from': 2, 'to': 3, 'multi_edge_idx': 0}
else:
raise AssertionError()
def test_adding_multiple_edges_between_same_nodes():
n = Network('epsg:27700')
n.add_edges([{'from': 1, 'to': 2}, {'from': 1, 'to': 2}, {'from': 1, 'to': 2}, {'from': 2, 'to': 3}])
assert n.graph.has_edge(1, 2)
assert n.graph.number_of_edges(1, 2) == 3
assert n.graph.has_edge(2, 3)
assert len(n.link_id_mapping) == 4
def test_add_link_adds_edge_to_graph_with_attribs():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'a': 1})
assert n.graph.has_edge(1, 2)
assert '0' in n.link_id_mapping
assert n.edge(1, 2) == {0: {'a': 1, 'from': 1, 'id': '0', 'to': 2}}
def test_add_link_adds_edge_to_graph_without_attribs():
n = Network('epsg:27700')
n.add_link('0', 1, 2)
assert n.graph.has_edge(1, 2)
assert '0' in n.link_id_mapping
assert n.link_id_mapping['0'] == {'from': 1, 'to': 2, 'multi_edge_idx': 0}
def test_adding_multiple_links():
n = Network('epsg:27700')
n.add_links({'0': {'from': 1, 'to': 2}, '1': {'from': 2, 'to': 3}})
assert n.graph.has_edge(1, 2)
assert n.graph.has_edge(2, 3)
assert '0' in n.link_id_mapping
assert '1' in n.link_id_mapping
assert n.link_id_mapping['0'] == {'from': 1, 'to': 2, 'multi_edge_idx': 0}
assert n.link_id_mapping['1'] == {'from': 2, 'to': 3, 'multi_edge_idx': 0}
def test_adding_multiple_links_with_id_clashes():
n = Network('epsg:27700')
n.add_link('0', 10, 20)
assert '0' in n.link_id_mapping
reindexing_dict, links_and_attribs = n.add_links({'0': {'from': 1, 'to': 2}, '1': {'from': 2, 'to': 3}})
assert '1' in n.link_id_mapping
assert '0' in reindexing_dict
assert len(n.link_id_mapping) == 3
assert_semantically_equal(links_and_attribs[reindexing_dict['0']], {'from': 1, 'to': 2, 'id': reindexing_dict['0']})
assert_semantically_equal(links_and_attribs['1'], {'from': 2, 'to': 3, 'id': '1'})
def test_adding_multiple_links_with_multiple_id_clashes():
n = Network('epsg:27700')
n.add_link('0', 10, 20)
n.add_link('1', 10, 20)
assert '0' in n.link_id_mapping
assert '1' in n.link_id_mapping
reindexing_dict, links_and_attribs = n.add_links({'0': {'from': 1, 'to': 2}, '1': {'from': 2, 'to': 3}})
assert '0' in reindexing_dict
assert '1' in reindexing_dict
assert len(n.link_id_mapping) == 4
assert_semantically_equal(links_and_attribs[reindexing_dict['0']], {'from': 1, 'to': 2, 'id': reindexing_dict['0']})
assert_semantically_equal(links_and_attribs[reindexing_dict['1']], {'from': 2, 'to': 3, 'id': reindexing_dict['1']})
def test_adding_loads_of_multiple_links_between_same_nodes():
n = Network('epsg:27700')
reindexing_dict, links_and_attribs = n.add_links({i: {'from': 1, 'to': 2} for i in range(10)})
assert_semantically_equal(links_and_attribs, {i: {'from': 1, 'to': 2, 'id': i} for i in range(10)})
assert_semantically_equal(n.link_id_mapping, {i: {'from': 1, 'to': 2, 'multi_edge_idx': i} for i in range(10)})
def test_adding_multiple_links_with_multi_idx_clashes():
n = Network('epsg:27700')
n.add_link('0', 1, 2)
n.add_link('1', 1, 2)
assert '0' in n.link_id_mapping
assert '1' in n.link_id_mapping
n.add_links({'2': {'from': 1, 'to': 2}, '3': {'from': 1, 'to': 2}, '4': {'from': 2, 'to': 3}})
assert n.link_id_mapping['2'] == {'from': 1, 'to': 2, 'multi_edge_idx': 2}
assert n.link_id_mapping['3'] == {'from': 1, 'to': 2, 'multi_edge_idx': 3}
assert n.link_id_mapping['4'] == {'from': 2, 'to': 3, 'multi_edge_idx': 0}
def test_adding_multiple_links_with_id_and_multi_idx_clashes():
n = Network('epsg:27700')
n.add_link('0', 1, 2)
n.add_link('1', 1, 2)
assert '0' in n.link_id_mapping
assert '1' in n.link_id_mapping
reindexing_dict, links_and_attribs = n.add_links(
{'0': {'from': 1, 'to': 2}, '1': {'from': 1, 'to': 2}, '2': {'from': 2, 'to': 3}})
assert '0' in reindexing_dict
assert '1' in reindexing_dict
assert len(n.link_id_mapping) == 5
assert_semantically_equal(n.link_id_mapping[reindexing_dict['0']], {'from': 1, 'to': 2, 'multi_edge_idx': 2})
assert_semantically_equal(n.link_id_mapping[reindexing_dict['1']], {'from': 1, 'to': 2, 'multi_edge_idx': 3})
def test_adding_multiple_links_missing_some_from_nodes():
n = Network('epsg:27700')
with pytest.raises(RuntimeError) as error_info:
n.add_links({'0': {'to': 2}, '1': {'from': 2, 'to': 3}})
assert "You are trying to add links which are missing `from` (origin) nodes" in str(error_info.value)
def test_adding_multiple_links_missing_from_nodes_completely():
n = Network('epsg:27700')
with pytest.raises(RuntimeError) as error_info:
n.add_links({'0': {'to': 2}, '1': {'to': 3}})
assert "You are trying to add links which are missing `from` (origin) nodes" in str(error_info.value)
def test_adding_multiple_links_missing_some_to_nodes():
n = Network('epsg:27700')
with pytest.raises(RuntimeError) as error_info:
n.add_links({'0': {'from': 2}, '1': {'from': 2, 'to': 3}})
assert "You are trying to add links which are missing `to` (destination) nodes" in str(error_info.value)
def test_adding_multiple_links_missing_to_nodes_completely():
n = Network('epsg:27700')
with pytest.raises(RuntimeError) as error_info:
n.add_links({'0': {'from': 2}, '1': {'from': 2}})
assert "You are trying to add links which are missing `to` (destination) nodes" in str(error_info.value)
def test_adding_links_with_different_non_overlapping_attributes():
# generates a nan attribute for link attributes
n = Network('epsg:27700')
reindexing_dict, links_and_attributes = n.add_links({
'2': {'from': 1, 'to': 2, 'speed': 20},
'3': {'from': 1, 'to': 2, 'capacity': 123},
'4': {'from': 2, 'to': 3, 'modes': [1, 2, 3]}})
assert reindexing_dict == {}
assert_semantically_equal(links_and_attributes, {
'2': {'id': '2', 'from': 1, 'to': 2, 'speed': 20},
'3': {'id': '3', 'from': 1, 'to': 2, 'capacity': 123},
'4': {'id': '4', 'from': 2, 'to': 3, 'modes': [1, 2, 3]}})
def test_adding_multiple_links_to_same_edge_clashing_with_existing_edge():
n = Network('epsg:27700')
n.add_link(link_id='0', u='2', v='2', attribs={'speed': 20})
n.add_links({'1': {'from': '2', 'to': '2', 'something': 20},
'2': {'from': '2', 'to': '2', 'capacity': 123}})
assert_semantically_equal(dict(n.links()), {'0': {'speed': 20, 'from': '2', 'to': '2', 'id': '0'},
'1': {'from': '2', 'to': '2', 'something': 20.0, 'id': '1'},
'2': {'from': '2', 'to': '2', 'capacity': 123.0, 'id': '2'}})
assert_semantically_equal(n.link_id_mapping, {'0': {'from': '2', 'to': '2', 'multi_edge_idx': 0},
'1': {'from': '2', 'to': '2', 'multi_edge_idx': 1},
'2': {'from': '2', 'to': '2', 'multi_edge_idx': 2}})
def test_network_modal_subgraph_using_general_subgraph_on_link_attribs():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'modes': ['car', 'bike']})
n.add_link('1', 2, 3, attribs={'modes': ['car']})
n.add_link('2', 2, 3, attribs={'modes': ['bike']})
car_graph = n.subgraph_on_link_conditions(conditions={'modes': 'car'}, mixed_dtypes=True)
assert list(car_graph.edges) == [(1, 2, 0), (2, 3, 0)]
def test_modes():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'modes': ['car', 'bike']})
n.add_link('1', 2, 3, attribs={'modes': ['car']})
n.add_link('2', 2, 3, attribs={'modes': ['bike']})
n.add_link('3', 2, 3, attribs={})
assert n.modes() == {'car', 'bike'}
def test_network_modal_subgraph_using_specific_modal_subgraph_method_single_mode():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'modes': ['car', 'bike']})
n.add_link('1', 2, 3, attribs={'modes': ['car']})
n.add_link('2', 2, 3, attribs={'modes': ['bike']})
car_graph = n.modal_subgraph(modes='car')
assert list(car_graph.edges) == [(1, 2, 0), (2, 3, 0)]
def test_network_modal_subgraph_using_specific_modal_subgraph_method_several_modes():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'modes': ['car', 'bike']})
n.add_link('1', 2, 3, attribs={'modes': ['car']})
n.add_link('2', 2, 3, attribs={'modes': ['bike']})
n.add_link('3', 2, 3, attribs={'modes': ['walk']})
car_bike_graph = n.modal_subgraph(modes=['car', 'bike'])
assert list(car_bike_graph.edges) == [(1, 2, 0), (2, 3, 0), (2, 3, 1)]
def test_links_on_modal_condition():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'modes': ['car', 'bike']})
n.add_link('1', 2, 3, attribs={'modes': ['car']})
n.add_link('2', 2, 3, attribs={'modes': ['bike']})
n.add_link('3', 2, 3, attribs={'modes': ['walk']})
car_links = n.links_on_modal_condition(modes=['car'])
assert set(car_links) == {'0', '1'}
def test_nodes_on_modal_condition():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'modes': ['car', 'bike']})
n.add_link('1', 2, 3, attribs={'modes': ['car']})
n.add_link('2', 2, 3, attribs={'modes': ['bike']})
n.add_link('3', 2, 3, attribs={'modes': ['walk']})
car_nodes = n.nodes_on_modal_condition(modes=['car'])
assert set(car_nodes) == {1, 2, 3}
test_geojson = os.path.abspath(
os.path.join(os.path.dirname(__file__), "test_data", "test_geojson.geojson"))
def test_nodes_on_spatial_condition_with_geojson(network_object_from_test_data):
network_object_from_test_data.add_node('1', {'id': '1', 'x': 508400, 'y': 162050})
nodes = network_object_from_test_data.nodes_on_spatial_condition(test_geojson)
assert set(nodes) == {'21667818', '25508485'}
def test_nodes_on_spatial_condition_with_shapely_geom(network_object_from_test_data):
region = Polygon([(-0.1487016677856445, 51.52556684350165), (-0.14063358306884766, 51.5255134425896),
(-0.13865947723388672, 51.5228700191647), (-0.14093399047851562, 51.52006622056997),
(-0.1492595672607422, 51.51974577545329), (-0.1508045196533203, 51.52276321095246),
(-0.1487016677856445, 51.52556684350165)])
network_object_from_test_data.add_node('1', {'id': '1', 'x': 508400, 'y': 162050})
nodes = network_object_from_test_data.nodes_on_spatial_condition(region)
assert set(nodes) == {'21667818', '25508485'}
def test_nodes_on_spatial_condition_with_s2_region(network_object_from_test_data):
region = '48761ad04d,48761ad054,48761ad05c,48761ad061,48761ad085,48761ad08c,48761ad094,48761ad09c,48761ad0b,48761ad0d,48761ad0f,48761ad14,48761ad182c,48761ad19c,48761ad1a4,48761ad1ac,48761ad1b4,48761ad1bac,48761ad3d7f,48761ad3dc,48761ad3e4,48761ad3ef,48761ad3f4,48761ad3fc,48761ad41,48761ad43,48761ad5d,48761ad5e4,48761ad5ec,48761ad5fc,48761ad7,48761ad803,48761ad81c,48761ad824,48761ad82c,48761ad9d,48761ad9e4,48761ad9e84,48761ad9fc,48761ada04,48761ada0c,48761b2804,48761b2814,48761b281c,48761b283,48761b2844,48761b284c,48761b2995,48761b29b4,48761b29bc,48761b29d,48761b29f,48761b2a04'
network_object_from_test_data.add_node(
'1', {'id': '1', 'x': 508400, 'y': 162050, 's2_id': spatial.generate_index_s2(51.3472033, 0.4449167)})
nodes = network_object_from_test_data.nodes_on_spatial_condition(region)
assert set(nodes) == {'21667818', '25508485'}
def test_links_on_spatial_condition_with_geojson(network_object_from_test_data):
network_object_from_test_data.add_node('1', {'id': '1', 'x': 508400, 'y': 162050})
network_object_from_test_data.add_link('2', u='21667818', v='1')
links = network_object_from_test_data.links_on_spatial_condition(test_geojson)
assert set(links) == {'1', '2'}
def test_links_on_spatial_condition_with_shapely_geom(network_object_from_test_data):
region = Polygon([(-0.1487016677856445, 51.52556684350165), (-0.14063358306884766, 51.5255134425896),
(-0.13865947723388672, 51.5228700191647), (-0.14093399047851562, 51.52006622056997),
(-0.1492595672607422, 51.51974577545329), (-0.1508045196533203, 51.52276321095246),
(-0.1487016677856445, 51.52556684350165)])
network_object_from_test_data.add_node('1', {'id': '1', 'x': 508400, 'y': 162050})
network_object_from_test_data.add_link('2', u='21667818', v='1')
links = network_object_from_test_data.links_on_spatial_condition(region)
assert set(links) == {'1', '2'}
def test_links_on_spatial_condition_with_s2_region(network_object_from_test_data):
region = '48761ad04d,48761ad054,48761ad05c,48761ad061,48761ad085,48761ad08c,48761ad094,48761ad09c,48761ad0b,48761ad0d,48761ad0f,48761ad14,48761ad182c,48761ad19c,48761ad1a4,48761ad1ac,48761ad1b4,48761ad1bac,48761ad3d7f,48761ad3dc,48761ad3e4,48761ad3ef,48761ad3f4,48761ad3fc,48761ad41,48761ad43,48761ad5d,48761ad5e4,48761ad5ec,48761ad5fc,48761ad7,48761ad803,48761ad81c,48761ad824,48761ad82c,48761ad9d,48761ad9e4,48761ad9e84,48761ad9fc,48761ada04,48761ada0c,48761b2804,48761b2814,48761b281c,48761b283,48761b2844,48761b284c,48761b2995,48761b29b4,48761b29bc,48761b29d,48761b29f,48761b2a04'
network_object_from_test_data.add_node('1', {'id': '1', 'x': 508400, 'y': 162050})
network_object_from_test_data.add_link('2', u='21667818', v='1')
links = network_object_from_test_data.links_on_spatial_condition(region)
assert set(links) == {'1', '2'}
def test_links_on_spatial_condition_with_intersection_and_complex_geometry_that_falls_outside_region(
network_object_from_test_data):
region = Polygon([(-0.1487016677856445, 51.52556684350165), (-0.14063358306884766, 51.5255134425896),
(-0.13865947723388672, 51.5228700191647), (-0.14093399047851562, 51.52006622056997),
(-0.1492595672607422, 51.51974577545329), (-0.1508045196533203, 51.52276321095246),
(-0.1487016677856445, 51.52556684350165)])
network_object_from_test_data.add_link(
'2', u='21667818', v='25508485',
attribs={'geometry': LineString(
[(528504.1342843144, 182155.7435136598), (508400, 162050), (528489.467895946, 182206.20303669578)])})
links = network_object_from_test_data.links_on_spatial_condition(region, how='intersect')
assert set(links) == {'1', '2'}
def test_links_on_spatial_condition_with_containement(network_object_from_test_data):
region = Polygon([(-0.1487016677856445, 51.52556684350165), (-0.14063358306884766, 51.5255134425896),
(-0.13865947723388672, 51.5228700191647), (-0.14093399047851562, 51.52006622056997),
(-0.1492595672607422, 51.51974577545329), (-0.1508045196533203, 51.52276321095246),
(-0.1487016677856445, 51.52556684350165)])
network_object_from_test_data.add_node('1', {'id': '1', 'x': 508400, 'y': 162050})
network_object_from_test_data.add_link('2', u='21667818', v='1')
links = network_object_from_test_data.links_on_spatial_condition(region, how='within')
assert set(links) == {'1'}
def test_links_on_spatial_condition_with_containement_and_complex_geometry_that_falls_outside_region(
network_object_from_test_data):
region = Polygon([(-0.1487016677856445, 51.52556684350165), (-0.14063358306884766, 51.5255134425896),
(-0.13865947723388672, 51.5228700191647), (-0.14093399047851562, 51.52006622056997),
(-0.1492595672607422, 51.51974577545329), (-0.1508045196533203, 51.52276321095246),
(-0.1487016677856445, 51.52556684350165)])
network_object_from_test_data.add_link(
'2', u='21667818', v='25508485',
attribs={'geometry': LineString(
[(528504.1342843144, 182155.7435136598), (508400, 162050), (528489.467895946, 182206.20303669578)])})
links = network_object_from_test_data.links_on_spatial_condition(region, how='within')
assert set(links) == {'1'}
def test_links_on_spatial_condition_with_containement_and_s2_region(network_object_from_test_data):
region = '48761ad04d,48761ad054,48761ad05c,48761ad061,48761ad085,48761ad08c,48761ad094,48761ad09c,48761ad0b,48761ad0d,48761ad0f,48761ad14,48761ad182c,48761ad19c,48761ad1a4,48761ad1ac,48761ad1b4,48761ad1bac,48761ad3d7f,48761ad3dc,48761ad3e4,48761ad3ef,48761ad3f4,48761ad3fc,48761ad41,48761ad43,48761ad5d,48761ad5e4,48761ad5ec,48761ad5fc,48761ad7,48761ad803,48761ad81c,48761ad824,48761ad82c,48761ad9d,48761ad9e4,48761ad9e84,48761ad9fc,48761ada04,48761ada0c,48761b2804,48761b2814,48761b281c,48761b283,48761b2844,48761b284c,48761b2995,48761b29b4,48761b29bc,48761b29d,48761b29f,48761b2a04'
network_object_from_test_data.add_node('1', {'id': '1', 'x': 508400, 'y': 162050})
network_object_from_test_data.add_link('2', u='21667818', v='1')
links = network_object_from_test_data.links_on_spatial_condition(region, how='within')
assert set(links) == {'1'}
def test_links_on_spatial_condition_with_containement_and_complex_geometry_that_falls_outside_s2_region(
network_object_from_test_data):
region = '48761ad04d,48761ad054,48761ad05c,48761ad061,48761ad085,48761ad08c,48761ad094,48761ad09c,48761ad0b,48761ad0d,48761ad0f,48761ad14,48761ad182c,48761ad19c,48761ad1a4,48761ad1ac,48761ad1b4,48761ad1bac,48761ad3d7f,48761ad3dc,48761ad3e4,48761ad3ef,48761ad3f4,48761ad3fc,48761ad41,48761ad43,48761ad5d,48761ad5e4,48761ad5ec,48761ad5fc,48761ad7,48761ad803,48761ad81c,48761ad824,48761ad82c,48761ad9d,48761ad9e4,48761ad9e84,48761ad9fc,48761ada04,48761ada0c,48761b2804,48761b2814,48761b281c,48761b283,48761b2844,48761b284c,48761b2995,48761b29b4,48761b29bc,48761b29d,48761b29f,48761b2a04'
network_object_from_test_data.add_link(
'2', u='21667818', v='25508485',
attribs={'geometry': LineString(
[(528504.1342843144, 182155.7435136598), (508400, 162050), (528489.467895946, 182206.20303669578)])})
links = network_object_from_test_data.links_on_spatial_condition(region, how='within')
assert set(links) == {'1'}
def test_find_shortest_path_when_graph_has_no_extra_edge_choices():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'modes': ['car', 'bike'], 'length': 1})
n.add_link('1', 2, 3, attribs={'modes': ['car'], 'length': 1})
n.add_link('2', 2, 3, attribs={'modes': ['bike'], 'length': 1})
n.add_link('3', 2, 3, attribs={'modes': ['walk'], 'length': 1})
bike_route = n.find_shortest_path(1, 3, modes='bike')
assert bike_route == ['0', '2']
def test_find_shortest_path_when_subgraph_is_pre_computed():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'modes': ['car', 'bike'], 'length': 1})
n.add_link('1', 2, 3, attribs={'modes': ['car'], 'length': 1})
n.add_link('2', 2, 3, attribs={'modes': ['bike'], 'length': 1})
n.add_link('3', 2, 3, attribs={'modes': ['walk'], 'length': 1})
bike_g = n.modal_subgraph(modes='bike')
bike_route = n.find_shortest_path(1, 3, subgraph=bike_g)
assert bike_route == ['0', '2']
def test_find_shortest_path_defaults_to_full_graph():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'modes': ['car', 'bike'], 'length': 1})
n.add_link('1', 2, 3, attribs={'modes': ['car'], 'freespeed': 3})
n.add_link('2', 2, 3, attribs={'modes': ['bike'], 'freespeed': 2})
n.add_link('3', 2, 3, attribs={'modes': ['walk'], 'freespeed': 1})
bike_route = n.find_shortest_path(1, 3)
assert bike_route == ['0', '1']
def test_find_shortest_path_when_graph_has_extra_edge_choice_for_freespeed_that_is_obvious():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'modes': ['car', 'bike'], 'length': 1, 'freespeed': 10})
n.add_link('2', 2, 3, attribs={'modes': ['car', 'bike'], 'length': 1, 'freespeed': 10})
n.add_link('3', 2, 3, attribs={'modes': ['car', 'bike'], 'length': 1, 'freespeed': 1})
bike_route = n.find_shortest_path(1, 3, modes='bike')
assert bike_route == ['0', '2']
def test_find_shortest_path_when_graph_has_extra_edge_choice_with_attractive_mode():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'modes': ['car', 'bike'], 'length': 1, 'freespeed': 10})
n.add_link('2', 2, 3, attribs={'modes': ['car', 'bike'], 'length': 1, 'freespeed': 10})
n.add_link('3', 2, 3, attribs={'modes': ['bike'], 'length': 1, 'freespeed': 1})
bike_route = n.find_shortest_path(1, 3, modes='bike')
assert bike_route == ['0', '3']
def test_find_shortest_path_and_return_just_nodes():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'modes': ['car', 'bike'], 'length': 1, 'freespeed': 10})
n.add_link('1', 2, 3, attribs={'modes': ['car', 'bike'], 'length': 1, 'freespeed': 10})
bike_route = n.find_shortest_path(1, 3, return_nodes=True)
assert bike_route == [1, 2, 3]
def test_add_link_adds_link_with_specific_multi_idx():
n = Network('epsg:27700')
n.add_link('0', 1, 2, 0)
assert '0' in n.link_id_mapping
assert n.link_id_mapping['0'] == {'from': 1, 'to': 2, 'multi_edge_idx': 0}
assert n.graph[1][2][0] == {'from': 1, 'to': 2, 'id': '0'}
def test_add_link_generates_new_multi_idx_if_already_exists():
n = Network('epsg:27700')
n.add_link('0', 1, 2, 0)
n.add_link('1', 1, 2, 0)
assert '0' in n.link_id_mapping
assert '1' in n.link_id_mapping
assert n.link_id_mapping['0'] == {'from': 1, 'to': 2, 'multi_edge_idx': 0}
assert n.graph[1][2][0] == {'from': 1, 'to': 2, 'id': '0'}
assert n.link_id_mapping['1']['multi_edge_idx'] != 0
assert n.graph[1][2][n.link_id_mapping['1']['multi_edge_idx']] == {'from': 1, 'to': 2, 'id': '1'}
def test_reindex_node(network1):
assert [id for id, attribs in network1.nodes()] == ['101982', '101986']
assert [id for id, attribs in network1.links()] == ['0']
assert network1.link('0')['from'] == '101982'
assert network1.link('0')['to'] == '101986'
assert [(from_n, to_n) for from_n, to_n, attribs in network1.edges()] == [('101982', '101986')]
assert network1.link_id_mapping['0']['from'] == '101982'
network1.reindex_node('101982', '007')
assert [id for id, attribs in network1.nodes()] == ['007', '101986']
assert [id for id, attribs in network1.links()] == ['0']
assert network1.link('0')['from'] == '007'
assert network1.link('0')['to'] == '101986'
assert [(from_n, to_n) for from_n, to_n, attribs in network1.edges()] == [('007', '101986')]
assert network1.link_id_mapping['0']['from'] == '007'
correct_change_log_df = pd.DataFrame(
{'timestamp': {3: '2020-06-08 19:39:08', 4: '2020-06-08 19:39:08', 5: '2020-06-08 19:39:08'},
'change_event': {3: 'modify', 4: 'modify', 5: 'modify'}, 'object_type': {3: 'link', 4: 'node', 5: 'node'},
'old_id': {3: '0', 4: '101982', 5: '101982'}, 'new_id': {3: '0', 4: '007', 5: '101982'}, 'old_attributes': {
3: "{'id': '0', 'from': '101982', 'to': '101986', 'freespeed': 4.166666666666667, 'capacity': 600.0, 'permlanes': 1.0, 'oneway': '1', 'modes': ['car'], 's2_from': 5221390329378179879, 's2_to': 5221390328605860387, 'length': 52.765151087870265, 'attributes': {'osm:way:access': {'name': 'osm:way:access', 'class': 'java.lang.String', 'text': 'permissive'}, 'osm:way:highway': {'name': 'osm:way:highway', 'class': 'java.lang.String', 'text': 'unclassified'}, 'osm:way:id': {'name': 'osm:way:id', 'class': 'java.lang.Long', 'text': '26997928'}, 'osm:way:name': {'name': 'osm:way:name', 'class': 'java.lang.String', 'text': 'Brunswick Place'}}}",
4: "{'id': '101982', 'x': '528704.1425925883', 'y': '182068.78193707118', 'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879}",
5: "{'id': '101982', 'x': '528704.1425925883', 'y': '182068.78193707118', 'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879}"},
'new_attributes': {
3: "{'id': '0', 'from': '007', 'to': '101986', 'freespeed': 4.166666666666667, 'capacity': 600.0, 'permlanes': 1.0, 'oneway': '1', 'modes': ['car'], 's2_from': 5221390329378179879, 's2_to': 5221390328605860387, 'length': 52.765151087870265, 'attributes': {'osm:way:access': {'name': 'osm:way:access', 'class': 'java.lang.String', 'text': 'permissive'}, 'osm:way:highway': {'name': 'osm:way:highway', 'class': 'java.lang.String', 'text': 'unclassified'}, 'osm:way:id': {'name': 'osm:way:id', 'class': 'java.lang.Long', 'text': '26997928'}, 'osm:way:name': {'name': 'osm:way:name', 'class': 'java.lang.String', 'text': 'Brunswick Place'}}}",
4: "{'id': '007', 'x': '528704.1425925883', 'y': '182068.78193707118', 'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879}",
5: "{'id': '007', 'x': '528704.1425925883', 'y': '182068.78193707118', 'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879}"},
'diff': {3: [('change', 'from', ('101982', '007'))],
4: [('change', 'id', ('101982', '007')), ('change', 'id', ('101982', '007'))],
5: [('change', 'id', ('101982', '007'))]}})
cols_to_compare = ['change_event', 'object_type', 'old_id', 'new_id', 'old_attributes', 'new_attributes', 'diff']
assert_frame_equal(network1.change_log[cols_to_compare].tail(3), correct_change_log_df[cols_to_compare],
check_names=False,
check_dtype=False)
def test_reindex_node_when_node_id_already_exists(network1):
assert [id for id, attribs in network1.nodes()] == ['101982', '101986']
assert [id for id, attribs in network1.links()] == ['0']
assert network1.link('0')['from'] == '101982'
assert network1.link('0')['to'] == '101986'
assert [(from_n, to_n) for from_n, to_n, attribs in network1.edges()] == [('101982', '101986')]
assert network1.link_id_mapping['0']['from'] == '101982'
network1.reindex_node('101982', '101986')
node_ids = [id for id, attribs in network1.nodes()]
assert '101986' in node_ids
assert '101982' not in node_ids
assert len(set(node_ids)) == 2
assert network1.node(node_ids[0]) != network1.node(node_ids[1])
def test_reindex_link(network1):
assert [id for id, attribs in network1.nodes()] == ['101982', '101986']
assert [id for id, attribs in network1.links()] == ['0']
assert '0' in network1.link_id_mapping
assert network1.link('0')['from'] == '101982'
assert network1.link('0')['to'] == '101986'
assert [(from_n, to_n) for from_n, to_n, attribs in network1.edges()] == [('101982', '101986')]
assert network1.edge('101982', '101986')[0]['id'] == '0'
network1.reindex_link('0', '007')
assert [id for id, attribs in network1.nodes()] == ['101982', '101986']
assert [id for id, attribs in network1.links()] == ['007']
assert '0' not in network1.link_id_mapping
assert '007' in network1.link_id_mapping
assert network1.link('007')['from'] == '101982'
assert network1.link('007')['to'] == '101986'
assert [(from_n, to_n) for from_n, to_n, attribs in network1.edges()] == [('101982', '101986')]
assert network1.edge('101982', '101986')[0]['id'] == '007'
correct_change_log_df = pd.DataFrame(
{'timestamp': {3: '2020-06-08 19:34:48', 4: '2020-06-08 19:34:48'}, 'change_event': {3: 'modify', 4: 'modify'},
'object_type': {3: 'link', 4: 'link'}, 'old_id': {3: '0', 4: '0'}, 'new_id': {3: '007', 4: '0'},
'old_attributes': {
3: "{'id': '0', 'from': '101982', 'to': '101986', 'freespeed': 4.166666666666667, 'capacity': 600.0, 'permlanes': 1.0, 'oneway': '1', 'modes': ['car'], 's2_from': 5221390329378179879, 's2_to': 5221390328605860387, 'length': 52.765151087870265, 'attributes': {'osm:way:access': {'name': 'osm:way:access', 'class': 'java.lang.String', 'text': 'permissive'}, 'osm:way:highway': {'name': 'osm:way:highway', 'class': 'java.lang.String', 'text': 'unclassified'}, 'osm:way:id': {'name': 'osm:way:id', 'class': 'java.lang.Long', 'text': '26997928'}, 'osm:way:name': {'name': 'osm:way:name', 'class': 'java.lang.String', 'text': 'Brunswick Place'}}}",
4: "{'id': '0', 'from': '101982', 'to': '101986', 'freespeed': 4.166666666666667, 'capacity': 600.0, 'permlanes': 1.0, 'oneway': '1', 'modes': ['car'], 's2_from': 5221390329378179879, 's2_to': 5221390328605860387, 'length': 52.765151087870265, 'attributes': {'osm:way:access': {'name': 'osm:way:access', 'class': 'java.lang.String', 'text': 'permissive'}, 'osm:way:highway': {'name': 'osm:way:highway', 'class': 'java.lang.String', 'text': 'unclassified'}, 'osm:way:id': {'name': 'osm:way:id', 'class': 'java.lang.Long', 'text': '26997928'}, 'osm:way:name': {'name': 'osm:way:name', 'class': 'java.lang.String', 'text': 'Brunswick Place'}}}"},
'new_attributes': {
3: "{'id': '007', 'from': '101982', 'to': '101986', 'freespeed': 4.166666666666667, 'capacity': 600.0, 'permlanes': 1.0, 'oneway': '1', 'modes': ['car'], 's2_from': 5221390329378179879, 's2_to': 5221390328605860387, 'length': 52.765151087870265, 'attributes': {'osm:way:access': {'name': 'osm:way:access', 'class': 'java.lang.String', 'text': 'permissive'}, 'osm:way:highway': {'name': 'osm:way:highway', 'class': 'java.lang.String', 'text': 'unclassified'}, 'osm:way:id': {'name': 'osm:way:id', 'class': 'java.lang.Long', 'text': '26997928'}, 'osm:way:name': {'name': 'osm:way:name', 'class': 'java.lang.String', 'text': 'Brunswick Place'}}}",
4: "{'id': '007', 'from': '101982', 'to': '101986', 'freespeed': 4.166666666666667, 'capacity': 600.0, 'permlanes': 1.0, 'oneway': '1', 'modes': ['car'], 's2_from': 5221390329378179879, 's2_to': 5221390328605860387, 'length': 52.765151087870265, 'attributes': {'osm:way:access': {'name': 'osm:way:access', 'class': 'java.lang.String', 'text': 'permissive'}, 'osm:way:highway': {'name': 'osm:way:highway', 'class': 'java.lang.String', 'text': 'unclassified'}, 'osm:way:id': {'name': 'osm:way:id', 'class': 'java.lang.Long', 'text': '26997928'}, 'osm:way:name': {'name': 'osm:way:name', 'class': 'java.lang.String', 'text': 'Brunswick Place'}}}"},
'diff': {3: [('change', 'id', ('0', '007')), ('change', 'id', ('0', '007'))],
4: [('change', 'id', ('0', '007'))]}})
cols_to_compare = ['change_event', 'object_type', 'old_id', 'new_id', 'old_attributes', 'new_attributes', 'diff']
assert_frame_equal(network1.change_log[cols_to_compare].tail(2), correct_change_log_df[cols_to_compare],
check_names=False, check_dtype=False)
def test_reindex_link_when_link_id_already_exists(network1):
assert [id for id, attribs in network1.nodes()] == ['101982', '101986']
assert [id for id, attribs in network1.links()] == ['0']
assert network1.link('0')['from'] == '101982'
assert network1.link('0')['to'] == '101986'
assert [(from_n, to_n) for from_n, to_n, attribs in network1.edges()] == [('101982', '101986')]
network1.add_link('1', '101986', '101982', attribs={})
network1.reindex_link('0', '1')
link_ids = [id for id, attribs in network1.links()]
assert '1' in link_ids
assert '0' not in link_ids
assert len(set(link_ids)) == 2
assert network1.link(link_ids[0]) != network1.link(link_ids[1])
def test_modify_node_adds_attributes_in_the_graph_and_change_is_recorded_by_change_log():
n = Network('epsg:27700')
n.add_node(1, {'a': 1})
n.apply_attributes_to_node(1, {'b': 1})
assert n.node(1) == {'b': 1, 'a': 1}
correct_change_log_df = pd.DataFrame(
{'timestamp': {0: '2020-05-28 13:49:53', 1: '2020-05-28 13:49:53'}, 'change_event': {0: 'add', 1: 'modify'},
'object_type': {0: 'node', 1: 'node'}, 'old_id': {0: None, 1: 1}, 'new_id': {0: 1, 1: 1},
'old_attributes': {0: None, 1: "{'a': 1}"}, 'new_attributes': {0: "{'a': 1}", 1: "{'a': 1, 'b': 1}"},
'diff': {0: [('add', '', [('a', 1)]), ('add', 'id', 1)], 1: [('add', '', [('b', 1)])]}})
cols_to_compare = ['change_event', 'object_type', 'old_id', 'new_id', 'old_attributes', 'new_attributes', 'diff']
assert_frame_equal(n.change_log[cols_to_compare], correct_change_log_df[cols_to_compare], check_names=False,
check_dtype=False)
def test_modify_node_overwrites_existing_attributes_in_the_graph_and_change_is_recorded_by_change_log():
n = Network('epsg:27700')
n.add_node(1, {'a': 1})
n.apply_attributes_to_node(1, {'a': 4})
assert n.node(1) == {'a': 4}
correct_change_log_df = pd.DataFrame(
{'timestamp': {0: '2020-05-28 13:49:53', 1: '2020-05-28 13:49:53'}, 'change_event': {0: 'add', 1: 'modify'},
'object_type': {0: 'node', 1: 'node'}, 'old_id': {0: None, 1: 1}, 'new_id': {0: 1, 1: 1},
'old_attributes': {0: None, 1: "{'a': 1}"}, 'new_attributes': {0: "{'a': 1}", 1: "{'a': 4}"},
'diff': {0: [('add', '', [('a', 1)]), ('add', 'id', 1)], 1: [('change', 'a', (1, 4))]}})
cols_to_compare = ['change_event', 'object_type', 'old_id', 'new_id', 'old_attributes', 'new_attributes', 'diff']
assert_frame_equal(n.change_log[cols_to_compare], correct_change_log_df[cols_to_compare], check_dtype=False)
def test_modify_nodes_adds_and_changes_attributes_in_the_graph_and_change_is_recorded_by_change_log():
n = Network('epsg:27700')
n.add_node(1, {'a': 1})
n.add_node(2, {'b': 1})
n.apply_attributes_to_nodes({1: {'a': 4}, 2: {'a': 1}})
assert n.node(1) == {'a': 4}
assert n.node(2) == {'b': 1, 'a': 1}
correct_change_log_df = pd.DataFrame(
{'timestamp': {0: '2020-06-01 15:07:51', 1: '2020-06-01 15:07:51', 2: '2020-06-01 15:07:51',
3: '2020-06-01 15:07:51'}, 'change_event': {0: 'add', 1: 'add', 2: 'modify', 3: 'modify'},
'object_type': {0: 'node', 1: 'node', 2: 'node', 3: 'node'}, 'old_id': {0: None, 1: None, 2: 1, 3: 2},
'new_id': {0: 1, 1: 2, 2: 1, 3: 2}, 'old_attributes': {0: None, 1: None, 2: "{'a': 1}", 3: "{'b': 1}"},
'new_attributes': {0: "{'a': 1}", 1: "{'b': 1}", 2: "{'a': 4}", 3: "{'b': 1, 'a': 1}"},
'diff': {0: [('add', '', [('a', 1)]), ('add', 'id', 1)], 1: [('add', '', [('b', 1)]), ('add', 'id', 2)],
2: [('change', 'a', (1, 4))], 3: [('add', '', [('a', 1)])]}
})
cols_to_compare = ['change_event', 'object_type', 'old_id', 'new_id', 'old_attributes', 'new_attributes', 'diff']
assert_frame_equal(n.change_log[cols_to_compare], correct_change_log_df[cols_to_compare], check_dtype=False)
def multiply_node_attribs(node_attribs):
return node_attribs['a'] * node_attribs['c']
def test_apply_function_to_nodes():
n = Network('epsg:27700')
n.add_node('0', attribs={'a': 2, 'c': 3})
n.add_node('1', attribs={'c': 100})
n.apply_function_to_nodes(function=multiply_node_attribs, location='new_computed_attrib')
assert_semantically_equal(dict(n.nodes()),
{'0': {'a': 2, 'c': 3, 'new_computed_attrib': 6},
'1': {'c': 100}})
def test_apply_attributes_to_edge_without_filter_conditions():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'a': 1})
n.add_link('1', 1, 2, attribs={'b': 1})
n.apply_attributes_to_edge(1, 2, {'c': 1})
assert n.link('0') == {'a': 1, 'from': 1, 'to': 2, 'id': '0', 'c': 1}
assert n.link('1') == {'b': 1, 'from': 1, 'to': 2, 'id': '1', 'c': 1}
correct_change_log_df = pd.DataFrame(
{'timestamp': {2: '2020-07-10 14:53:25', 3: '2020-07-10 14:53:25'}, 'change_event': {2: 'modify', 3: 'modify'},
'object_type': {2: 'edge', 3: 'edge'}, 'old_id': {2: '(1, 2, 0)', 3: '(1, 2, 1)'},
'new_id': {2: '(1, 2, 0)', 3: '(1, 2, 1)'},
'old_attributes': {2: "{'a': 1, 'from': 1, 'to': 2, 'id': '0'}", 3: "{'b': 1, 'from': 1, 'to': 2, 'id': '1'}"},
'new_attributes': {2: "{'a': 1, 'from': 1, 'to': 2, 'id': '0', 'c': 1}",
3: "{'b': 1, 'from': 1, 'to': 2, 'id': '1', 'c': 1}"},
'diff': {2: [('add', '', [('c', 1)])], 3: [('add', '', [('c', 1)])]}})
cols_to_compare = ['change_event', 'object_type', 'old_id', 'new_id', 'old_attributes', 'new_attributes', 'diff']
assert_frame_equal(n.change_log[cols_to_compare].tail(2), correct_change_log_df[cols_to_compare],
check_dtype=False)
def test_apply_attributes_to_edge_with_filter_conditions():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'a': 1})
n.add_link('1', 1, 2, attribs={'b': 1})
n.apply_attributes_to_edge(1, 2, {'c': 1}, conditions={'a': (0, 2)})
assert n.link('0') == {'a': 1, 'from': 1, 'to': 2, 'id': '0', 'c': 1}
assert n.link('1') == {'b': 1, 'from': 1, 'to': 2, 'id': '1'}
correct_change_log_df = pd.DataFrame(
{'timestamp': {2: '2020-07-10 14:53:25'}, 'change_event': {2: 'modify'},
'object_type': {2: 'edge'}, 'old_id': {2: '(1, 2, 0)'},
'new_id': {2: '(1, 2, 0)'},
'old_attributes': {2: "{'a': 1, 'from': 1, 'to': 2, 'id': '0'}"},
'new_attributes': {2: "{'a': 1, 'from': 1, 'to': 2, 'id': '0', 'c': 1}"},
'diff': {2: [('add', '', [('c', 1)])]}})
cols_to_compare = ['change_event', 'object_type', 'old_id', 'new_id', 'old_attributes', 'new_attributes', 'diff']
assert_frame_equal(n.change_log[cols_to_compare].tail(1), correct_change_log_df[cols_to_compare],
check_dtype=False)
def test_apply_attributes_to_multiple_edges():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'a': 1})
n.add_link('1', 1, 2, attribs={'b': 1})
n.add_link('2', 2, 3, attribs={'c': 1})
n.add_link('3', 2, 3, attribs={'d': 1})
n.apply_attributes_to_edges({(1, 2): {'e': 1}, (2, 3): {'f': 1}})
assert n.link('0') == {'a': 1, 'from': 1, 'to': 2, 'id': '0', 'e': 1}
assert n.link('1') == {'b': 1, 'from': 1, 'to': 2, 'id': '1', 'e': 1}
assert n.link('2') == {'c': 1, 'from': 2, 'to': 3, 'id': '2', 'f': 1}
assert n.link('3') == {'d': 1, 'from': 2, 'to': 3, 'id': '3', 'f': 1}
def test_apply_attributes_to_multiple_edges_with_conditions():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'a': 1})
n.add_link('1', 1, 2, attribs={'b': 1})
n.add_link('2', 2, 3, attribs={'c': 1})
n.add_link('3', 2, 3, attribs={'d': 1})
n.apply_attributes_to_edges({(1, 2): {'e': 1}, (2, 3): {'f': 1}}, conditions=[{'a': (0, 2)}, {'c': (0, 2)}])
assert n.link('0') == {'a': 1, 'from': 1, 'to': 2, 'id': '0', 'e': 1}
assert n.link('1') == {'b': 1, 'from': 1, 'to': 2, 'id': '1'}
assert n.link('2') == {'c': 1, 'from': 2, 'to': 3, 'id': '2', 'f': 1}
assert n.link('3') == {'d': 1, 'from': 2, 'to': 3, 'id': '3'}
def test_modify_link_adds_attributes_in_the_graph_and_change_is_recorded_by_change_log():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'a': 1})
n.apply_attributes_to_link('0', {'b': 1})
assert n.link('0') == {'a': 1, 'from': 1, 'to': 2, 'id': '0', 'b': 1}
correct_change_log_df = pd.DataFrame(
{'timestamp': {0: '2020-06-12 20:02:49', 1: '2020-06-12 20:02:49'}, 'change_event': {0: 'add', 1: 'modify'},
'object_type': {0: 'link', 1: 'link'}, 'old_id': {0: None, 1: '0'}, 'new_id': {0: '0', 1: '0'},
'old_attributes': {0: None, 1: "{'a': 1, 'from': 1, 'to': 2, 'id': '0'}"},
'new_attributes': {0: "{'a': 1, 'from': 1, 'to': 2, 'id': '0'}",
1: "{'a': 1, 'from': 1, 'to': 2, 'id': '0', 'b': 1}"},
'diff': {0: [('add', '', [('a', 1), ('from', 1), ('to', 2), ('id', '0')]), ('add', 'id', '0')],
1: [('add', '', [('b', 1)])]}})
cols_to_compare = ['change_event', 'object_type', 'old_id', 'new_id', 'old_attributes', 'new_attributes', 'diff']
| assert_frame_equal(n.change_log[cols_to_compare], correct_change_log_df[cols_to_compare], check_dtype=False) | pandas.testing.assert_frame_equal |
"""
Use generated models to perform predictions.
@author: eyu
"""
import os
import logging
import click
import glob
import pandas as pd
from datetime import date
from keras.models import load_model
from data_reader_stooq import StooqDataReader
from data_reader_yahoo import YahooDataReader
import talib as talib
import palib as palib
from taplot import TAPlot
from paplot import PAPlot
import network as net
import constants as c
# set logging level
logging.basicConfig(level=logging.INFO)
# format display width
| pd.set_option('display.width', 1000) | pandas.set_option |
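# Illustrative sketch only: given the imports above, a minimal prediction pass would reload a
# model previously saved with Keras and score a prepared feature table. The file names and
# feature layout below are assumptions, not this script's real configuration:
#
#   model = load_model(os.path.join("models", "example-model.h5"))   # hypothetical path
#   features = pd.read_csv("example-features.csv")                   # hypothetical file
#   predictions = model.predict(features.values)
#
# The real symbols, model files and feature engineering presumably come from the data readers
# and helper modules imported above.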
import numpy as np
from at import *
from at.load import load_mat
from matplotlib import pyplot as plt
import at.plot
from pylab import *
import pandas as pd
import csv
from random import random
def defineMatrices( Nm, C0x, C0y, C0xy, C0yx, Cxx_err, Cyy_err, Cxy_err, Cyx_err, dCx, dCy, dCxy,dCyx ):
Nk = len(dCx) # number of free parameters
Nm = 40 # number of measurements (note: this overrides the Nm argument passed in)
print('NK:', Nk)
print('Nm:', Nm)
Ax = np.zeros([Nk, Nk])
Ay = np.zeros([Nk, Nk])
Axy = np.zeros([Nk, Nk])
Ayx = np.zeros([Nk, Nk])
A = np.zeros([4 * Nk, Nk])
##
Bx = np.zeros([Nk, 1])
By = np.zeros([Nk, 1])
Bxy = np.zeros([Nk, 1])
Byx = np.zeros([Nk, 1])
B = np.zeros([4 * Nk, 1])
##
Dx = (Cxx_err[0:Nm, :] - C0x[0:Nm, :]) ### dk ?
Dy = (Cyy_err[0:Nm, :] - C0y[0:Nm, :])
Dxy = (Cxy_err[0:Nm, :] - C0xy[0:Nm, :])
Dyx = (Cyx_err[0:Nm, :] - C0yx[0:Nm, :])
##
for i in range(Nk): ## i represents each quad
# print('done A:', 100.* i ,'%')
for j in range(Nk):
Ax[i, j] = np.sum(np.dot(dCx[i], dCx[j].T))
Ay[i, j] = np.sum(np.dot(dCy[i], dCy[j].T))
Axy[i, j] = np.sum(np.dot(dCxy[i], dCxy[j].T))
Ayx[i, j] = np.sum(np.dot(dCyx[i], dCyx[j].T))
A[i, :] = Ax[i, :]
A[i + Nk, :] = Ay[i, :]
A[i + 2 * Nk, :] = Axy[i, :]
A[i + 3 * Nk, :] = Ayx[i, :]
##
for i in range(Nk):
Bx[i] = np.sum(np.dot(dCx[i], Dx.T))
By[i] = np.sum(np.dot(dCy[i], Dy.T))
Bxy[i] = np.sum(np.dot(dCxy[i], Dxy.T))
Byx[i] = np.sum(np.dot(dCyx[i], Dyx.T))
B[i] = Bx[i]
B[i + Nk] = By[i]
B[i + 2 * Nk] = Bxy[i]
B[i + 3 * Nk] = Byx[i]
return A, B
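# Note on defineMatrices: this assembles the normal-equation form of a least-squares fit of
# quadrupole strength changes to the measured ORM deviation. With dC_k the response of the
# orbit response matrix to quad k and D = C_err - C0, the blocks computed above are
#   A[i, j] = sum( dC_i . dC_j^T )   (stacked over the xx, yy, xy and yx planes -> 4*Nk x Nk)
#   B[i]    = sum( dC_i . D^T )
# so that solving A r = B (see getInverse below) yields the strength corrections r.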
def getInverse(A, B,Nk, sCut):
u, s, v = np.linalg.svd(A, full_matrices=True)
smat = 0.0 * A
si = s ** -1
n_sv = sCut
si[n_sv:] *= 0.0
print("number of singular values {}".format(len(si)))
smat[:Nk, :Nk] = np.diag(si)
print('A' + str(A.shape), 'B' + str(B.shape), 'U' + str(u.shape), 'smat' + str(smat.shape), 'v' + str(v.shape))
plt.plot(np.log(s), 'd--')
plt.title('singular value')
plt.show()
plt.plot(si, 'd--')
plt.title('singular value inverse')
plt.show()
Ai = np.dot(v.transpose(), np.dot(smat.transpose(), u.transpose()))
###
r = (np.dot(Ai, B)).reshape(-1)
plot(r, 'd')
plt.show()
# error
e = np.dot(A, r).reshape(-1) - B.reshape(-1)
plt.plot(e)
plt.show()
plt.plot(B)
plt.show()
return Ai, r, e
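# Typical call sequence (illustrative; the cut value is an assumption):
#   A, B = defineMatrices(Nm, C0x, C0y, C0xy, C0yx, Cxx_err, Cyy_err, Cxy_err, Cyx_err,
#                         dCx, dCy, dCxy, dCyx)
#   Ai, r, e = getInverse(A, B, Nk=len(dCx), sCut=50)  # keep only the 50 largest singular values
# getInverse forms the truncated pseudo-inverse Ai = V diag(1/s_i) U^T (inverse singular values
# beyond sCut are zeroed) and returns the correction vector r = Ai B plus the residual e = A r - B.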
def compare_orm(Cxy, Cxy_err, Cxy_corr, no):
# plot the 3 sets
plt.plot(Cxy[no], label='C')
plt.plot(Cxy_err[no], label='C_err')
plt.plot(Cxy_corr[no], label='C_corr')
# legend() called with no arguments picks up the labels set above
plt.legend()
plt.show()
def generatingQuadsResponse(ring, Cxx, Cyy,Cxy, Cyx ):
# %%time
quads_info = quad_info(ring)
quad_dict, quad_vals = getQuadFamilies(quads_info)
quads = [k for k in quad_dict.keys()]
quad_names = quads
dk = 0.0001
qxx = []
qxy = []
qyy = []
qyx = []
quad_names = quads
for qname in quad_names:
print('generating response to {}, n={}'.format(qname, quad_dict[qname]))
nq = quad_dict[qname] + 1
for i in range(0, nq):
Qxx, Qxy, Qyy, Qyx = computeOpticsD(ring, qname, i, dk, quad_vals)
qxx.append(Qxx)
qxy.append(Qxy)
qyy.append(Qyy)
qyx.append(Qyx)
C0x = Cxx
C0y = Cyy
C0xy = Cxy
C0yx = Cyx
dCx = []
dCy = []
dCxy = []
dCyx = []
quad_names = quads
for qname in quad_names:
# nquad = quad_dict[qname]
print('loading response to:', qname)
i = 0
while (i < len(qxx)):
C1x = qxx[i]
C1y = qyy[i]
C1xy = qxy[i]
C1yx = qyx[i]
dcxx = ((C1x - C0x) / dk)
dcyy = ((C1y - C0y) / dk)
dCxy.append((C1xy - C0xy) / dk)
dCyx.append((C1yx - C0yx) / dk)
dCx.append(dcxx)
dCy.append(dcyy)
i += 1
return C0x, C0y, C0xy, C0yx, dCx, dCy, dCxy,dCyx
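# Note on generatingQuadsResponse: each dC entry is a finite-difference sensitivity of the ORM
# to a single quadrupole, dC_k ~ (C(k0 + dk) - C(k0)) / dk with dk = 1e-4, evaluated separately
# for the xx, yy, xy and yx blocks; these are the inputs consumed by defineMatrices above.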
def setCorrection(ring, quads_info_error,quad_names, r , quads_info,n_list):
quad_dict, quad_vals = getQuadFamilies(quads_info_error)
n_list = len(quads_info_error.s_pos) # note: this overrides the n_list argument passed in
# print(n_list)
iq = 0
frac = 1.0
cor_dict = {}
for qname in quad_names:
nquad = quad_dict[qname]
# print(qname, quad_dict[qname])
for i in range(0, nquad):
cor_dict[qname, i + 1] = -r[iq] * frac
iq += 1
print("define correction : Done")
DK = []
for idx in range(n_list):
qname_ = quads_info.elements_name[idx] # ElementName
if qname_ == 'QF':
occ = quads_info_error.occ[idx]
dk = cor_dict['QF', occ]
DK.append(dk)
if qname_ == 'QD':
occ = quads_info_error.occ[idx]
dk = cor_dict['QD', occ]
DK.append(dk)
if qname_ == 'QS':
occ = quads_info_error.occ[idx]
dk = cor_dict['QS', occ]
DK.append(dk)
quads_indexes = get_refpts(ring, elements.Quadrupole)
i = 0
while (i < len(quads_indexes)):
ring[quads_indexes[i]].K += DK[i]
i += 1
print("set correction : Done")
def plotORM(orm):
plt.figure()
imshow(orm)
plt.show()
def getBetaBeat(twiss, twiss_error):
print("getBetaBeat bx and by: ")
bxi =[]
for i in range(len(twiss.betax)):
bxx = (twiss_error.betax[i] - twiss.betax[i]) / twiss.betax[i]
bxi.append(bxx)
byi =[]
for i in range(len(twiss.betay)):
byy = (twiss_error.betay[i] - twiss.betay[i]) / twiss.betay[i]
byi.append(byy)
bx = np.std((twiss_error.betax - twiss.betax) / twiss.betax)
by = np.std((twiss_error.betay - twiss.betay) / twiss.betay)
print("Simulated beta beat, x:" + str(bx * 100) + "% y: " + str(by* 100) + "%")
def make_plot(twiss, plot_name):
from mpl_toolkits.axes_grid1 import host_subplot
import matplotlib.pyplot as plt
host = host_subplot(111)
par = host.twinx()
host.set_xlabel("s_pos")
host.set_ylabel(r'$\beta_x$')
host.set_ylabel(r'$\beta_y$')
par.set_ylabel("dx")
p1, = host.plot(twiss.s_pos, twiss.betax, label=r'$\beta_x$')
p2, = host.plot(twiss.s_pos, twiss.betay, label=r'$\beta_y$')
p3, = par.plot(twiss.s_pos, twiss.dx, label=r'$\eta_x$')
p4, = par.plot(twiss.s_pos, twiss.dy, label=r'$\eta_y$')
leg = plt.legend()
host.yaxis.get_label().set_color(p1.get_color())
leg.texts[0].set_color(p1.get_color())
host.yaxis.get_label().set_color(p2.get_color())
leg.texts[1].set_color(p2.get_color())
par.yaxis.get_label().set_color(p3.get_color())
leg.texts[2].set_color(p3.get_color())
plt.title(plot_name)
plt.show()
def getOptics(ring, refpts):
elements_indexes = get_refpts(ring, refpts)
lindata0, tune, chrom, lindata = ring.linopt(get_chrom=True, refpts=elements_indexes)
closed_orbitx = lindata['closed_orbit'][:, 0]
closed_orbity = lindata['closed_orbit'][:, 2]
s_pos = lindata['s_pos']
closed_orbit = lindata['closed_orbit']
beta_x= lindata['beta'][:, 0]
beta_y= lindata['beta'][:, 1]
dx = lindata['dispersion'][:, 0]
dy = lindata['dispersion'][:, 2]
print("preparing twiss ..")
print(f"Tunes={ring.get_tune()}")
print(f"Chrom={ring.get_chrom()}")
elements_name = []
elements_strength = []
elements_type =[]
i = 0
while (i < len(elements_indexes)):
element_name = ring[i].FamName
elements_name.append(element_name)
if (ring[i].FamName == 'QF' or ring[i].FamName == 'QD'):
elements_Strength = ring[i].K
elements_strength.append(elements_Strength)
element_type = "elements.Quadrupole"
elements_type.append(element_type)
i += 1
else:
elements_Strength = 0
elements_strength.append(elements_Strength)
element_type = 0
elements_type.append(element_type)
i += 1
output = [elements_name[:e].count(v) for e, v in enumerate(elements_name,1)]
twiss1 = {'s_pos': s_pos,'betax': beta_x,
'betay': beta_y, 'dx': dx, 'dy': dy}
twiss = | pd.DataFrame(twiss1) | pandas.DataFrame |
# %%
'''
'''
## Import the required libraries
import pandas as pd
import numpy as np
import datetime as dt
from datetime import timedelta
pd.options.display.max_columns = None
pd.options.display.max_rows = None
import glob as glob
import datetime
import re
import jenkspy
import tkinter as tk
root= tk.Tk()
canvas1 = tk.Canvas(root, width = 300, height = 300)
canvas1.pack()
# %%
def profiling():
#### Read Databases
datas=pd.read_csv('C:/Users/scadacat/Desktop/TIGO (Cliente)/Cobranzas/Notebooks/Bds/data_con_drop.csv',sep=';',encoding='utf-8',dtype='str')
salida=pd.read_csv('C:/Users/scadacat/Desktop/TIGO (Cliente)/Cobranzas/Notebooks/Bds/salida_limpia.csv',sep=';',encoding='utf-8',dtype='str')
seguimiento=pd.read_csv('C:/Users/scadacat/Desktop/TIGO (Cliente)/Cobranzas/Notebooks/Bds/seguimiento.csv',sep=';',encoding='utf-8',dtype='str')
virtuales=pd.read_csv('C:/Users/scadacat/Desktop/TIGO (Cliente)/Cobranzas/Notebooks/Bds/virtuales.csv',encoding='utf-8',sep=';')
df=datas.copy()
out=salida.copy()
seg=seguimiento.copy()
vir=virtuales.copy()
out.sort_values(['Identificacion Del Cliente','Fecha_Gestion'],inplace=True)
out=out[out['Repetido CC']=='0']
out=out[~out.duplicated(keep='last')]
## Cleaning
df['Marca Score']=df['Marca Score'].str.strip().fillna('NO REGISTRA')
df['Marca Score'][df['Marca Score']==''] ='NO REGISTRA'
df['Analisis De Habito']=df['Analisis De Habito'].fillna('NO DEFINE')
df['Analisis De Habito'][df['Analisis De Habito']==' '] ='NO DEFINE'
df['Tipo de Cliente'][df['Tipo de Cliente']==' '] ='NO DEFINE'
df['Marca Funcional']=df['Marca Funcional'].str.replace(' ','0')
df['Marca']=df['Marca'].str.replace(' ','0')
df['Antiguedad Cliente'][df['Antiguedad Cliente']==' '] ='NO REGISTRA'
df['Perfil Digital']=df['Perfil Digital'].fillna('Sin perfil')
df['Nivel de riesgo experian']=df['Nivel de riesgo experian'].str.replace(' ','NO REGISTRA')
df['Nivel de Riesgo']=df['Nivel de Riesgo'].str.replace(' ','NO REGISTRA')
df['Nivel Estrategia Cobro']=df['Nivel Estrategia Cobro'].str.replace(' ','NO REGISTRA')
df['Real reportado en central de riesgos']=df['Real reportado en central de riesgos'].str.replace(' ','0')
df['Nivel de Riesgo'][df['Nivel de Riesgo']==' '] ='NO REGISTRA'
df['Estado del Cliente'][df['Estado del Cliente']==' '] ='SIN IDENTIFICAR'
df['Tipificación Cliente'][df['Tipificación Cliente']==' '] ='SIN IDENTIFICAR'
df['Estrategia'][df['Estrategia']==' '] ='SIN ESTRATEGIA'
df['Autopago'][df['Autopago']==' '] ='NO APLICA'
df['Tipo de Cliente']=df['Tipo de Cliente'].fillna('NO DEFINE')
df['Tipo de Reporte a Central de Riesgos'][df['Tipo de Reporte a Central de Riesgos']==' '] ='NO REGISTRA'
df['Codigo edad de mora(para central de riesgos)']=df['Codigo edad de mora(para central de riesgos)'].str.replace(' ','NO REGISTRA')
df['Análisis Vector'][df['Análisis Vector']==' '] ='SIN IDENTIFICAR'
df['Análisis Vector_PAGOS_PARCIAL'] = np.where(df['Análisis Vector'].str.contains("PAGO PARCIAL|PAGOS PARCIAL"),"1",'0')
df['Análisis Vector_PAGO OPORTUNO'] = np.where(df['Análisis Vector'].str.contains("SIN PAGO|FINANCIAR"),"1",'0')
df['Análisis Vector_SIN_IDENTIFICAR'] = np.where(df['Análisis Vector'].str.contains("SIN IDENTIFICAR"),"1",'0')
df['Análisis Vector_SIN_PAGO'] = np.where(df['Análisis Vector'].str.contains("SIN PAGO|FINANCIAR"),"1",'0')
df['Análisis Vector_suspension'] = np.where(df['Análisis Vector'].str.contains("SUSPENSIO"),"1",'0')
df['Análisis Vector_indeterminado'] = np.where(df['Análisis Vector'].str.contains("PAGO OPORTUNO Y NO OPORTUNO"),"1",'0')
df['Análisis Vector_pago_no_oport'] = np.where(df['Análisis Vector'].str.contains("PAGO NO OPORTUNO"),"1",'0')
df['Análisis Vector_otro_caso'] = np.where(df['Análisis Vector'].str.contains("NUEVO|FACTURAS AJUSTADAS|PROBLEMAS RECLAMACION"),"1",'0')
df['Vector Cualitativo # Suscripción'][df['Vector Cualitativo # Suscripción']==' '] = df["Vector Cualitativo # Suscripción"].mode()[0]
df['Fecha Ult Gestion']=pd.to_datetime(df['Fecha Ult Gestion'],format='%Y-%m-%d')
### PARSE DATES AND CREATE NEW FEATURES
df['Fecha de Asignacion']=pd.to_datetime(df['Fecha de Asignacion'],format='%Y-%m-%d %H:%M:%S')
df['Fecha Ult pago']=pd.to_datetime(df['Fecha Ult pago'],format ='%Y-%m-%d %H:%M:%S',errors = "coerce")
df['Fecha de cuenta de cobro mas antigua']=pd.to_datetime(df['Fecha de cuenta de cobro mas antigua'],format ='%Y-%m-%d %H:%M:%S',errors = "coerce")
df["Dias_ult_pago"] = (df['Fecha Ult pago']).dt.day
df["dia_semana_ult_pago"] = (df['Fecha Ult pago']).dt.weekday
df["mes_ult_pago"]=df["Fecha Ult pago"].dt.month
df["semana_ult_pago"]=df["Fecha Ult pago"].dt.week
df["trimestre_ult_pago"] = df["Fecha Ult pago"].dt.quarter
df["año_ult_pago"] = df["Fecha Ult pago"].dt.year
df["DIAS_desde_ult_pago"] = (df["Fecha Ult Gestion"] - df["Fecha Ult pago"]).dt.days
df["Fecha estado corte"]=pd.to_datetime(df["Fecha estado corte"],format ='%Y-%m-%d %H:%M:%S',errors = "coerce")
df["dias_ult_pago_cobro"] = (df["Fecha Ult pago"]-df["Fecha estado corte"]).dt.days
df["dias_ult_pago_fac_ant"] = (df["Fecha Ult pago"]-df["Fecha de cuenta de cobro mas antigua"]).dt.days
df['Fecha de Asignacion_mes']=df["Fecha de Asignacion"].dt.month
df['Fecha de Instalacion']=pd.to_datetime(df['Fecha de Instalacion'],format ='%Y-%m-%d %H:%M:%S',errors = "coerce")
df['antiguedad_mes']=(dt.datetime.now()-df['Fecha de Instalacion']).dt.days/365
df['Fecha Retiro']=pd.to_datetime(df['Fecha Retiro'].str.replace('4732','2020'),format='%Y-%m-%d',errors = "coerce")
df['Fecha Vencimiento Sin Recargo']=pd.to_datetime(df['Fecha Vencimiento Sin Recargo'],format='%Y-%m-%d')
df['dias_desde_ult_gestion']=(dt.datetime.now()-df['Fecha Ult Gestion']).dt.days
## Group labels
df['Descripcion subcategoria']=df['Descripcion subcategoria']\
.str.replace('Consumos EPM Telco|INALAMBRICOS NO JAC|unica|COMERCIAL|ENTERPRISE|MONOPRODUCTO|PYME|------------------------------|LINEA BUZON','NO REGISTRA')\
.str.replace('ESTRATO MEDIO ALTO|MEDIO ALTO','ESTRATO 4')\
.str.replace('ESTRATO ALTO|ALTO','ESTRATO 6')\
.str.replace('ESTRATO MEDIO-BAJO|MEDIO BAJO','ESTRATO 2')\
.str.replace('ESTRATO MEDIO|MEDIO','ESTRATO 3')\
.str.replace('ESTRATO MEDIO-BAJO|MEDIO BAJO','ESTRATO 2')\
.str.replace('BAJO BAJO|ESTRATO BAJO-BAJO|ESTRATO BAJO|BAJO','ESTRATO 1')
df['Descripcion subcategoria'][df['Descripcion subcategoria']=='-'] ='NO REGISTRA' ## '-' means not recorded
df['Tipificación Cliente'][df['Tipificación Cliente']==' '] = df["Tipificación Cliente"].mode()[0] ## Replace with the mode
df['Dias Suspension'][df['Dias Suspension']==' ']=0
df['Dias Suspension']=df['Dias Suspension'].astype('int')
## Group labels
df['Descripcion producto']=df['Descripcion producto'].str.replace('-','').str.strip().str.upper()\
.str.replace('TELEVISION UNE|TELEVISION INTERACTIVA|TV CABLE|TV INTERACTIVA|UNE TV|TELEVISION SIN SEÃƑ‘AL|TELEVISION SIN SEÃƑ‘AL|TV CABLE SIN SEÑAL','TELEVISION')\
.str.replace('INTERNET BANDA ANCHA|SEGUNDA CONEXION INTERNET|BANDA ANCHA|INTERNET EDATEL|INTERNET INSTANTANEO|CABLE MODEM|INTERNET DEDICADO 11|ADSL BASICO','INTERNET')\
.str.replace('UNE MOVIL|COLOMBIAMOVIL BOGOTA|TIGO|ETB','UNEMOVIL')\
.str.replace('TOIP|TELEFONICA TELECOM|TELECOM|TO_SINVOZ','TELEFONIA')\
.str.replace('LÃƑÂNEA BÃƑ¡SICA','LINEA BASICA')
df['Descripcion categoria']=df['Descripcion categoria'].str.replace("[^a-zA-Z ]+", "NO REGISTRA")
df['Descripcion producto']=df['Descripcion producto'].str.replace('-','').str.strip()\
.str.replace('TELEVISION UNE|Television Interactiva|TV CABLE |TV INTERACTIVA|UNE TV|TELEVISIONSIN SEÑAL','TELEVISION')\
.str.replace('Internet Banda Ancha|Internet EDATEL|CABLE MODEM','INTERNET').str.replace('UNE MOVIL','UNEMOVIL')\
.str.replace('UNE MOVIL|COLOMBIAMOVIL BOGOTA','UNEMOVIL')\
.str.replace('TOIP','TELEFONIA')
df['Descripcion producto']=df['Descripcion producto'].str.strip().str.replace('-','')\
.str.replace('TELEVISION UNE|Television Interactiva|TV CABLE |TV INTERACTIVA|UNE TV','TELEVISION')\
.str.replace('Internet Banda Ancha','INTERNET').str.replace('UNE MOVIL','UNEMOVIL')
conteo3=df['Descripcion producto'].value_counts().iloc[:7].index.tolist()
df['Descripcion producto_resumen']=df.apply(
lambda row: row['Descripcion producto'] if (row['Descripcion producto'] in conteo3)
else 'OTRO PRODUCTO',axis=1)
df['Descripcion producto_resumen']=df['Descripcion producto_resumen'].str.strip()
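# Note: the value_counts().iloc[:N] + apply pattern above keeps the N most frequent categories
# (7 here) and lumps everything else into a catch-all label; the same idiom is reused below for
# banks, causes and cut-off states.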
df['Tipo Contactabilidad'][df['Tipo Contactabilidad']==' '] ='NO REGISTRA'
df['Indicador BI'][df['Indicador BI']==' '] ='NO REGISTRA'
## Create variable
df['antiguedad_mes']=df['antiguedad_mes'].astype(int)
col = 'antiguedad_mes'
condi = [ df[col] < 12, df[col].between(12, 24, inclusive = True),df[col]>24 ]
seg_ = [ "SEGMENTO YOUNG", 'SEGMENTO MASTER','SEGMENTO LEGEND']
df["Hogar"] = np.select(condi, seg_, default=np.nan)
df['Calificación A Nivel De Suscripción'][df['Calificación A Nivel De Suscripción']==' ']=df['Calificación A Nivel De Suscripción'].mode()[0]
df['Calificación A Nivel De Suscripción']=df['Calificación A Nivel De Suscripción'].astype('int')
df['Califica_suscr_class']=pd.cut(df['Calificación A Nivel De Suscripción'],bins=5,labels=["A","B","C","D","E"]).astype(str)
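# Note: pd.cut with bins=5 splits the score into five equal-width ranges labelled A-E (these are
# not population quintiles; pd.qcut would be needed for equal-sized groups), and the labels are
# cast to str so they behave as plain categories downstream.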
df['Tipo De Documento'][df['Tipo De Documento']=='13'] ='NO REGISTRA'
df['Tipo De Documento']=df['Tipo De Documento'].fillna('NO REGISTRA')
df['Tipo De Documento'][df['Tipo De Documento']=='1'] ='CC'
df['Tipo De Documento'][df['Tipo De Documento']==' '] ='NO REGISTRA'
df['Tipo De Documento'][df['Tipo De Documento']=='C'] ='NO REGISTRA'
df['Tipo De Documento']=df['Tipo De Documento'].str.replace('3 Cedula Extranjeria|3|1CE','CE')\
.str.replace('1 Cedula','CC')\
.str.replace('2 Nit|2',' Nit')\
.str.replace('4 Tarjeta de Identidad|4',' TI')
#### Create, clean & group variables
df['Banco 1'][df['Banco 1']==' '] ='NO REGISTRA'
df['Banco 2'][df['Banco 2']==' '] ='NO REGISTRA'
df['Banco 1'].fillna('NO REGISTRA',inplace=True)
df['Banco 2'].fillna('NO REGISTRA',inplace=True)
df['Banco 1']=df['Banco 1'].str.upper().str.strip()
df['Banco 2']=df['Banco 2'].str.upper().str.strip()
df['Banco 1']=df['Banco 1'].str.replace('BANCO COLPATRIA','COLPATRIA')\
.str.replace('COLPATRIA ENLINEA','COLPATRIA EN LINEA')\
.str.replace('GANA GANA','GANA')\
.str.replace('GANA GANA','GANA')
df["Banco 1_virtual"] =\
np.where(df["Banco 1"].str.contains("LINEA|PSE|BOTON",regex = True,na = False),"1","0")
df["Banco 2_Virtual"] =\
np.where(df["Banco 2"].str.contains("LINEA|PSE|BOTON",regex = True,na = False),"1","0")
conteo_banco=df['Banco 1'].value_counts().iloc[:10].index.tolist()
df['Banco 1_Cl']=df.apply(
lambda row: row['Banco 1'] if (row['Banco 1'] in conteo_banco)
else 'OTRO BANCO',axis=1)
conteo_banco2=df['Banco 2'].value_counts().iloc[:10].index.tolist()
df['Banco 2_Cl']=df.apply(
lambda row: row['Banco 2'] if (row['Banco 2'] in conteo_banco2)
else 'OTRO BANCO',axis=1)
df['Causal'][df['Causal']==' '] ='NO REGISTRA'
df['Causal_Cl']=df['Causal']\
.str.replace('FACTURA MAYOR A LA CAPACIDAD DE PAGO|CLIENTE SE ACOGE PRODUCTO MINIMO VITAL|PRIORIDAD INGRESOS A LA CANASTA BASICA|INDISPONIBILIDAD DE MEDIOS DE PAGO POR EMERGENCIA SANITARIA|NO TIENE DINERO|INCONVENIENTES ECONOMICOS|INCONVENIENTES ECONOMICOS|CONTINGENCIA COVID-19|DESEMPLEADO|INDEPENDIENTE SIN INGRESOS DURANTE CUARENTENA|DISMINUCIÓN INGRESOS / INCONVENIENTES CON NÓMINA',
'DISMINUCIÓN DE INGRESOS')\
.str.replace('OLVIDO DE PAGO|FUERA DE LA CIUDAD|DEUDOR SE OLVIDO DEL PAGO|OLVIDO DEL PAGO / ESTA DE VIAJE',
'OLVIDO')\
.str.replace('PAGA CADA DOS MESES|PAGO BIMESTRAL','PAGO BIMESTRAL')\
.str.replace('INCONFORMIDAD EN EL VALOR FACTURADO|INCONFORMIDAD POR CAMBIO DE DOMICILIO|INCOMFORMIDAD POR CAMBIO DE DOMICILIO|PQR PENDIENTE|TIENE RECLAMO PENDIENTE','INCONFORMIDAD')\
.str.replace('OTRA PERSONA ES LA ENCARGADA DEL PAGO','OTRA PERSONA ES LA ENCARGADA DEL PAGO').str.strip()\
.str.replace('PROBLEMAS FACTURACIÓN|INCONSISTENCIAS EN CARGOS FACTURADOS|RECLAMACIÓN EN TRÃMITE|NO LE LLEGA LA FACTURA / LLEGO DESPUES DE LA FECHA DE VENCIMIENTO|LLEGO LA FACTURA DESPUES DE LA FECHA DE VENCIMIENTO|NO LLEGO FACTURA',
'FACTURA')\
.str.replace('SE NIEGA A RECIBIR INFORMACION',
'RENUENTE')\
.str.replace('INCONVENIENTES CON CANALES DE PAGO|NO HAY PROGRAMACION DEL PAGO|INCONVENIENTES CON EL CANAL DE RECAUDO|NO HAY PROGRAMACION DEL PAGO|INCONVENIENTES CON LA ENTIDAD BANCARIA',
'INCONVENIENTES CON PAGO')\
.str.replace('REALIZARA RETIRO DEL SERVICIO|REALIZARA RETIRO / CANCELACION SERVICIO',
'REALIZARA RETIRO')
conteo_Causa=df['Causal_Cl'].value_counts().iloc[:12].index.tolist()
df['Causal_Cl']=df.apply(
lambda row: row['Causal_Cl'] if (row['Causal_Cl'] in conteo_Causa)
else 'OTRA CAUSA',axis=1)
conteo_Corte=df['Descripcion estado de corte'].value_counts().iloc[:12].index.tolist()
df['Descripcion estado de corte_Cl']=df.apply(
lambda row: row['Descripcion estado de corte'] if (row['Descripcion estado de corte'] in conteo_Corte)
else 'OTRA MOTIVO',axis=1)
df['Descripcion estado de corte_conexión'] = np.where(df['Descripcion estado de corte'].str.contains("CONEXION"),"1",'0')
df['Descripcion estado de corte_suspención'] = np.where(df['Descripcion estado de corte'].str.contains("SUSPENSION"),"1",'0')
df['Descripcion estado de corte_retiro'] = np.where(df['Descripcion estado de corte'].str.contains("RETIRO"),"1",'0')
df['Valor Total Cobrar']=df['Valor Total Cobrar'].astype('float64')
df['Valor Vencido']=df['Valor Vencido'].astype('float64')
df['Valor Factura']=df['Valor Factura'].astype('float64')
df['Valor Intereses de Mora']=df['Valor Intereses de Mora'].astype('float64')
df['Valor financiado']=df['Valor financiado'].astype('float64')
## DROPPING VARIABLES
df.drop(['Causal','Codigo edad de mora(para central de riesgos)',
'Estado Adminfo','Celular con mejor Contactabilidad','Archivo Convergente','Usuario','Vector de Pago'],axis=1,inplace=True)
anis=['Teléfono última gestión','Email','Telefono con mejor Contactabilidad','Email',
'Ultimo Celular Grabado','Ultimo Telefono Grabado','Ultimo Email Grabado','Celular con mejor Contactabilidad']
df.dropna(subset = ["Direccion de instalacion"], inplace=True)
df['llave']=df['Identificacion']+"_"+df['Direccion de instalacion']
df=df.sort_values('Fecha de Asignacion',ascending=True)
## Drop the duplicates found in the combination of these variables
df=df[~df[['llave','# servicio suscrito/abonado','Fecha de Asignacion','Valor Total Cobrar','Valor Vencido','Descripcion localidad']].duplicated()]
df=df.sort_values(by=['Identificacion','# servicio suscrito/abonado','Fecha de Asignacion'],ascending=[True,True,True]).drop_duplicates('# servicio suscrito/abonado',keep='last')
### Careful with those records still pending management
## Number of services
cant_serv=df.groupby(['Identificacion']).agg({'Descripcion producto':'nunique','Direccion de instalacion':'nunique'})\
.reset_index().sort_values('Descripcion producto',ascending=False)\
.rename(columns={'Descripcion producto':'cantidad_ser_dir','Direccion de instalacion':'serv_dir'})
df=pd.merge(df,cant_serv,on='Identificacion')
df=df[~df.duplicated()]
# Create this variable to avoid duplicates on the same day
df['llave_2']=df['Identificacion']+"_"+(df['Fecha de Asignacion'].astype('str'))
#
conteo=df.groupby(['Identificacion','Fecha de Asignacion','Fecha de Asignacion_mes']).agg({'Identificacion':'nunique'}).rename(columns={'Identificacion':'cantidad_mes'}).reset_index()
conteo.sort_values('Fecha de Asignacion',ascending=True,inplace=True)
conteo=conteo[~conteo['Identificacion'].duplicated(keep='last')]
conteo['llave_2']=conteo['Identificacion']+"_"+(conteo['Fecha de Asignacion'].astype('str'))
# Created in order to identify and keep the keys for each record
consolidar=pd.merge(df,conteo['llave_2'],on='llave_2')
# Create dummy variables to identify the number of each service type
cer1=pd.concat([pd.get_dummies(consolidar['Descripcion producto_resumen']),consolidar],axis=1) # concatenate
cer1['llave_2']=cer1['Identificacion']+"_"+(cer1['Fecha de Asignacion'].astype('str'))
cer=cer1.groupby(['Identificacion']).agg({
'Descripcion producto_resumen':'sum',
'TELEFONIA':'sum','INTERNET':'sum','TELEVISION':'sum','UNEMOVIL':'sum',
'LARGA DISTANCIA UNE':'sum','PAQUETE':'sum','OTRO PRODUCTO':'sum','LINEA BASICA':'sum',
"Valor Vencido":"sum","Valor Total Cobrar":"sum",
"Valor financiado":"sum",
"Valor Intereses de Mora":"sum"}).reset_index().\
rename(columns={'Valor Vencido':'valor vencido_sum',
'Valor Factura':'Valor Factura_sum',
'Valor financiado':'Valor financiado_sum',
'Valor Total Cobrar':'Valor Total Cobrar_sum',
'Descripcion producto_resumen':'Total servicio',
'Valor Intereses de Mora':'Valor Intereses de Mora_sum'})
cer.drop(['Total servicio'],axis=1,inplace=True)
data=pd.merge(consolidar,cer,on='Identificacion')
data=data.sort_values(['Fecha de Asignacion','Identificacion'],ascending=[True,True]).drop_duplicates('Identificacion',keep='last')
### Output database
out=out.sort_values(['Identificacion Del Cliente','Fecha_Gestion'],ascending=[True,True]).drop_duplicates(keep='last')
out.drop(['Unnamed: 19'],axis=1,inplace=True)
## Merge with the output databases
full=pd.merge(data,out[['Identificacion Del Cliente','Efectivo Pago','Fecha_Pago']],
left_on='Identificacion',right_on='Identificacion Del Cliente')
full=full[~full.duplicated()]
full=full.sort_values(['Identificacion','Efectivo Pago'],ascending=[True,True]).drop_duplicates(['Identificacion'],keep='first')
full['llave_exp']=full['Identificacion']+full['# servicio suscrito/abonado']
full['valor vencido_sum'][full['valor vencido_sum'] < 0] = 0
full['ratio_vlr_vencido_cobro']=full['valor vencido_sum']/full['Valor Total Cobrar_sum']
full.drop(['llave_2','Direccion de instalacion','Banco 1','Banco 2'],axis=1,inplace=True)
### Export and send to the working folder
seg['FECHA DE GESTION']=pd.to_datetime(seg['FECHA DE GESTION'],format='%Y-%m-%d %H:%M:%S')
seg=seg.sort_values(['IDENTIFICACIóN','FECHA DE GESTION']).drop_duplicates('IDENTIFICACIóN',keep='last')
vir['Identificación']=vir['Identificación'].astype('str')
fulll=pd.merge(full,seg[['IDENTIFICACIóN','FECHA DE GESTION','CLASE DE GESTION',
'LINEA/AGENCIA/ABOGADO','CAUSAL','CICLO','OTRA GESTION',
'SE DEJO MENSAJE EN BUZON', 'DEUDOR REALIZA PROMESA DE PAGO TOTAL',
'NO CONTESTAN / OCUPADO', 'DEUDOR REALIZA PROMESA DE PAGO PARCIAL',
'NO HUBO ACUERDO', 'SE ENVIA CUPON DE PAGO','SE DEJO MENSAJE CON TERCERO',
'OTRA GESTION_sum', 'Total_segui','Cantidad_de_cobros_diff_mes', 'Cantidad_recontactos_mes',
'class_Cantidad_de_cobros_diff_mes','class_Cantidad_recontactos_mes']],
left_on='Identificacion',right_on='IDENTIFICACIóN',how='left').\
merge(vir,left_on='Identificacion',right_on='Identificación',how='left')
# free memory
del cer
del cer1
fulll["Efectivo Pago"] = (fulll["Efectivo Pago"]=="Efectivo").astype(int)
fulll.drop(['Valor financiado_sum','Fecha_Pago','Valor Intereses de Mora_sum','Valor Total Cobrar','Valor Total Cobrar_sum','Valor Intereses de Mora','Agencia B2B Convergente','Codigo Fraude','CAUSAL','LINEA/AGENCIA/ABOGADO',
'Celular','Valor financiado','# servicio suscrito/abonado','Fecha Ult pago','Fecha estado corte','Codigo Departamento','Centrales de riesgos','dias_desde_ult_gestion',
'Valor Honorarios','Dias_ult_pago','dia_semana_ult_pago','mes_ult_pago','semana_ult_pago','Marca','Marca Funcional','Reportado a central de riesgos','Marca Score','Autopago',
'trimestre_ult_pago','año_ult_pago','DIAS_desde_ult_pago','dias_ult_pago_cobro','Primera Mora','CICLO','Codigo Categoria','Subsegmento',
'dias_ult_pago_fac_ant','Fecha de cuenta de cobro mas antigua','Fecha estado corte','Fecha estado corte','Descripcion Gestion Resultado'],axis=1,inplace=True)
dd=fulll.copy()
dd['class_Cantidad_recontactos_mes']=dd['class_Cantidad_recontactos_mes'].fillna('0')
dd['class_Cantidad_de_cobros_diff_mes'].fillna('0',inplace=True)
# dd['Calificación Servicio Suscrito'][dd['Calificación Servicio Suscrito']==' '] = np.nan
# dd['Calificación Servicio Suscrito']=dd['Calificación Servicio Suscrito'].astype(float)
dd['Fecha de Asignacion']=pd.to_datetime(dd['Fecha de Asignacion'],format='%Y-%m-%d')
dd['Fecha Ult Gestion']=pd.to_datetime(dd['Fecha Ult Gestion'],format='%Y-%m-%d')
dd['Fecha Actualizacion']=pd.to_datetime(dd['Fecha Actualizacion'],format='%Y-%m-%d')
dd['Fecha Vencimiento Sin Recargo']=pd.to_datetime(dd['Fecha Vencimiento Sin Recargo'],format='%Y-%m-%d')
# dd['Fecha de cuenta de cobro mas antigua']=pd.to_datetime(dd['Fecha de cuenta de cobro mas antigua'],format='%Y-%m-%d')
dd['FECHA DE GESTION']=pd.to_datetime(dd['FECHA DE GESTION'],format='%Y-%m-%d %H:%M:%S')
dd['Fecha Debido Cobrar']=pd.to_datetime(dd['Fecha Debido Cobrar'],format='%Y-%m-%d %H:%M:%S', errors='coerce')
dd['Score Contactabilidad'][dd['Score Contactabilidad']==' '] =np.nan
dd['Score Contactabilidad']=dd['Score Contactabilidad'].fillna(dd['Score Contactabilidad'].median())
dd['Score Contactabilidad']=dd['Score Contactabilidad'].astype('float')
dd['Tiene Compromiso'] = (dd['Tiene Compromiso']=="S").astype(int)
# dd['Calificación Servicio Suscrito'][dd['Calificación Servicio Suscrito']==' '] =0
# dd['Calificación Servicio Suscrito']=dd['Calificación Servicio Suscrito'].astype(float)
dd['Financiado'] = (dd["Financiado"]=="SI").astype(int)
dd['Obligaciones con celular']= (dd['Obligaciones con celular']=="S").astype(int)
dd['Inscrito Factura Web']= (dd['Inscrito Factura Web']=="S").astype(int)
dd['Real reportado en central de riesgos']= (dd['Real reportado en central de riesgos']=="S").astype(int)
dd['Tipo Habito de Pago'][dd['Tipo Habito de Pago']==' '] ='NO REGISTRA'
dd['Calificación Identificación'][dd['Calificación Identificación']==' '] =dd["Calificación Identificación"].mode()[0]
dd["Calificación Identificación"]=dd["Calificación Identificación"].astype(float)
dd['CLASE DE GESTION'][dd['CLASE DE GESTION']==' ']='NO REGISTRA'
### Classifications
dd['Class_Total valor pendiente suscripcion']=pd.qcut(dd['Total valor pendiente suscripcion'].astype(float), 5,
labels=["A", "B", "C","D","E"]).astype('str')
dd['Total valor pendiente suscripcion']=dd['Total valor pendiente suscripcion'].astype(float)
dd['Valor Pendiente']=dd['Valor Pendiente'].astype(float)
dd['# de Dias De Mora']=dd['# de Dias De Mora'].astype(float)
dd['Dias sin Gestion']=dd['Dias sin Gestion'].astype(float)
dd['antiguedad_mes']=dd['antiguedad_mes'].astype(float)
dd['Minimo Cuentas con Saldo Suscripción']=dd['Minimo Cuentas con Saldo Suscripción'].astype(float)
dd['Maximo Cuentas con Saldo Suscripción']=dd['Maximo Cuentas con Saldo Suscripción'].astype(float)
dd['Total_segui']=dd['Total_segui'].astype(float)
### OUTLIERS
qtil9_vlrvencido=dd['valor vencido_sum'].quantile(0.95)
qtil9_vlfac=dd['Valor Factura'].quantile(0.90)
qtil9_total=dd['Total valor pendiente suscripcion'].quantile(0.90)
qtil9_total_ven=dd['Valor Vencido'].quantile(0.90)
qtil_75_dia=dd['# de Dias De Mora'].quantile(0.75)
qtil_75_dia_ges=dd['Dias sin Gestion'].quantile(0.80)
qtil_mes=dd['antiguedad_mes'].quantile(0.95)
qtil_min_cuentas=dd['Minimo Cuentas con Saldo Suscripción'].quantile(0.99)
qtil_max_cuentas=dd['Maximo Cuentas con Saldo Suscripción'].quantile(0.99)
qtil_sus=dd['Dias Suspension'].quantile(0.85)
qtil_segui=dd['Total_segui'].quantile(0.95)
dd['valor vencido_sum']= np.where(dd["valor vencido_sum"] > qtil9_vlrvencido, qtil9_vlrvencido ,dd["valor vencido_sum"])
dd['Valor Factura'] = np.where(dd['Valor Factura'] > qtil9_vlfac, qtil9_vlfac,dd["Valor Factura"])
dd['Valor Factura'] = np.where(dd['Valor Factura'] < 0, dd["Valor Factura"].quantile(0.5),dd["Valor Factura"])
dd['Total valor pendiente suscripcion']=np.where(dd['Total valor pendiente suscripcion'] > qtil9_total, qtil9_total,dd["Total valor pendiente suscripcion"])
dd['Valor Vencido']=np.where(dd['Valor Vencido'] > qtil9_total_ven, qtil9_total_ven,dd["Valor Vencido"])
dd['Valor Vencido']=np.where(dd['Valor Vencido'] < dd['Valor Vencido'].quantile(0.1), dd['Valor Vencido'].quantile(0.3),dd["Valor Vencido"])
dd['# de Dias De Mora']=np.where(dd['# de Dias De Mora'] > qtil_75_dia, qtil_75_dia,dd['# de Dias De Mora'])
dd['Dias sin Gestion']=np.where(dd['Dias sin Gestion'] > qtil_75_dia_ges, qtil_75_dia_ges,dd['Dias sin Gestion'])
dd['ratio_vlr_vencido_cobro'].fillna(dd['ratio_vlr_vencido_cobro'].median(),inplace=True)
dd['Calificación Servicio Suscrito'][dd['Calificación Servicio Suscrito']==' '] = np.nan
dd['Calificación Servicio Suscrito']=dd['Calificación Servicio Suscrito'].fillna(dd['Calificación Servicio Suscrito'].median())
dd['antiguedad_mes']=np.where(dd['antiguedad_mes'] > qtil_mes, qtil_mes,dd['antiguedad_mes'])
dd['Minimo Cuentas con Saldo Suscripción']=np.where(dd['Minimo Cuentas con Saldo Suscripción'] > qtil_min_cuentas, qtil_min_cuentas,dd['Minimo Cuentas con Saldo Suscripción'])
dd['Maximo Cuentas con Saldo Suscripción']=np.where(dd['Maximo Cuentas con Saldo Suscripción'] > qtil_max_cuentas, qtil_max_cuentas,dd['Maximo Cuentas con Saldo Suscripción'])
dd['Dias Suspension']=np.where(dd['Dias Suspension'] > qtil_sus, qtil_sus,dd['Dias Suspension'])
### Drop
dd.drop(['Descripcion Mejor Codigo Gestion Mes','Codigo de Gestion Resultado Visita','Análisis Vector',
'Fecha de Instalacion','DÃa Pago 3','Descripcion localidad',
'Fecha Ingreso Fraude','Maxima fecha Ult Gestion','Usuario Grabador',
'DÃa Pago 1','DÃa Pago 2','Ultimo Codigo de Gestion Agrupado','# de Suscripción',
'fecha de importacion',
'Fecha de Asignacion_mes','Descripcion producto','Fecha Financiacion','Codigo estado de corte','Descripcion estado de corte'],axis=1,inplace=True)
dd.ratio_vlr_vencido_cobro.fillna(dd.ratio_vlr_vencido_cobro.median(),inplace=True)
dd['retiro']=np.where(dd['Fecha Retiro'].isna(),0,1)
dd.drop(['Nivel de riesgo experian','Fecha Retiro','Nivel de Riesgo','Indicador BI','Tipo Contactabilidad',
'Gestion comercial','Estrategia','Usuario Fraudulento','Tipo de Reporte a Central de Riesgos','Banco 2_Cl'],axis=1,inplace=True)
dd.ratio_vlr_vencido_cobro.fillna(dd.ratio_vlr_vencido_cobro.median(),inplace=True)
dd['Efectivo Pago']=dd['Efectivo Pago'].astype(str)
dd['Class_Total valor pendiente suscripcion']=dd['Class_Total valor pendiente suscripcion'].astype('str')
dd['Califica_suscr_class']=dd['Califica_suscr_class'].astype('str')
dd['# de Dias De Mora'].fillna(0,inplace=True)
breaks3 = jenkspy.jenks_breaks(dd['# de Dias De Mora'], nb_class=8)
dd['class_# de Dias De Mora'] = pd.cut(dd['# de Dias De Mora'] , bins=breaks3, include_lowest=True).astype(str)
breaks2 = jenkspy.jenks_breaks(dd['ratio_vlr_vencido_cobro'], nb_class=5)
dd['class_ratio_vlr_vencido_cobro_class'] = pd.cut(dd['ratio_vlr_vencido_cobro'] , bins=breaks2, include_lowest=True).astype(str)
dd['Total'].fillna(0,inplace=True)
dd['Total_clasificacion_cant_virtuales'] = pd.cut(x=dd['Total'],
bins=[-1,0,1,2,3,6,10,17,30,1000],
labels=["0","1","2","3","4-6","7-10", "11-17","18-30", ">30"]).astype(str).fillna('0')
### Split
sin_seg=dd[dd['IDENTIFICACIóN'].isna()]
sin_seg.drop(sin_seg[sin_seg.columns[79:139]].columns,axis=1,inplace=True)
# with follow-up
dd=dd[~dd['IDENTIFICACIóN'].isna()]
grupo=dd.groupby(['Efectivo Pago','Descripcion departamento', 'sistema origen',
'Vector Cualitativo # Suscripción', 'Tipificación Cliente',
'Perfil Digital', 'Descripcion subcategoria', 'Descripcion categoria', 'Estado del Cliente',
'Tipo Habito de Pago', 'Tipo Producto Servicio Suscrito', 'Analisis De Habito','Hogar',
'Califica_suscr_class', 'Banco 1_Cl','Descripcion estado de corte_Cl','class_Cantidad_de_cobros_diff_mes',
'class_Cantidad_recontactos_mes', 'Class_IVR',
'Class_sms','Class_Total valor pendiente suscripcion','Total_clasificacion_cant_virtuales',
'class_ratio_vlr_vencido_cobro_class','class_# de Dias De Mora']).size().reset_index(name='frecuency')
# dic_reg=pd.crosstab(grupo['Descripcion Regional'],grupo['Efectivo Pago']).apply(lambda r: r/r.sum(), axis=1)['1'].to_dict()
dic_des_dep=pd.crosstab(grupo['Descripcion departamento'],grupo['Efectivo Pago']).apply(lambda r: r/r.sum(), axis=1)['1'].to_dict()
dic_vec_cua=pd.crosstab(grupo['Vector Cualitativo # Suscripción'],grupo['Efectivo Pago']).apply(lambda r: r/r.sum(), axis=1)['1'].to_dict()
dic_sis_origen=pd.crosstab(grupo['sistema origen'],grupo['Efectivo Pago']).apply(lambda r: r/r.sum(), axis=1)['1'].to_dict()
dic_tipi_clien=pd.crosstab(grupo['Tipificación Cliente'],grupo['Efectivo Pago']).apply(lambda r: r/r.sum(), axis=1)['1'].to_dict()
dic_per_dig=pd.crosstab(grupo['Perfil Digital'],grupo['Efectivo Pago']).apply(lambda r: r/r.sum(), axis=1)['1'].to_dict()
dic_desc_sub=pd.crosstab(grupo['Descripcion subcategoria'],grupo['Efectivo Pago']).apply(lambda r: r/r.sum(), axis=1)['1'].to_dict()
# dic_desc_sus=pd.crosstab(grupo['Tipificacion suscripcion'],grupo['Efectivo Pago']).apply(lambda r: r/r.sum(), axis=1)['1'].to_dict()
# dic_ant_clie=pd.crosstab(grupo['Antiguedad Cliente'],grupo['Efectivo Pago']).apply(lambda r: r/r.sum(), axis=1)['1'].to_dict()
dic_desc_cat=pd.crosstab(grupo['Descripcion categoria'],grupo['Efectivo Pago']).apply(lambda r: r/r.sum(), axis=1)['1'].to_dict()
# dic_est_cliente=pd.crosstab(grupo['Estado del Cliente'],grupo['Efectivo Pago']).apply(lambda r: r/r.sum(), axis=1)['1'].to_dict()
dic_hab=pd.crosstab(grupo['Tipo Habito de Pago'],grupo['Efectivo Pago']).apply(lambda r: r/r.sum(), axis=1)['1'].to_dict()
dic_sus_tipo=pd.crosstab(grupo['Tipo Producto Servicio Suscrito'],grupo['Efectivo Pago']).apply(lambda r: r/r.sum(), axis=1)['1'].to_dict()
dic_ana_habi=pd.crosstab(grupo['Analisis De Habito'],grupo['Efectivo Pago']).apply(lambda r: r/r.sum(), axis=1)['1'].to_dict()
dic_ana_hogar=pd.crosstab(grupo['Hogar'],grupo['Efectivo Pago']).apply(lambda r: r/r.sum(), axis=1)['1'].to_dict()
dic_cali=pd.crosstab(grupo['Califica_suscr_class'],grupo['Efectivo Pago']).apply(lambda r: r/r.sum(), axis=1)['1'].to_dict()
dic_ban=pd.crosstab(grupo['Banco 1_Cl'],grupo['Efectivo Pago']).apply(lambda r: r/r.sum(), axis=1)['1'].to_dict()
dic_corte=pd.crosstab(grupo['Descripcion estado de corte_Cl'],grupo['Efectivo Pago']).apply(lambda r: r/r.sum(), axis=1)['1'].to_dict()
dic_pend_sus=pd.crosstab(grupo['Class_Total valor pendiente suscripcion'],grupo['Efectivo Pago']).apply(lambda r: r/r.sum(), axis=1)['1'].to_dict()
dic_sms=pd.crosstab(grupo['Class_sms'],grupo['Efectivo Pago']).apply(lambda r: r/r.sum(), axis=1)['1'].to_dict()
dic_ivr=pd.crosstab(grupo['Class_IVR'],grupo['Efectivo Pago']).apply(lambda r: r/r.sum(), axis=1)['1'].to_dict()
# dic_CE=pd.crosstab(grupo['class_CE'],grupo['Efectivo Pago']).apply(lambda r: r/r.sum(), axis=1)['1'].to_dict()
dic_mora=pd.crosstab(grupo['class_# de Dias De Mora'],grupo['Efectivo Pago']).apply(lambda r: r/r.sum(), axis=1)['1'].to_dict()
dic_ratio=pd.crosstab(grupo['class_ratio_vlr_vencido_cobro_class'],grupo['Efectivo Pago']).apply(lambda r: r/r.sum(), axis=1)['1'].to_dict()
#dd['Descripcion Regional2']=dd['Descripcion Regional'].replace(dic_reg)
dd['Descripcion departamento2']=dd['Descripcion departamento'].replace(dic_des_dep)
dd['Vector Cualitativo # Suscripción2']=dd['Vector Cualitativo # Suscripción'].replace(dic_vec_cua)
dd['sistema origen2']=dd['sistema origen'].replace(dic_sis_origen)
dd['Tipificación Cliente2']=dd['Tipificación Cliente'].replace(dic_tipi_clien)
dd['Perfil Digital2']=dd['Perfil Digital'].replace(dic_per_dig)
dd['Descripcion subcategoria2']=dd['Descripcion subcategoria'].replace(dic_desc_sub)
# dd['Tipificacion suscripcion2']=dd['Tipificacion suscripcion'].replace(dic_desc_sus)
# dd['Antiguedad Cliente2']=dd['Antiguedad Cliente'].replace(dic_ant_clie)
dd['Descripcion categoria2']=dd['Descripcion categoria'].replace(dic_desc_cat)
# dd['Estado del Cliente2']=dd['Estado del Cliente'].replace(dic_est_cliente)
dd['Tipo Habito de Pago2']=dd['Tipo Habito de Pago'].replace(dic_hab)
dd['Tipo Producto Servicio Suscrito2']=dd['Tipo Producto Servicio Suscrito'].replace(dic_sus_tipo)
dd['Analisis De Habito2']=dd['Analisis De Habito'].replace(dic_ana_habi)
dd['Hogar2']=dd['Hogar'].replace(dic_ana_hogar)
dd['Califica_suscr_class2']=dd['Califica_suscr_class'].replace(dic_cali)
dd['Banco 1_Cl2']=dd['Banco 1_Cl'].replace(dic_ban)
dd['Descripcion estado de corte_Cl2']=dd['Descripcion estado de corte_Cl'].replace(dic_corte)
dd['Class_Total valor pendiente suscripcion2']=dd['Class_Total valor pendiente suscripcion'].replace(dic_pend_sus)
dd['Class_sms2']=dd['Class_sms'].replace(dic_sms)
dd['Class_IVR2']=dd['Class_IVR'].replace(dic_ivr)
# dd['class_CE2']=dd['class_CE'].replace(dic_CE)
dd['class_# de Dias De Mora2']=dd['class_# de Dias De Mora'].replace(dic_mora)
dd['class_ratio_vlr_vencido_cobro_class2']=dd['class_ratio_vlr_vencido_cobro_class'].replace(dic_ratio)
dd['Class_sms2'].fillna(0.5,inplace=True)
dd['Class_IVR2'].fillna(0.5,inplace=True)
#dd['class_CE2'].fillna(0.5,inplace=True)
dic_reco=pd.crosstab(grupo['class_Cantidad_de_cobros_diff_mes'].astype(str),grupo['Efectivo Pago']).apply(lambda r: r/r.sum(), axis=1)['1'].to_dict()
dic_reco_mes=pd.crosstab(grupo['class_Cantidad_recontactos_mes'].astype(str),grupo['Efectivo Pago']).apply(lambda r: r/r.sum(), axis=1)['1'].to_dict()
dd['class_Cantidad_recontactos_mes2']=dd['class_Cantidad_recontactos_mes'].replace(dic_reco_mes)
dd['class_Cantidad_de_cobros_diff_mes2']=dd['class_Cantidad_de_cobros_diff_mes'].replace(dic_reco)
dd['class_Cantidad_de_cobros_diff_mes2'].fillna('0',inplace=True)
dd['Estandar']=dd[dd.filter(like='2').columns.drop(['Banco 2_Virtual','12', '20', '21', '22', '23'])].sum(axis=1)
labels=["Deficiente", "Malo",'Regular',"Bueno","Muy bueno"]
breaks = jenkspy.jenks_breaks(dd['Estandar'], nb_class=5)
dd['cut_break'] = pd.cut(dd['Estandar'] , bins=breaks, labels=labels, include_lowest=True)
## start with the follow-up data
grupo_2=sin_seg.groupby(['Efectivo Pago','Descripcion departamento', 'sistema origen',
'Vector Cualitativo # Suscripción',
'Perfil Digital', 'Descripcion subcategoria',
'Descripcion categoria', 'Estado del Cliente',
'Tipo Habito de Pago',
'Analisis De Habito', 'Descripcion producto_resumen',
'Hogar', 'Califica_suscr_class', 'Banco 1_Cl', 'Causal_Cl',
'Descripcion estado de corte_Cl','Class_Total valor pendiente suscripcion','Total_clasificacion_cant_virtuales',
'class_ratio_vlr_vencido_cobro_class','class_# de Dias De Mora']).size().reset_index(name='frecuency')
# dic_reg=pd.crosstab(grupo_2['Descripcion Regional'],grupo_2['Efectivo Pago']).apply(lambda r: r/r.sum(), axis=1)['1'].to_dict()
dic_des_dep=pd.crosstab(grupo_2['Descripcion departamento'],grupo_2['Efectivo Pago']).apply(lambda r: r/r.sum(), axis=1)['1'].to_dict()
dic_vec_cua=pd.crosstab(grupo_2['Vector Cualitativo # Suscripción'],grupo_2['Efectivo Pago']).apply(lambda r: r/r.sum(), axis=1)['1'].to_dict()
dic_sis_origen=pd.crosstab(grupo_2['sistema origen'],grupo_2['Efectivo Pago']).apply(lambda r: r/r.sum(), axis=1)['1'].to_dict()
dic_dias_mora=pd.crosstab(grupo_2['class_# de Dias De Mora'],grupo_2['Efectivo Pago']).apply(lambda r: r/r.sum(), axis=1)['1'].to_dict()
dic_per_dig=pd.crosstab(grupo_2['Perfil Digital'],grupo_2['Efectivo Pago']).apply(lambda r: r/r.sum(), axis=1)['1'].to_dict()
dic_desc_sub=pd.crosstab(grupo_2['Descripcion subcategoria'],grupo_2['Efectivo Pago']).apply(lambda r: r/r.sum(), axis=1)['1'].to_dict()
# dic_desc_sus=pd.crosstab(grupo_2['Tipificacion suscripcion'],grupo_2['Efectivo Pago']).apply(lambda r: r/r.sum(), axis=1)['1'].to_dict()
# dic_ant_clie=pd.crosstab(grupo_2['Antiguedad Cliente'],grupo_2['Efectivo Pago']).apply(lambda r: r/r.sum(), axis=1)['1'].to_dict()
dic_desc_cat=pd.crosstab(grupo_2['Descripcion categoria'],grupo_2['Efectivo Pago']).apply(lambda r: r/r.sum(), axis=1)['1'].to_dict()
dic_est_cliente=pd.crosstab(grupo_2['Estado del Cliente'],grupo_2['Efectivo Pago']).apply(lambda r: r/r.sum(), axis=1)['1'].to_dict()
dic_hab=pd.crosstab(grupo_2['Tipo Habito de Pago'],grupo_2['Efectivo Pago']).apply(lambda r: r/r.sum(), axis=1)['1'].to_dict()
# dic_sus_tipo=pd.crosstab(grupo_2['Tipo Producto Servicio Suscrito'],grupo_2['Efectivo Pago']).apply(lambda r: r/r.sum(), axis=1)['1'].to_dict()
dic_ana_habi=pd.crosstab(grupo_2['Analisis De Habito'],grupo_2['Efectivo Pago']).apply(lambda r: r/r.sum(), axis=1)['1'].to_dict()
dic_ana_hogar=pd.crosstab(grupo_2['Hogar'],grupo_2['Efectivo Pago']).apply(lambda r: r/r.sum(), axis=1)['1'].to_dict()
dic_cali= | pd.crosstab(grupo_2['Califica_suscr_class'],grupo_2['Efectivo Pago']) | pandas.crosstab |
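The row above ends in a pandas.crosstab completion used for target-rate encoding of a categorical column. As an illustrative aside (not part of the original snippet), here is a minimal runnable sketch of that pattern with made-up column names and values:

import pandas as pd

toy = pd.DataFrame({
    "segment": ["A", "A", "B", "B", "B", "C"],
    "paid":    ["1", "0", "1", "1", "0", "0"],
})
# Row-normalised crosstab: share of paid ("1") observations per segment
rates = pd.crosstab(toy["segment"], toy["paid"]).apply(lambda r: r / r.sum(), axis=1)["1"].to_dict()
# Replace each segment label with its empirical payment rate, as the snippet does with its dic_* maps
toy["segment_rate"] = toy["segment"].replace(rates)
print(rates)  # e.g. {'A': 0.5, 'B': 0.666..., 'C': 0.0}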
# -*- coding: utf-8 -*-
"""
@author: Diego
"""
import zipfile
import pandas as pd
from bs4 import BeautifulSoup
import requests
import io
import sqlite3
import os
import datetime
cwd = os.getcwd()
pd.set_option("display.width", 400)
pd.set_option("display.max_columns", 10)
pd.options.mode.chained_assignment = None
cwd = os.getcwd()
db = sqlite3.connect(os.path.join(cwd, "data", "finance.db"))
cur = db.cursor()
#%% functions
def create_tables():
db.execute(
"""CREATE TABLE IF NOT EXISTS files(
file_name CHARACTER VARYING (19),
last_modified TIMESTAMP
)"""
)
for fs in ["bpa", "bpp"]:
db.execute(
f"""CREATE TABLE IF NOT EXISTS {fs}
(cnpj CHARACTER VARYING(18),
dt_refer SMALLINT,
grupo_dfp CHARACTER VARYING(11),
dt_fim_exerc DATE,
cd_conta CHARACTER VARYING(15),
ds_conta CHARACTER VARYING(100),
vl_conta BIGINT,
itr_dfp CHARACTER VARYING(3))"""
)
db.execute(f"CREATE INDEX idx_{fs}_cnpj_grupo ON {fs}(cnpj, grupo_dfp);")
for fs in ["dre", "dva", "dfc"]:
db.execute(
f"""CREATE TABLE IF NOT EXISTS {fs}
(cnpj CHARACTER VARYING(18),
dt_refer SMALLINT,
grupo_dfp CHARACTER VARYING(11),
dt_ini_exerc DATE,
dt_fim_exerc DATE,
cd_conta CHARACTER VARYING(15),
ds_conta CHARACTER VARYING(100),
vl_conta BIGINT,
itr_dfp CHARACTER VARYING(3),
fiscal_quarter SMALLINT
)"""
)
db.execute(f"CREATE INDEX idx_{fs}_cnpj_grupo ON {fs}(cnpj, grupo_dfp);")
db.execute(
"""CREATE TABLE IF NOT EXISTS dmpl
(cnpj CHARACTER VARYING(18),
dt_refer INTEGER,
grupo_dfp CHARACTER VARYING(11),
dt_ini_exerc DATE,
dt_fim_exerc DATE,
cd_conta CHARACTER VARYING(15),
ds_conta CHARACTER VARYING(100),
coluna_df CHARACTER VARYING(100),
vl_conta BIGINT,
itr_dfp CHARACTER VARYING(3),
fiscal_quarter SMALLINT)"""
)
db.execute(
"""CREATE TABLE IF NOT EXISTS tickers(
ticker CHARACTER VARYING(7),
cnpj CHARACTER VARYING(18),
type CHARACTER VARYING(7),
sector CHARACTER VARYING(50),
subsector CHARACTER VARYING(50),
segment CHARACTER VARYING(50),
denom_social CHARACTER VARYING(100),
denom_comerc CHARACTER VARYING(100),
PRIMARY KEY (ticker)
)"""
)
return
def files_to_update():
# create dataframe with the files already processed
db_files = pd.read_sql("SELECT * FROM files", db)
# create a dataframe with the files that are available in the urls
urls = [
"http://dados.cvm.gov.br/dados/CIA_ABERTA/DOC/DFP/DADOS/",
"http://dados.cvm.gov.br/dados/CIA_ABERTA/DOC/ITR/DADOS/",
]
files = {}
i = 0
for url in urls:
response = requests.get(url)
soup = BeautifulSoup(response.content, "lxml")
table = soup.find("table")
tr = table.find_all("tr")
for t in tr:
if (t.text[3:17] == "_cia_aberta_20") and (t.text[19:23] == ".zip"):
file_name = t.text[0:19]
last_modified = | pd.to_datetime(t.text[23:39]) | pandas.to_datetime |
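The completion above parses a fixed-width slice of a scraped directory listing with pandas.to_datetime. A small, hedged example of the same call on an invented timestamp string:

import pandas as pd

raw = "2021-03-01 10:15"  # e.g. the "Last modified" column of an Apache-style index page
last_modified = pd.to_datetime(raw)
print(last_modified, type(last_modified))  # 2021-03-01 10:15:00, a pandas Timestamp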
import json
import pathlib
from xml.sax.saxutils import escape
from scipy import sparse
import regex
import sklearn
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.preprocessing import normalize
import numpy as np
import pandas as pd
from lxml import etree
WORD_PATTERN = r"(?:(?:\p{L}|\p{N}){2,}|[IVXivx0-9])"
OUT_OF_VOC_TOKEN = (
"https://gitlab.inria.fr/parietal/jerome_dockes/tree/"
"main/semantic_structure/corpus_manip/xml/ns#"
"out-of-vocabulary-token"
)
RARE_TOKEN = (
"https://gitlab.inria.fr/parietal/jerome_dockes/tree/"
"main/semantic_structure/corpus_manip/xml/ns#"
"rare-token"
)
_TERM_BLUE = "\033[94m"
_TERM_GREEN = "\033[92m"
_TERM_ENDC = "\033[0m"
# not synonyms despite high jaro-winkler similarity
_DIFFERENT_WORDS = {
("addiction", "addition"),
("imitation", "limitation"),
("preference", "reference"),
("asymmetric", "symmetric"),
("mediation", "medication"),
("mediation", "meditation"),
("preferential", "referential"),
("asymptomatic", "symptomatic"),
("covert attention", "overt attention"),
}
def _identity(arg):
return arg
def nltk_stop_words():
with open(
str(pathlib.Path(__file__).parent / "data" / "nltk_stop_words.txt")
) as f:
words = {line.strip() for line in f}.difference({""})
return words
def sklearn_stop_words():
return set(sklearn.feature_extraction.text.ENGLISH_STOP_WORDS)
class Tokenizer(object):
def __init__(self, token_pattern=WORD_PATTERN):
self.token_pattern_ = token_pattern
self._compiled_regexp = regex.compile(token_pattern)
self.match_positions = None
def __call__(self, text, keep_pos=False):
if keep_pos:
return self._tokenize_keep_pos(text)
return self._tokenize(text)
def _tokenize(self, text):
return self._compiled_regexp.findall(str(text))
def _tokenize_keep_pos(self, text):
matches = self._compiled_regexp.finditer(str(text))
result = []
positions = []
for m in matches:
result.append(m.group())
positions.append(m.span())
self.match_positions = np.asarray(positions)
return result
def _get_stemmer(stemming_kind):
if stemming_kind == "identity":
return _identity
if stemming_kind == "porter_stemmer":
try:
import nltk
except ImportError:
raise ImportError(
"nltk must be installed to use the Porter stemmer")
return nltk.PorterStemmer().stem
if stemming_kind == "wordnet_lemmatizer":
try:
import nltk
except ImportError:
raise ImportError(
"nltk must be installed to use the WordNet lemmatizer")
return nltk.stem.WordNetLemmatizer().lemmatize
raise ValueError('invalid value for "stemming_kind".')
def _get_stop_words(stop_words):
if not isinstance(stop_words, str):
return set(stop_words)
if stop_words == "nltk":
return nltk_stop_words()
if stop_words == "sklearn":
return sklearn_stop_words()
raise ValueError(stop_words)
class Standardizer(object):
def __init__(self, stemming="identity"):
self.stemming_ = stemming
self._stemmer = _get_stemmer(stemming)
def _standardize_token(self, token):
return self._stemmer(str.lower(token))
def __call__(self, tokens):
return list(map(self._standardize_token, tokens))
class StopWordFilter(object):
def __init__(self, stop_words, standardizer=Standardizer()):
stop_words = _get_stop_words(stop_words)
self.stop_words_ = set(standardizer(stop_words))
self.kept_tokens = None
def __call__(self, tokens, keep_pos=False):
if keep_pos:
return self._filter_stop_words_keep_pos(tokens)
return self._filter_stop_words(tokens)
def _filter_stop_words(self, tokens):
return [t for t in tokens if t not in self.stop_words_]
def _filter_stop_words_keep_pos(self, tokens):
result = []
kept = []
for i, tok in enumerate(tokens):
if tok not in self.stop_words_:
kept.append(i)
result.append(tok)
self.kept_tokens = np.asarray(kept)
return result
def _build_phrase_map(phrases):
phrase_map = {}
for phrase in phrases:
_update_phrase_map(phrase_map, phrase)
return phrase_map
def _update_phrase_map(phrase_map, phrase):
head = phrase[0]
tail = phrase[1:]
head_map = phrase_map.get(head, {})
if not len(tail):
head_map[""] = {}
phrase_map[head] = head_map
return
_update_phrase_map(head_map, tail)
phrase_map[head] = head_map
return
def _transform_out_of_voc_token(token, out_of_voc):
if out_of_voc == "ignore":
return tuple()
if out_of_voc == "keep":
return (token,)
if out_of_voc == "[]":
return ("[{}]".format(token),)
if out_of_voc == "{}":
return ("{{{}}}".format(token),)
return (out_of_voc,)
def _extract_next_phrase(phrase_map, sentence, out_of_voc):
if not sentence:
return tuple(), sentence
sentence_head = sentence[0]
sentence_tail = sentence[1:]
if sentence_head not in phrase_map:
return (
_transform_out_of_voc_token(sentence_head, out_of_voc),
sentence_tail,
)
phrase_head = sentence_head
phrase_tail, new_sentence_tail = _extract_next_phrase(
phrase_map[phrase_head], sentence_tail, out_of_voc="ignore"
)
if phrase_tail:
return (phrase_head, *phrase_tail), new_sentence_tail
if "" in phrase_map[phrase_head]:
return (phrase_head,), sentence_tail
return (
_transform_out_of_voc_token(sentence_head, out_of_voc),
sentence_tail,
)
def _extract_phrases(phrase_map, sentence, out_of_voc):
phrases = []
while sentence:
next_phrase, sentence = _extract_next_phrase(
phrase_map, sentence, out_of_voc
)
if next_phrase:
phrases.append(next_phrase)
return phrases
class PhraseExtractor(object):
def __init__(self, phrases, out_of_voc="ignore"):
self.phrases_ = set(phrases)
self.out_of_voc_ = out_of_voc
self._phrase_map = _build_phrase_map(phrases)
self.phrase_positions = None
def __call__(self, sentence, keep_pos=False):
if keep_pos:
return self._extract_phrases_keep_pos(sentence)
return _extract_phrases(self._phrase_map, sentence, self.out_of_voc_)
def _extract_phrases_keep_pos(self, sentence):
out_of_voc = {"ignore": OUT_OF_VOC_TOKEN}.get(
self.out_of_voc_, self.out_of_voc_
)
extracted = _extract_phrases(self._phrase_map, sentence, out_of_voc)
result = []
i = 0
positions = []
for phrase in extracted:
if (
phrase != (OUT_OF_VOC_TOKEN,)
or self.out_of_voc_ == OUT_OF_VOC_TOKEN
):
result.append(phrase)
positions.append((i, i + len(phrase)))
i += len(phrase)
self.phrase_positions = np.asarray(positions)
return result
class VocabularyMapping(object):
def __init__(self, voc_mapping={}):
self.voc_mapping = voc_mapping
def __call__(self, phrases):
if not self.voc_mapping:
return phrases
return [self.voc_mapping.get(p, p) for p in phrases]
class TokenizingPipeline(object):
def __init__(
self,
vocabulary_mapping=None,
phrase_extractor=None,
stop_word_filter=None,
standardizer=None,
tokenizer=None,
as_tuples=False,
keep_pos=False,
frequencies=None,
raw_vocabulary=None,
):
self.vocabulary_mapping_ = vocabulary_mapping
self.phrase_extractor_ = phrase_extractor
self.stop_word_filter_ = stop_word_filter
self.standardizer_ = standardizer
self.tokenizer_ = tokenizer
self.as_tuples_ = as_tuples
self.keep_pos = keep_pos
self.frequencies = frequencies
if self.frequencies is not None:
self.frequencies.index = tuple_sequence_to_strings(
self.frequencies.index
)
if raw_vocabulary is not None:
self.raw_vocabulary = tuple_sequence_to_strings(raw_vocabulary)
else:
self.raw_vocabulary = None
if self.tokenizer_ is None:
self.tokenizer_ = Tokenizer()
if self.standardizer_ is None:
self.standardizer_ = Standardizer()
if self.stop_word_filter_ is None:
self.stop_word_filter_ = StopWordFilter("nltk", self.standardizer_)
if self.phrase_extractor_ is None:
self.phrase_extractor_ = PhraseExtractor([], out_of_voc="keep")
if self.vocabulary_mapping_ is None:
self.vocabulary_mapping_ = VocabularyMapping()
self.positions = None
self.extracted_phrases = None
self.raw_text = None
def to_vocabulary_file(self, voc_file):
if self.frequencies is not None:
freq = self.frequencies
else:
freq = np.ones(len(self.raw_vocabulary))
pd.Series(freq, index=self.raw_vocabulary).to_csv(
voc_file, header=False
)
_save_voc_mapping(
voc_file,
self.vocabulary_mapping_.voc_mapping,
stemming=self.standardizer_.stemming_,
)
def __call__(self, text, keep_pos=None):
keep_pos = self.keep_pos if keep_pos is None else keep_pos
tokenized = self.vocabulary_mapping_(
self.phrase_extractor_(
self.stop_word_filter_(
self.standardizer_(
self.tokenizer_(text, keep_pos=keep_pos)
),
keep_pos=keep_pos,
),
keep_pos=keep_pos,
)
)
if self.as_tuples_:
phrases = tokenized
else:
phrases = tuple_sequence_to_strings(tokenized)
if keep_pos:
self._store_positions(phrases, text)
return phrases
def _store_positions(self, phrases, text):
self.raw_text = text
self.extracted_phrases = phrases
if not phrases:
self.positions = np.asarray([])
return
token_positions = self.tokenizer_.match_positions[
self.stop_word_filter_.kept_tokens
]
start_positions = token_positions[
self.phrase_extractor_.phrase_positions[:, 0]
][:, 0]
end_positions = token_positions[
self.phrase_extractor_.phrase_positions[:, 1] - 1
][:, 1]
self.positions = np.asarray([start_positions, end_positions]).T
def get_full_vocabulary(self, as_tuples=False):
voc = self.phrase_extractor_.phrases_
if as_tuples:
return sorted(voc)
return sorted(tuple_sequence_to_strings(voc))
def get_vocabulary(self, as_tuples=False):
voc = self.phrase_extractor_.phrases_.difference(
self.vocabulary_mapping_.voc_mapping.keys()
)
if as_tuples:
return sorted(voc)
return sorted(tuple_sequence_to_strings(voc))
def get_frequencies(self):
if hasattr(self, "frequencies_"):
return self.frequencies_
if self.frequencies is None:
return np.ones(len(self.get_vocabulary()))
idx = tuple_sequence_to_strings(
[(self(w) or [""])[0] for w in self.frequencies.index]
)
freq = self.frequencies.groupby(idx).sum()
self.frequencies_ = freq.loc[self.get_vocabulary()].values
return self.frequencies_
def highlighted_text(self, extra_info=None):
return highlight_text(
self.raw_text,
self.extracted_phrases,
self.positions,
extra_info=extra_info,
)
def print_highlighted_text(self, replace=False):
print_highlighted_text(self.highlighted_text(), replace=replace)
def highlight_text(text, phrases, positions, extra_info=None):
phrases = tuple_sequence_to_strings(phrases)
snippets = []
prev = 0
snippets.append("<highlighted_text>")
for phrase, (start, stop) in zip(phrases, positions):
snippets.append(escape(text[prev:start]))
attributes = {"standardized_form": phrase}
if extra_info is not None:
attributes.update(extra_info(phrase))
attr_str = " ".join(
['{}="{}"'.format(k, v) for (k, v) in attributes.items()]
)
snippets.append("<extracted_phrase {}>".format(attr_str))
snippets.append(escape(text[start:stop]))
snippets.append("</extracted_phrase>")
prev = stop
snippets.append(escape(text[prev:]))
snippets.append("</highlighted_text>")
return "".join(snippets)
def get_printable_highlighted_text(text, replace=False):
highlighted = etree.XML(text)
parts = []
for node in highlighted.xpath("child::node()"):
if isinstance(node, etree._Element):
if replace:
parts.extend(
[
_TERM_GREEN,
"[",
node.get("standardized_form"),
"]",
_TERM_ENDC,
]
)
else:
parts.extend([_TERM_BLUE, "[", node.text, "]", _TERM_ENDC])
else:
parts.append(str(node))
return "".join(parts)
def print_highlighted_text(text, replace=False):
print(get_printable_highlighted_text(text, replace=replace))
def get_html_highlighted_text(text, standalone=False):
stylesheet_path = pathlib.Path(__file__).parent / "data" / "highlight.xsl"
stylesheet = etree.XSLT(etree.parse(str(stylesheet_path)))
html = stylesheet(etree.XML(text, parser=etree.XMLParser(recover=True)))
if not standalone:
html = html.findall("body")[0][0]
return etree.tostring(html, method="html", encoding="unicode")
def load_vocabulary(vocabulary_file, token_pattern=WORD_PATTERN):
tokenizer = Tokenizer(token_pattern=token_pattern)
if vocabulary_file.endswith(".csv"):
try:
word_freq = pd.read_csv(
vocabulary_file,
header=None,
encoding="utf-8",
usecols=[0, 1],
na_values=[],
keep_default_na=False,
).values
words, frequencies = word_freq.T
except ValueError:
words = pd.read_csv(
vocabulary_file,
header=None,
encoding="utf-8",
usecols=[0],
na_values=[],
keep_default_na=False,
).values.ravel()
frequencies = np.ones(words.shape)
else:
with open(vocabulary_file, "r", encoding="utf-8") as fh:
words = fh.readlines()
frequencies = np.ones(len(words), dtype=int)
vocabulary = []
for word, freq in zip(words, frequencies):
tokens = tuple(tokenizer(word))
if tokens:
vocabulary.append((tokens, freq))
return vocabulary
def tokenizing_pipeline_from_vocabulary(
vocabulary,
frequencies=None,
stemming="identity",
stop_words="nltk",
out_of_voc="ignore",
voc_mapping={},
token_pattern=WORD_PATTERN,
as_tuples=False,
):
if frequencies is None:
frequencies = np.ones(len(vocabulary))
tokenizer = Tokenizer(token_pattern)
standardizer = Standardizer(stemming=stemming)
stop_word_filter = StopWordFilter(stop_words, standardizer)
std = TokenizingPipeline(
phrase_extractor=PhraseExtractor(phrases=[], out_of_voc="keep"),
stop_word_filter=stop_word_filter,
standardizer=standardizer,
tokenizer=tokenizer,
)
phrases = {tuple(std(phrase)) for phrase in vocabulary}.difference({()})
phrase_extractor = PhraseExtractor(phrases, out_of_voc=out_of_voc)
vocabulary = string_sequence_to_tuples(vocabulary)
if voc_mapping == "auto":
voc_mapping = make_vocabulary_mapping(
vocabulary, frequencies, stemming=stemming
)
voc_mapper = VocabularyMapping(voc_mapping)
return TokenizingPipeline(
voc_mapper,
phrase_extractor,
stop_word_filter,
standardizer,
tokenizer,
as_tuples=as_tuples,
raw_vocabulary=vocabulary,
frequencies= | pd.Series(frequencies, index=vocabulary) | pandas.Series |
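The completion above stores term frequencies as a pandas.Series indexed by the raw vocabulary, so later groupby and loc lookups by phrase work. An illustrative sketch with invented tokens (not the pipeline's real vocabulary):

import numpy as np
import pandas as pd

vocabulary = ["brain", "memory", "attention"]  # made-up phrases
frequencies = np.ones(len(vocabulary))
freq = pd.Series(frequencies, index=vocabulary)
print(freq.loc["memory"])  # 1.0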
import numpy as np
import pandas as pd
import ipaddress
from .engineobj import SqPandasEngine
class EvpnvniObj(SqPandasEngine):
@staticmethod
def table_name():
return 'evpnVni'
def get(self, **kwargs) -> pd.DataFrame:
"""Class-specific to extract info from addnl tables"""
drop_cols = []
addnl_fields = kwargs.pop('addnl_fields', [])
columns = kwargs.get('columns', [])
if (columns != ['*'] and 'ifname' not in columns and
'ifname' not in addnl_fields):
addnl_fields.append('ifname')
drop_cols.append('ifname')
if (columns == ['default'] or columns == ['*'] or
'remoteVtepCnt' in columns):
getVtepCnt = True
if columns != ['*'] and 'remoteVtepList' not in columns:
addnl_fields.append('remoteVtepList')
drop_cols.append('remoteVtepList')
else:
getVtepCnt = False
df = super().get(addnl_fields=addnl_fields, **kwargs)
if df.empty:
return df
if getVtepCnt:
insidx = df.columns.tolist().index('remoteVtepList')
df.insert(insidx+1, 'remoteVtepCnt',
df.remoteVtepList.str.len())
# See if we can retrieve the info to fill out the rest of the data
# Start with VLAN values
if 'vlan' not in df.columns and 'vrf' not in df.columns:
return df.drop(columns=drop_cols, errors='ignore')
iflist = list(set([x for x in df[df.vlan == 0]['ifname'].to_list()
if x and x != 'None']))
if iflist:
ifdf = self._get_table_sqobj('interfaces').get(
namespace=kwargs.get('namespace', []), ifname=iflist,
columns=['namespace', 'hostname', 'ifname', 'state', 'vlan',
'vni'])
if not ifdf.empty:
df = df.merge(ifdf, on=['namespace', 'hostname', 'ifname'],
how='left', suffixes=('', '_y')) \
.drop(columns=['timestamp_y', 'state_y', 'vni_y'])
df['vlan'] = np.where(
df['vlan_y'].notnull(), df['vlan_y'], df['vlan'])
df = df.drop(columns=['vlan_y']) \
.fillna({'vlan': 0}) \
.astype({'vlan': int})
# Now we try to see if we can get the VRF info for all L2 VNIs as well
ifdf = self._get_table_sqobj('interfaces') \
.get(namespace=kwargs.get('namespace', []), type=['vlan'],
columns=['namespace', 'hostname', 'vlan', 'master']) \
.query('master != ""') \
.reset_index(drop=True)
if not ifdf.empty and not df.empty and 'vrf' in df.columns:
df = df.merge(ifdf, left_on=['namespace', 'hostname', 'vlan'],
right_on=['namespace', 'hostname', 'vlan'],
how='outer', suffixes=('', '_y'))
# We fix the VRF from L2 VNIs from the SVI interface's master info
# and we drop those entries that are just local VLANs, without VNI
df['vrf'] = np.where(df['vrf'] == "", df['master'], df['vrf'])
df = df.drop(columns=['master', 'timestamp_y'], errors='ignore') \
.dropna(subset=['vni'])
# Fill out the numMacs and numArps columns if we can
if 'numMacs' in df.columns:
vlanlist = list(set(df[df.numMacs == 0]['vlan'].to_list()))
else:
vlanlist = []
if vlanlist:
if 'numMacs' in df.columns:
macdf = self._get_table_sqobj('macs').get(
namespace=kwargs.get('namespace'))
df['numMacs'] = df.apply(self._count_macs, axis=1,
args=(macdf, ))
return df.drop(columns=drop_cols, errors='ignore')
def _count_macs(self, x, df):
return df[(df.namespace == x['namespace']) & (df.vlan == x['vlan']) &
(df.hostname == x['hostname'])].count()
def summarize(self, **kwargs):
self._init_summarize(self.iobj._table, **kwargs)
if self.summary_df.empty:
return self.summary_df
self._summarize_on_add_field = [
('uniqueVtepCnt', 'priVtepIp', 'nunique'),
('uniqueVniCnt', 'vni', 'nunique'),
]
l3vni_count = self.summary_df.query('type == "L3" and vni != 0') \
.groupby(by=['namespace'])['vni'].count()
for ns in self.ns.keys():
if l3vni_count.get(ns, 0):
self.ns[ns]['mode'] = 'symmetric'
else:
self.ns[ns]['mode'] = 'asymmetric'
self.summary_row_order.append('mode')
self._summarize_on_add_with_query = [
('uniqueL3VniCnt', 'type == "L3" and vni != 0', 'vrf', 'nunique'),
('uniqueL2VniCnt', 'type == "L2"', 'vni', 'nunique'),
('uniqueMulticastGroups', 'mcastGroup != "0.0.0.0"', 'mcastGroup',
'nunique'),
('vnisUsingMulticast',
'type == "L2" and replicationType == "multicast"',
'vni', 'nunique'),
('vnisUsingIngressRepl',
'type == "L2" and replicationType == "ingressBGP"',
'vni', 'nunique')
]
self._gen_summarize_data()
# To summarize accurately, we need to explode the remoteVteps
# column from a list to an individual entry for each
# remote VTEP in that list. The resulting set can be huge if the
# number of VTEPs times the ports is huge.
#
# the 'explode' only works post pandas 0.25
self.summary_df = self.summary_df.explode(
'remoteVtepList').dropna(how='any')
self.nsgrp = self.summary_df.groupby(by=["namespace"])
if not self.summary_df.empty:
herPerVtepCnt = self.summary_df.groupby(
by=['namespace', 'hostname'])['remoteVtepList'].nunique()
self._add_stats_to_summary(herPerVtepCnt, 'remoteVtepsPerVtepStat',
filter_by_ns=True)
self.summary_row_order.append('remoteVtepsPerVtepStat')
self._post_summarize(check_empty_col='uniqueVtepCnt')
return self.ns_df.convert_dtypes()
def aver(self, **kwargs) -> pd.DataFrame:
"""Assert for EVPN Data"""
assert_cols = ["namespace", "hostname", "vni", "vlan", "remoteVtepList", "vrf",
"mcastGroup", "type", "priVtepIp", "state", "l2VniList",
"ifname", "secVtepIp"]
kwargs.pop("columns", None) # Loose whatever's passed
status = kwargs.pop('status', 'all')
df = self.get(columns=assert_cols, **kwargs)
if df.empty:
df = | pd.DataFrame(columns=assert_cols) | pandas.DataFrame |
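The completion above falls back to an empty DataFrame that still carries the expected column names, which keeps downstream column-based code from failing when no data is returned. A minimal sketch:

import pandas as pd

assert_cols = ["namespace", "hostname", "vni"]  # shortened, hypothetical subset
df = pd.DataFrame(columns=assert_cols)
print(df.empty, list(df.columns))  # True ['namespace', 'hostname', 'vni']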
import pandas as pd
import json
from os import mkdir, rmdir
from . import utils
from synapseclient import File, Activity
import numpy as np
class NumpyEncoder(json.JSONEncoder):
""" Special json encoder for numpy types """
def default(self, obj):
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
return json.JSONEncoder.default(self, obj)
def create_temp_location():
"""
Creates a temporary location to store the json files
"""
try:
mkdir('./staging')
except FileExistsError:
return
def delete_temp_location():
"""
Deletes the default temporary location
"""
rmdir('./staging')
def remove_non_values(d: dict) -> dict:
"""
Given a dictionary, remove all keys whose values are null.
Values can be of a few types: a dict, a list, None/NaN, and a regular element - such as str or number;
each case is handled separately; if a key contains a list, the list can hold plain elements
or nested dicts, and the same goes for dictionaries.
"""
cleaned_dict = {}
for key, value in d.items():
# case 1: dict
if isinstance(value, dict):
nested_dict = remove_non_values(value)
if len(nested_dict.keys()) > 0:
cleaned_dict[key] = nested_dict
# case 2: list
if isinstance(value, list):
for elem in value: # value is a list
if isinstance(elem, dict):
nested_dict = remove_non_values(elem)
else:
cleaned_dict[key] = value
# case 3: None/NaN
elif | pd.isna(value) | pandas.isna |
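The completion above uses pandas.isna as a scalar null check inside the dictionary cleaner. A short example of how the call behaves on a few scalar values:

import numpy as np
import pandas as pd

for value in [None, np.nan, 0, "", "x"]:
    print(repr(value), pd.isna(value))
# Only None and nan are reported as missing; 0 and "" would be kept by the cleaner.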
from igraph import *
import leidenalg as la
import pandas as pd
import OmicsIntegrator as oi
import networkx as nx
import numpy as np
import pickle
import matplotlib
import matplotlib.pyplot as plot
matplotlib.rcParams['pdf.fonttype'] = 42
#import plotly.express as px
class hyphalNetwork:
"""
The hypha class represents a set of individual sample networks/graphs and
creates the communities shared between each
"""
def __init__(self, proteinWeights, interactome):
"""
Hypha class
Based on a dictionary of proteins with weights and an interactome,
derives individual protein networks for each dictionary and then
finds the communities shared between them
"""
self.proteins = proteinWeights #dictionary of protein weights for each forest
self.interactome = interactome #interactome
self.forests = dict()#forests
self.community_enrichment = {} # enriched terms
self.forest_enrichment = dict() #enriched terms
self.node_counts = dict() #nodes found in forests and their counts
for pat in proteinWeights.keys():
print('Running PCSF for sample '+pat)
forest = self._getForest(list(proteinWeights[pat].items()))
for node in list(forest.nodes()):
if node in list(self.node_counts.keys()):
self.node_counts[node] += 1
else:
self.node_counts[node] = 1
self.forests[pat] = forest
self.communities = self.runCommunityWithMultiplex()
self.distVals = self.within_distances() #compute distances between forests
self.assignedCommunities = self.community_to_samples()
print('Created hypha across ', len(self.node_counts), 'nodes and',\
len(proteinWeights), 'forests')
def _to_file(self, fname):
""" enables saving to file"""
pickle.dump(self, open(fname, 'wb'))
def _get_subnet(self, nodenames):
"""
Searches for nodes in the interactome that have nodes of a specific name,
then returns inferred subgraph, nothing fancy
"""
nodelist = []
for nn in nodenames:
try:
nodelist.append(self.interactome.vs.find(name = nn))
except ValueError:
print('Node '+nn+' not found')
sub_g = self.interactome.subgraph(nodelist)
print("Subgraph has", len(sub_g.vs), "nodes")
return sub_g
def _nx2igraph(self, nx_graph):
"""Helper function that maps networkx object to igraph"""
new_g = Graph(directed=False)
alln = self.node_counts.keys()
for n in alln:
new_g.add_vertex(n)
for e in nx_graph.edges():
new_g.add_edge(e[0], e[1])
return new_g
def _getForest(self, nodeweights):
"""
Uses the omics integrator package to build a weighted subgraph, inferring
additional nodes and returning a larger subgraph on the interactome
"""
#map nodes interactome
pdf = pd.DataFrame(nodeweights)
pdf.columns = ['name', 'prize']
graph = self.interactome
graph._prepare_prizes(pdf)
verts, edges = graph.pcsf()
forest, augmented_forest = graph.output_forest_as_networkx(verts, edges)
return forest #TODO: add in parameter shift for 0 size networks
def runCommunityWithMultiplex(self):
"""
Primary method for now
TODO: update with more features
Currently takes all forests in hypha (defined by nodes on shared edge set)
and finds communities
"""
print('running community detection')
optimizer = la.Optimiser()
netlist = []
for nx_g in self.forests.values():
netlist.append(self._nx2igraph(nx_g))
[membership, improv] = la.find_partition_multiplex(netlist,\
la.ModularityVertexPartition)
comm_df = pd.DataFrame({'Node': list(self.node_counts.keys()),\
'Community': membership})
comm_dict = dict(comm_df.groupby('Community')['Node'].apply(list))
return comm_dict
def node_stats(self):
"""
Computes statistics about the community and returns a dataframe
"""
comms = []
for no, vals in self.node_counts.items():
ndict = {'Node': no,'NumForests': vals}
for comm, nodelist in self.communities.items():
if no in nodelist:
ndict['Community'] = comm
comms.append(ndict)
return pd.DataFrame(comms)
def community_stats(self, comm_names=[], prefix=''):
"""
Computes stats about each community and returns a dataframe
"""
#print(self.communities.values())
for_d = pd.DataFrame(zip(self.assignedCommunities.keys(),self.assignedCommunities.values()),\
columns=['forest','community'])
num_forests = dict(for_d.groupby('community')['forest'].apply(len))
stat_list = []
for comm, nodes in self.communities.items():
res = {'Community': comm,\
'Nodes': len(nodes)}
if comm in num_forests.keys():
res['Forests'] = num_forests[comm]
else:
res['Forests'] = 0
if str(comm) in self.community_enrichment.keys():
res['Enriched terms'] = len(self.community_enrichment[str(comm)])
else:
res['Enriched terms'] = 1.0
stat_list.append(res)
if prefix != '' and comm in num_forests.keys() and num_forests[comm]>10:
self.to_graph(comm, prefix=prefix+str(comm))
##plot the bar plot of communities and networks
pdf = | pd.DataFrame(stat_list) | pandas.DataFrame |
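The completion above builds a DataFrame from a list of per-community stat dictionaries. A minimal sketch of that construction with invented records:

import pandas as pd

stat_list = [
    {"Community": 0, "Nodes": 12, "Forests": 3},
    {"Community": 1, "Nodes": 7, "Forests": 1},
]
pdf = pd.DataFrame(stat_list)  # one row per dict, keys become columns
print(pdf)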
"""tests.core.archive.test_archive.py
Copyright Keithley Instruments, LLC.
Licensed under MIT (https://github.com/tektronix/syphon/blob/master/LICENSE)
"""
import os
from typing import List, Optional, Tuple
import pytest
from _pytest.capture import CaptureFixture
from _pytest.fixtures import FixtureRequest
from _pytest.monkeypatch import MonkeyPatch
from pandas import DataFrame, concat, read_csv
from pandas.testing import assert_frame_equal
from py._path.local import LocalPath
from sortedcontainers import SortedDict, SortedList
import syphon
import syphon.hash
import syphon.schema
from syphon.core.archive.filemap import MappingBehavior
from syphon.core.check import DEFAULT_FILE as DEFAULT_HASH_FILE
from ... import get_data_path, rand_string
from ...assert_utils import assert_captured_outerr
from ...types import PathType
@pytest.fixture(
params=[
("iris.csv", SortedDict({"0": "Name"})),
("iris_plus.csv", SortedDict({"0": "Species", "1": "PetalColor"})),
(
"auto-mpg.csv",
SortedDict({"0": "model year", "1": "cylinders", "2": "origin"}),
),
]
)
def archive_params(request: FixtureRequest) -> Tuple[str, SortedDict]:
return request.param
@pytest.fixture(
params=[
(
"iris-part-1-of-6",
"iris-part-1-of-6-combined.csv",
SortedDict({"0": "Species", "1": "PetalColor"}),
),
(
"iris-part-2-of-6",
"iris-part-2-of-6-combined.csv",
SortedDict({"0": "Species", "1": "PetalColor"}),
),
(
"iris-part-3-of-6",
"iris-part-3-of-6-combined.csv",
SortedDict({"0": "Species", "1": "PetalColor"}),
),
(
"iris-part-4-of-6",
"iris-part-4-of-6-combined.csv",
SortedDict({"0": "Species", "1": "PetalColor"}),
),
(
"iris-part-5-of-6",
"iris-part-5-of-6-combined.csv",
SortedDict({"0": "Species", "1": "PetalColor"}),
),
(
"iris-part-6-of-6",
"iris-part-6-of-6-combined.csv",
SortedDict({"0": "Species", "1": "PetalColor"}),
),
]
)
def archive_meta_params(request: FixtureRequest) -> Tuple[str, str, SortedDict]:
return request.param
@pytest.fixture(params=[PathType.ABSOLUTE, PathType.RELATIVE])
def styled_cache_file(request: FixtureRequest, cache_file: LocalPath) -> str:
"""Breaks if any test in this file changes the current working directory!"""
if request.param == PathType.ABSOLUTE:
return str(cache_file)
elif request.param == PathType.RELATIVE:
return os.path.relpath(cache_file, os.getcwd())
else:
raise TypeError(f"Unsupported PathType '{request.param}'")
@pytest.fixture(params=[PathType.ABSOLUTE, PathType.RELATIVE])
def styled_hash_file(
request: FixtureRequest, hash_file: Optional[LocalPath]
) -> Optional[str]:
"""Breaks if any test in this file changes the current working directory!"""
if hash_file is None:
return None
if request.param == PathType.ABSOLUTE:
return str(hash_file)
elif request.param == PathType.RELATIVE:
return os.path.relpath(hash_file, os.getcwd())
else:
raise TypeError(f"Unsupported PathType '{request.param}'")
def _get_expected_paths(
path: str,
schema: SortedDict,
subset: DataFrame,
filename: str,
data: SortedList = SortedList(),
) -> SortedList:
path_list = data.copy()
this_schema = schema.copy()
try:
_, header = this_schema.popitem(index=0)
except KeyError:
path_list.add(os.path.join(path, filename))
return path_list
if header not in subset.columns:
return path_list
for value in subset.get(header).drop_duplicates().values:
new_subset = subset.loc[subset.get(header) == value]
value = value.lower().replace(" ", "_")
if value[-1] == ".":
value = value[:-1]
path_list = _get_expected_paths(
os.path.join(path, value), this_schema, new_subset, filename, data=path_list
)
return path_list
class TestArchive(object):
class ArchiveCacheAndHashPassthruChecker(object):
"""Asserts that the cache and hash file paths are not edited before being sent
to the `build` subcommand.
"""
def __init__(
self, monkeypatch: MonkeyPatch, cache_file: str, hash_file: Optional[str],
):
from syphon.core.build import build
self._syphon_build = build
self._monkeypatch: MonkeyPatch = monkeypatch
self.cache_file: str = cache_file
self.hash_file: Optional[str] = hash_file
def __call__(self, *args, **kwargs) -> bool:
with self._monkeypatch.context() as m:
m.setattr(syphon.core.build, "build", value=self._build_shim)
return syphon.archive(*args, **kwargs)
def _build_shim(self, *args, **kwargs) -> bool:
"""Everything is converted to str or None so test cases don't have to worry
about using LocalPath.
"""
# XXX: If the syphon.build argument order changes,
# then we need to access a different index!
assert (
str(args[0]) == self.cache_file
), f"Cache filepath edited from '{self.cache_file}' to '{args[0]}'"
# XXX: If the name of the argument changes,
# then we need to access a different key!
assert "hash_filepath" in kwargs
actual_hash_file = (
None
if kwargs["hash_filepath"] is None
else str(kwargs["hash_filepath"])
)
assert (
actual_hash_file == self.hash_file
), f"Hash filepath edited from '{self.hash_file}' to '{actual_hash_file}'"
return self._syphon_build(*args, **kwargs)
@pytest.fixture(scope="function")
def archive_fixture(
self,
monkeypatch: MonkeyPatch,
styled_cache_file: str,
styled_hash_file: Optional[str],
) -> "TestArchive.ArchiveCacheAndHashPassthruChecker":
return TestArchive.ArchiveCacheAndHashPassthruChecker(
monkeypatch, styled_cache_file, styled_hash_file
)
def test_empty_datafile(
self, capsys: CaptureFixture, archive_dir: LocalPath, verbose: bool
):
datafile = os.path.join(get_data_path(), "empty.csv")
assert not syphon.archive(archive_dir, [datafile], verbose=verbose)
assert_captured_outerr(capsys.readouterr(), verbose, False)
assert not os.path.exists(os.path.join(os.path.dirname(datafile), "#lock"))
def test_increment_one_to_many_with_metadata_with_schema(
self,
capsys: CaptureFixture,
archive_dir: LocalPath,
archive_fixture: "TestArchive.ArchiveCacheAndHashPassthruChecker",
schema_file: Optional[LocalPath],
verbose: bool,
):
# List of (expected frame filename, data filename, metadata filename) tuples
targets: List[Tuple[str, str, List[str]]] = [
(
"iris-part-1-of-6-combined.csv",
"iris-part-1-of-6.csv",
[
"iris-part-1-of-6-meta-part-1-of-2.meta",
"iris-part-1-of-6-meta-part-2-of-2.meta",
],
),
(
"iris-part-1-2.csv",
"iris-part-2-of-6.csv",
[
"iris-part-2-of-6-meta-part-1-of-2.meta",
"iris-part-2-of-6-meta-part-2-of-2.meta",
],
),
(
"iris-part-1-2-3.csv",
"iris-part-3-of-6.csv",
[
"iris-part-3-of-6-meta-part-1-of-2.meta",
"iris-part-3-of-6-meta-part-2-of-2.meta",
],
),
(
"iris-part-1-2-3-4.csv",
"iris-part-4-of-6.csv",
[
"iris-part-4-of-6-meta-part-1-of-2.meta",
"iris-part-4-of-6-meta-part-2-of-2.meta",
],
),
(
"iris-part-1-2-3-4-5.csv",
"iris-part-5-of-6.csv",
[
"iris-part-5-of-6-meta-part-1-of-2.meta",
"iris-part-5-of-6-meta-part-2-of-2.meta",
],
),
(
"iris_plus.csv",
"iris-part-6-of-6.csv",
[
"iris-part-6-of-6-meta-part-1-of-2.meta",
"iris-part-6-of-6-meta-part-2-of-2.meta",
],
),
]
expected_hashfile = (
LocalPath(archive_fixture.cache_file).dirpath(DEFAULT_HASH_FILE)
if archive_fixture.hash_file is None
else archive_fixture.hash_file
)
assert not os.path.exists(expected_hashfile)
assert not os.path.exists(archive_fixture.cache_file)
assert len(archive_dir.listdir()) == 0
expected_schemafile = (
archive_dir.join(syphon.schema.DEFAULT_FILE)
if schema_file is None
else schema_file
)
assert not os.path.exists(expected_schemafile)
syphon.init(
SortedDict({"0": "PetalColor", "1": "Species"}), expected_schemafile
)
assert os.path.exists(expected_schemafile)
for expected_frame_filename, data_filename, metadata_filenames in targets:
assert archive_fixture(
archive_dir,
[os.path.join(get_data_path(), data_filename)],
meta_files=[
os.path.join(get_data_path(), m) for m in metadata_filenames
],
filemap_behavior=MappingBehavior.ONE_TO_MANY,
schema_filepath=schema_file,
cache_filepath=archive_fixture.cache_file,
hash_filepath=archive_fixture.hash_file,
verbose=verbose,
)
assert_captured_outerr(capsys.readouterr(), verbose, False)
expected_frame = DataFrame(
read_csv(
os.path.join(get_data_path(), expected_frame_filename),
dtype=str,
index_col="Index",
)
)
expected_frame.sort_index(inplace=True)
actual_frame = DataFrame(
read_csv(str(archive_fixture.cache_file), dtype=str, index_col="Index")
)
actual_frame = actual_frame.reindex(columns=expected_frame.columns)
actual_frame.sort_index(inplace=True)
assert_captured_outerr(capsys.readouterr(), False, False)
assert_frame_equal(expected_frame, actual_frame)
assert os.path.exists(expected_hashfile)
assert syphon.check(
archive_fixture.cache_file,
hash_filepath=expected_hashfile,
verbose=verbose,
)
def test_increment_with_metadata_with_schema(
self,
capsys: CaptureFixture,
archive_dir: LocalPath,
archive_fixture: "TestArchive.ArchiveCacheAndHashPassthruChecker",
schema_file: Optional[LocalPath],
verbose: bool,
):
# List of (expected frame filename, data filename, metadata filename) tuples
targets: List[Tuple[str, str, str]] = [
(
"iris-part-1-of-6-combined.csv",
"iris-part-1-of-6.csv",
"iris-part-1-of-6.meta",
),
("iris-part-1-2.csv", "iris-part-2-of-6.csv", "iris-part-2-of-6.meta"),
("iris-part-1-2-3.csv", "iris-part-3-of-6.csv", "iris-part-3-of-6.meta"),
("iris-part-1-2-3-4.csv", "iris-part-4-of-6.csv", "iris-part-4-of-6.meta"),
(
"iris-part-1-2-3-4-5.csv",
"iris-part-5-of-6.csv",
"iris-part-5-of-6.meta",
),
("iris_plus.csv", "iris-part-6-of-6.csv", "iris-part-6-of-6.meta"),
]
expected_hashfile = (
LocalPath(archive_fixture.cache_file).dirpath(DEFAULT_HASH_FILE)
if archive_fixture.hash_file is None
else archive_fixture.hash_file
)
assert not os.path.exists(expected_hashfile)
assert not os.path.exists(archive_fixture.cache_file)
assert len(archive_dir.listdir()) == 0
expected_schemafile = (
archive_dir.join(syphon.schema.DEFAULT_FILE)
if schema_file is None
else schema_file
)
assert not os.path.exists(expected_schemafile)
syphon.init(
SortedDict({"0": "PetalColor", "1": "Species"}), expected_schemafile
)
assert os.path.exists(expected_schemafile)
for expected_frame_filename, data_filename, metadata_filename in targets:
assert archive_fixture(
archive_dir,
[os.path.join(get_data_path(), data_filename)],
meta_files=[os.path.join(get_data_path(), metadata_filename)],
schema_filepath=schema_file,
cache_filepath=archive_fixture.cache_file,
hash_filepath=archive_fixture.hash_file,
verbose=verbose,
)
assert_captured_outerr(capsys.readouterr(), verbose, False)
expected_frame = DataFrame(
read_csv(
os.path.join(get_data_path(), expected_frame_filename),
dtype=str,
index_col="Index",
)
)
expected_frame.sort_index(inplace=True)
actual_frame = DataFrame(
read_csv(str(archive_fixture.cache_file), dtype=str, index_col="Index")
)
actual_frame.sort_index(inplace=True)
assert_captured_outerr(capsys.readouterr(), False, False)
assert_frame_equal(expected_frame, actual_frame)
assert os.path.exists(expected_hashfile)
assert syphon.check(
archive_fixture.cache_file,
hash_filepath=expected_hashfile,
verbose=verbose,
)
def test_increment_with_metadata_without_schema(
self,
capsys: CaptureFixture,
archive_dir: LocalPath,
archive_fixture: "TestArchive.ArchiveCacheAndHashPassthruChecker",
verbose: bool,
):
# List of (expected frame filename, data filename, metadata filename) tuples
targets: List[Tuple[str, str, str]] = [
(
"iris-part-1-of-6-combined.csv",
"iris-part-1-of-6.csv",
"iris-part-1-of-6.meta",
),
("iris-part-1-2.csv", "iris-part-2-of-6.csv", "iris-part-2-of-6.meta"),
("iris-part-1-2-3.csv", "iris-part-3-of-6.csv", "iris-part-3-of-6.meta"),
("iris-part-1-2-3-4.csv", "iris-part-4-of-6.csv", "iris-part-4-of-6.meta"),
(
"iris-part-1-2-3-4-5.csv",
"iris-part-5-of-6.csv",
"iris-part-5-of-6.meta",
),
("iris_plus.csv", "iris-part-6-of-6.csv", "iris-part-6-of-6.meta"),
]
expected_hashfile = (
LocalPath(archive_fixture.cache_file).dirpath(DEFAULT_HASH_FILE)
if archive_fixture.hash_file is None
else archive_fixture.hash_file
)
assert not os.path.exists(expected_hashfile)
assert not os.path.exists(archive_fixture.cache_file)
assert len(archive_dir.listdir()) == 0
for expected_frame_filename, data_filename, metadata_filename in targets:
assert archive_fixture(
archive_dir,
[os.path.join(get_data_path(), data_filename)],
meta_files=[os.path.join(get_data_path(), metadata_filename)],
cache_filepath=archive_fixture.cache_file,
hash_filepath=archive_fixture.hash_file,
verbose=verbose,
)
assert_captured_outerr(capsys.readouterr(), verbose, False)
expected_frame = DataFrame(
read_csv(
os.path.join(get_data_path(), expected_frame_filename),
dtype=str,
index_col="Index",
)
)
expected_frame.sort_index(inplace=True)
actual_frame = DataFrame(
read_csv(str(archive_fixture.cache_file), dtype=str, index_col="Index")
)
actual_frame.sort_index(inplace=True)
assert_captured_outerr(capsys.readouterr(), False, False)
assert_frame_equal(expected_frame, actual_frame)
assert os.path.exists(expected_hashfile)
assert syphon.check(
archive_fixture.cache_file,
hash_filepath=expected_hashfile,
verbose=verbose,
)
def test_increment_without_metadata_with_schema(
self,
capsys: CaptureFixture,
archive_dir: LocalPath,
archive_fixture: "TestArchive.ArchiveCacheAndHashPassthruChecker",
schema_file: Optional[LocalPath],
verbose: bool,
):
# List of (expected frame filename, data filename) tuples
targets: List[Tuple[str, str]] = [
("iris-part-1-of-6-combined.csv", "iris-part-1-of-6-combined.csv"),
("iris-part-1-2.csv", "iris-part-2-of-6-combined.csv"),
("iris-part-1-2-3.csv", "iris-part-3-of-6-combined.csv"),
("iris-part-1-2-3-4.csv", "iris-part-4-of-6-combined.csv"),
("iris-part-1-2-3-4-5.csv", "iris-part-5-of-6-combined.csv"),
("iris_plus.csv", "iris-part-6-of-6-combined.csv"),
]
expected_hashfile = (
LocalPath(archive_fixture.cache_file).dirpath(DEFAULT_HASH_FILE)
if archive_fixture.hash_file is None
else archive_fixture.hash_file
)
assert not os.path.exists(expected_hashfile)
assert not os.path.exists(archive_fixture.cache_file)
assert len(archive_dir.listdir()) == 0
expected_schemafile = (
archive_dir.join(syphon.schema.DEFAULT_FILE)
if schema_file is None
else schema_file
)
assert not os.path.exists(expected_schemafile)
syphon.init(
SortedDict({"0": "PetalColor", "1": "Species"}), expected_schemafile
)
assert os.path.exists(expected_schemafile)
for expected_frame_filename, data_filename in targets:
assert archive_fixture(
archive_dir,
[os.path.join(get_data_path(), data_filename)],
schema_filepath=schema_file,
cache_filepath=archive_fixture.cache_file,
hash_filepath=archive_fixture.hash_file,
verbose=verbose,
)
assert_captured_outerr(capsys.readouterr(), verbose, False)
expected_frame = DataFrame(
read_csv(
os.path.join(get_data_path(), expected_frame_filename),
dtype=str,
index_col="Index",
)
)
expected_frame.sort_index(inplace=True)
actual_frame = DataFrame(
read_csv(str(archive_fixture.cache_file), dtype=str, index_col="Index")
)
actual_frame.sort_index(inplace=True)
assert_captured_outerr(capsys.readouterr(), False, False)
assert_frame_equal(expected_frame, actual_frame)
assert os.path.exists(expected_hashfile)
assert syphon.check(
archive_fixture.cache_file,
hash_filepath=expected_hashfile,
verbose=verbose,
)
def test_increment_without_metadata_without_schema(
self,
capsys: CaptureFixture,
archive_dir: LocalPath,
archive_fixture: "TestArchive.ArchiveCacheAndHashPassthruChecker",
schema_file: Optional[LocalPath],
verbose: bool,
):
# List of (expected frame filename, data filename) tuples
targets: List[Tuple[str, str]] = [
("iris-part-1-of-6-combined.csv", "iris-part-1-of-6.csv"),
("iris-part-1-2.csv", "iris-part-2-of-6.csv"),
("iris-part-1-2-3.csv", "iris-part-3-of-6.csv"),
("iris-part-1-2-3-4.csv", "iris-part-4-of-6.csv"),
("iris-part-1-2-3-4-5.csv", "iris-part-5-of-6.csv"),
("iris_plus.csv", "iris-part-6-of-6.csv"),
]
expected_hashfile = (
LocalPath(archive_fixture.cache_file).dirpath(DEFAULT_HASH_FILE)
if archive_fixture.hash_file is None
else archive_fixture.hash_file
)
assert not os.path.exists(expected_hashfile)
assert not os.path.exists(archive_fixture.cache_file)
assert len(archive_dir.listdir()) == 0
for expected_frame_filename, data_filename in targets:
assert archive_fixture(
archive_dir,
[os.path.join(get_data_path(), data_filename)],
cache_filepath=archive_fixture.cache_file,
hash_filepath=archive_fixture.hash_file,
verbose=verbose,
)
assert_captured_outerr(capsys.readouterr(), verbose, False)
expected_frame = DataFrame(
read_csv(
os.path.join(get_data_path(), expected_frame_filename),
dtype=str,
index_col="Index",
)
)
del expected_frame["Species"]
del expected_frame["PetalColor"]
expected_frame.sort_index(inplace=True)
actual_frame = DataFrame(
read_csv(str(archive_fixture.cache_file), dtype=str, index_col="Index")
)
actual_frame.sort_index(inplace=True)
assert_captured_outerr(capsys.readouterr(), False, False)
assert_frame_equal(expected_frame, actual_frame)
assert os.path.exists(expected_hashfile)
assert syphon.check(
archive_fixture.cache_file,
hash_filepath=expected_hashfile,
verbose=verbose,
)
def test_no_datafiles(
self, capsys: CaptureFixture, archive_dir: LocalPath, verbose: bool
):
assert not syphon.archive(archive_dir, [], verbose=verbose)
assert_captured_outerr(capsys.readouterr(), verbose, False)
def test_without_metadata_with_schema(
self,
capsys: CaptureFixture,
archive_params: Tuple[str, SortedDict],
archive_dir: LocalPath,
overwrite: bool,
verbose: bool,
):
filename: str
schema: SortedDict
filename, schema = archive_params
datafile = os.path.join(get_data_path(), filename)
schemafile = os.path.join(archive_dir, syphon.schema.DEFAULT_FILE)
syphon.init(schema, schemafile)
expected_df = DataFrame(read_csv(datafile, dtype=str))
expected_df.sort_values(list(expected_df.columns), inplace=True)
expected_df.reset_index(drop=True, inplace=True)
expected_paths: SortedList = _get_expected_paths(
archive_dir, schema, expected_df, filename
)
if overwrite:
for e in expected_paths:
os.makedirs(os.path.dirname(e), exist_ok=True)
with open(e, mode="w") as fd:
fd.write(rand_string())
assert syphon.archive(
archive_dir,
[datafile],
schema_filepath=schemafile,
overwrite=overwrite,
verbose=verbose,
)
assert not os.path.exists(os.path.join(os.path.dirname(datafile), "#lock"))
actual_frame = DataFrame()
actual_paths = SortedList()
for root, _, files in os.walk(archive_dir):
for f in files:
if ".csv" in f:
filepath: str = os.path.join(root, f)
actual_paths.add(filepath)
actual_frame = concat(
[actual_frame, DataFrame(read_csv(filepath, dtype=str))]
)
"""Tests for the sdv.constraints.tabular module."""
import uuid
from datetime import datetime
from unittest.mock import Mock
import numpy as np
import pandas as pd
import pytest
from sdv.constraints.errors import MissingConstraintColumnError
from sdv.constraints.tabular import (
Between, ColumnFormula, CustomConstraint, GreaterThan, Negative, OneHotEncoding, Positive,
Rounding, Unique, UniqueCombinations)
def dummy_transform_table(table_data):
return table_data
def dummy_reverse_transform_table(table_data):
return table_data
def dummy_is_valid_table(table_data):
return [True] * len(table_data)
def dummy_transform_table_column(table_data, column):
return table_data
def dummy_reverse_transform_table_column(table_data, column):
return table_data
def dummy_is_valid_table_column(table_data, column):
return [True] * len(table_data[column])
def dummy_transform_column(column_data):
return column_data
def dummy_reverse_transform_column(column_data):
return column_data
def dummy_is_valid_column(column_data):
return [True] * len(column_data)
class TestCustomConstraint():
def test___init__(self):
"""Test the ``CustomConstraint.__init__`` method.
The ``transform``, ``reverse_transform`` and ``is_valid`` methods
should be replaced by the given ones, importing them if necessary.
Setup:
- Create dummy functions (created above this class).
Input:
- dummy transform and revert_transform + is_valid FQN
Output:
- Instance with all the methods replaced by the dummy versions.
"""
is_valid_fqn = __name__ + '.dummy_is_valid_table'
# Run
instance = CustomConstraint(
transform=dummy_transform_table,
reverse_transform=dummy_reverse_transform_table,
is_valid=is_valid_fqn
)
# Assert
assert instance._transform == dummy_transform_table
assert instance._reverse_transform == dummy_reverse_transform_table
assert instance._is_valid == dummy_is_valid_table
def test__run_transform_table(self):
"""Test the ``CustomConstraint._run`` method.
The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "table" based functions.
Setup:
- Pass dummy transform function with ``table_data`` argument.
Side Effects:
- Run transform function once with ``table_data`` as input.
Output:
- applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_transform_mock = Mock(side_effect=dummy_transform_table,
return_value=table_data)
# Run
instance = CustomConstraint(transform=dummy_transform_mock)
transformed = instance.transform(table_data)
# Asserts
called = dummy_transform_mock.call_args
dummy_transform_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
pd.testing.assert_frame_equal(transformed, dummy_transform_mock.return_value)
def test__run_reverse_transform_table(self):
"""Test the ``CustomConstraint._run`` method.
The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "table" based functions.
Setup:
- Pass dummy reverse transform function with ``table_data`` argument.
Side Effects:
- Run reverse transform function once with ``table_data`` as input.
Output:
- applied identity transformation "table_data = reverse_transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_reverse_transform_mock = Mock(side_effect=dummy_reverse_transform_table,
return_value=table_data)
# Run
instance = CustomConstraint(reverse_transform=dummy_reverse_transform_mock)
reverse_transformed = instance.reverse_transform(table_data)
# Asserts
called = dummy_reverse_transform_mock.call_args
dummy_reverse_transform_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
pd.testing.assert_frame_equal(
reverse_transformed, dummy_reverse_transform_mock.return_value)
def test__run_is_valid_table(self):
"""Test the ``CustomConstraint._run_is_valid`` method.
The ``_run_is_valid`` method executes ``is_valid`` based on
the signature of the functions. In this test, we evaluate
the execution of "table" based functions.
Setup:
- Pass dummy is valid function with ``table_data`` argument.
Side Effects:
- Run is valid function once with ``table_data`` as input.
Output:
- Return a list of [True] of length ``table_data``.
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_is_valid_mock = Mock(side_effect=dummy_is_valid_table)
# Run
instance = CustomConstraint(is_valid=dummy_is_valid_mock)
is_valid = instance.is_valid(table_data)
# Asserts
expected_out = [True] * len(table_data)
called = dummy_is_valid_mock.call_args
dummy_is_valid_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
np.testing.assert_array_equal(is_valid, expected_out)
def test__run_transform_table_column(self):
"""Test the ``CustomConstraint._run`` method.
The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "table" and "column" based functions.
Setup:
- Pass dummy transform function with ``table_data`` and ``column`` arguments.
Side Effects:
- Run transform function once with ``table_data`` and ``column`` as input.
Output:
- applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_transform_mock = Mock(side_effect=dummy_transform_table_column,
return_value=table_data)
# Run
instance = CustomConstraint(columns='a', transform=dummy_transform_mock)
transformed = instance.transform(table_data)
# Asserts
called = dummy_transform_mock.call_args
assert called[0][1] == 'a'
dummy_transform_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
pd.testing.assert_frame_equal(transformed, dummy_transform_mock.return_value)
def test__run_reverse_transform_table_column(self):
"""Test the ``CustomConstraint._run`` method.
The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "table" and "column" based functions.
Setup:
- Pass dummy reverse transform function with ``table_data`` and ``column`` arguments.
Side Effects:
- Run reverse transform function once with ``table_data`` and ``column`` as input.
Output:
- applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_reverse_transform_mock = Mock(side_effect=dummy_reverse_transform_table_column,
return_value=table_data)
# Run
instance = CustomConstraint(columns='a', reverse_transform=dummy_reverse_transform_mock)
reverse_transformed = instance.reverse_transform(table_data)
# Asserts
called = dummy_reverse_transform_mock.call_args
assert called[0][1] == 'a'
dummy_reverse_transform_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
pd.testing.assert_frame_equal(
reverse_transformed, dummy_reverse_transform_mock.return_value)
def test__run_is_valid_table_column(self):
"""Test the ``CustomConstraint._run_is_valid`` method.
The ``_run_is_valid`` method executes ``is_valid`` based on
the signature of the functions. In this test, we evaluate
the execution of "table" and "column" based functions.
Setup:
- Pass dummy is valid function with ``table_data`` and ``column`` argument.
Side Effects:
- Run is valid function once with ``table_data`` and ``column`` as input.
Output:
- Return a list of [True] of length ``table_data``.
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_is_valid_mock = Mock(side_effect=dummy_is_valid_table_column)
# Run
instance = CustomConstraint(columns='a', is_valid=dummy_is_valid_mock)
is_valid = instance.is_valid(table_data)
# Asserts
expected_out = [True] * len(table_data)
called = dummy_is_valid_mock.call_args
assert called[0][1] == 'a'
dummy_is_valid_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
np.testing.assert_array_equal(is_valid, expected_out)
def test__run_transform_column(self):
"""Test the ``CustomConstraint._run`` method.
The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "column" based functions.
Setup:
- Pass dummy transform function with ``column_data`` argument.
Side Effects:
- Run transform function twice, once with the attempt of
``table_data`` and ``column`` and second with ``column_data`` as input.
Output:
- applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_transform_mock = Mock(side_effect=dummy_transform_column,
return_value=table_data)
# Run
instance = CustomConstraint(columns='a', transform=dummy_transform_mock)
transformed = instance.transform(table_data)
# Asserts
called = dummy_transform_mock.call_args_list
assert len(called) == 2
# call 1 (try)
assert called[0][0][1] == 'a'
pd.testing.assert_frame_equal(called[0][0][0], table_data)
# call 2 (catch TypeError)
pd.testing.assert_series_equal(called[1][0][0], table_data['a'])
pd.testing.assert_frame_equal(transformed, dummy_transform_mock.return_value)
def test__run_reverse_transform_column(self):
"""Test the ``CustomConstraint._run`` method.
The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "column" based functions.
Setup:
- Pass dummy reverse transform function with ``column_data`` argument.
Side Effects:
- Run reverse transform function twice, once with the attempt of
``table_data`` and ``column`` and second with ``column_data`` as input.
Output:
- Applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_reverse_transform_mock = Mock(side_effect=dummy_reverse_transform_column,
return_value=table_data)
# Run
instance = CustomConstraint(columns='a', reverse_transform=dummy_reverse_transform_mock)
reverse_transformed = instance.reverse_transform(table_data)
# Asserts
called = dummy_reverse_transform_mock.call_args_list
assert len(called) == 2
# call 1 (try)
assert called[0][0][1] == 'a'
pd.testing.assert_frame_equal(called[0][0][0], table_data)
# call 2 (catch TypeError)
pd.testing.assert_series_equal(called[1][0][0], table_data['a'])
pd.testing.assert_frame_equal(
reverse_transformed, dummy_reverse_transform_mock.return_value)
def test__run_is_valid_column(self):
"""Test the ``CustomConstraint._run_is_valid`` method.
The ``_run_is_valid`` method executes ``is_valid`` based on
the signature of the functions. In this test, we evaluate
the execution of "column" based functions.
Setup:
- Pass dummy is valid function with ``column_data`` argument.
Side Effects:
- Run is valid function twice, once with the attempt of
``table_data`` and ``column`` and second with ``column_data`` as input.
Output:
- Return a list of [True] of length ``table_data``.
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_is_valid_mock = Mock(side_effect=dummy_is_valid_column)
# Run
instance = CustomConstraint(columns='a', is_valid=dummy_is_valid_mock)
is_valid = instance.is_valid(table_data)
# Asserts
expected_out = [True] * len(table_data)
called = dummy_is_valid_mock.call_args_list
assert len(called) == 2
# call 1 (try)
assert called[0][0][1] == 'a'
pd.testing.assert_frame_equal(called[0][0][0], table_data)
# call 2 (catch TypeError)
pd.testing.assert_series_equal(called[1][0][0], table_data['a'])
np.testing.assert_array_equal(is_valid, expected_out)
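# Illustrative sketch (an assumption about sdv's behaviour, not part of the test suite):
# ``CustomConstraint`` dispatches on the callable's signature. A "table" function receives
# the full DataFrame, a "table/column" function receives ``(table_data, column)``, and a
# plain "column" function receives only the Series, whose result is written back to that
# column. For example:
#
#   def add_one(column_data):
#       return column_data + 1
#
#   constraint = CustomConstraint(columns='a', transform=add_one)
#   constraint.transform(pd.DataFrame({'a': [1, 2, 3]}))  # column 'a' becomes [2, 3, 4]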
class TestUniqueCombinations():
def test___init__(self):
"""Test the ``UniqueCombinations.__init__`` method.
It is expected to create a new Constraint instance and receive the names of
the columns that need to produce unique combinations.
Side effects:
- instance._columns == columns
"""
# Setup
columns = ['b', 'c']
# Run
instance = UniqueCombinations(columns=columns)
# Assert
assert instance._columns == columns
def test___init__sets_rebuild_columns_if_not_reject_sampling(self):
"""Test the ``UniqueCombinations.__init__`` method.
The rebuild columns should only be set if the ``handling_strategy``
is not ``reject_sampling``.
Side effects:
- instance.rebuild_columns are set
"""
# Setup
columns = ['b', 'c']
# Run
instance = UniqueCombinations(columns=columns, handling_strategy='transform')
# Assert
assert instance.rebuild_columns == tuple(columns)
def test___init__does_not_set_rebuild_columns_reject_sampling(self):
"""Test the ``UniqueCombinations.__init__`` method.
The rebuild columns should not be set if the ``handling_strategy``
is ``reject_sampling``.
Side effects:
- instance.rebuild_columns are empty
"""
# Setup
columns = ['b', 'c']
# Run
instance = UniqueCombinations(columns=columns, handling_strategy='reject_sampling')
# Assert
assert instance.rebuild_columns == ()
def test___init__with_one_column(self):
"""Test the ``UniqueCombinations.__init__`` method with only one constraint column.
Expect a ``ValueError`` because UniqueCombinations requires at least two
constraint columns.
Side effects:
- A ValueError is raised
"""
# Setup
columns = ['c']
# Run and assert
with pytest.raises(ValueError):
UniqueCombinations(columns=columns)
def test_fit(self):
"""Test the ``UniqueCombinations.fit`` method.
The ``UniqueCombinations.fit`` method is expected to:
- Call ``UniqueCombinations._valid_separator``.
- Find a valid separator for the data and generate the joint column name.
Input:
- Table data (pandas.DataFrame)
"""
# Setup
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
# Run
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
instance.fit(table_data)
# Asserts
expected_combinations = pd.DataFrame({
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
assert instance._separator == '#'
assert instance._joint_column == 'b#c'
pd.testing.assert_frame_equal(instance._combinations, expected_combinations)
def test_is_valid_true(self):
"""Test the ``UniqueCombinations.is_valid`` method.
If the input data satisfies the constraint, result is a series of ``True`` values.
Input:
- Table data (pandas.DataFrame), satisfying the constraint.
Output:
- Series of ``True`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.is_valid(table_data)
expected_out = pd.Series([True, True, True], name='b#c')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_false(self):
"""Test the ``UniqueCombinations.is_valid`` method.
If the input data doesn't satisfy the constraint, result is a series of ``False`` values.
Input:
- Table data (pandas.DataFrame), which does not satisfy the constraint.
Output:
- Series of ``False`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
incorrect_table = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['D', 'E', 'F'],
'c': ['g', 'h', 'i']
})
out = instance.is_valid(incorrect_table)
# Assert
expected_out = pd.Series([False, False, False], name='b#c')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_non_string_true(self):
"""Test the ``UniqueCombinations.is_valid`` method with non string columns.
If the input data satisfies the constraint, result is a series of ``True`` values.
Input:
- Table data (pandas.DataFrame), satisfying the constraint.
Output:
- Series of ``True`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.is_valid(table_data)
expected_out = pd.Series([True, True, True], name='b#c#d')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_non_string_false(self):
"""Test the ``UniqueCombinations.is_valid`` method with non string columns.
If the input data doesn't satisfy the constraint, result is a series of ``False`` values.
Input:
- Table data (pandas.DataFrame), which does not satisfy the constraint.
Output:
- Series of ``False`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
incorrect_table = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [6, 7, 8],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
out = instance.is_valid(incorrect_table)
# Assert
expected_out = pd.Series([False, False, False], name='b#c#d')
pd.testing.assert_series_equal(expected_out, out)
def test_transform(self):
"""Test the ``UniqueCombinations.transform`` method.
It is expected to return a Table data with the columns concatenated by the separator.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data transformed, with the columns concatenated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.transform(table_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out_a = pd.Series(['a', 'b', 'c'], name='a')
pd.testing.assert_series_equal(expected_out_a, out['a'])
try:
[uuid.UUID(u) for c, u in out['b#c'].items()]
except ValueError:
assert False
def test_transform_non_string(self):
"""Test the ``UniqueCombinations.transform`` method with non strings.
It is expected to return a Table data with the columns concatenated by the separator.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data transformed, with the columns as UUIDs.
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.transform(table_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out_a = pd.Series(['a', 'b', 'c'], name='a')
pd.testing.assert_series_equal(expected_out_a, out['a'])
try:
[uuid.UUID(u) for c, u in out['b#c#d'].items()]
except ValueError:
assert False
def test_transform_not_all_columns_provided(self):
"""Test the ``UniqueCombinations.transform`` method.
If some of the columns needed for the transform are missing, and
``fit_columns_model`` is False, it will raise a ``MissingConstraintColumnError``.
Input:
- Table data (pandas.DataFrame)
Output:
- Raises ``MissingConstraintColumnError``.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns, fit_columns_model=False)
instance.fit(table_data)
# Run/Assert
with pytest.raises(MissingConstraintColumnError):
instance.transform(pd.DataFrame({'a': ['a', 'b', 'c']}))
def test_reverse_transform(self):
"""Test the ``UniqueCombinations.reverse_transform`` method.
It is expected to return the original data separating the concatenated columns.
Input:
- Table data transformed (pandas.DataFrame)
Output:
- Original table data, with the concatenated columns separated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
transformed_data = instance.transform(table_data)
out = instance.reverse_transform(transformed_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_non_string(self):
"""Test the ``UniqueCombinations.reverse_transform`` method with a non string column.
It is expected to return the original data separating the concatenated columns.
Input:
- Table data transformed (pandas.DataFrame)
Output:
- Original table data, with the concatenated columns separated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
transformed_data = instance.transform(table_data)
out = instance.reverse_transform(transformed_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
pd.testing.assert_frame_equal(expected_out, out)
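# Illustrative sketch (an assumption drawn from the tests below, not part of the suite):
# ``GreaterThan`` compares one or more ``low`` columns against one or more ``high``
# columns, or against a scalar when ``scalar='low'`` or ``scalar='high'``. For example:
#
#   GreaterThan(low='a', high='b')                            # require b >= a
#   GreaterThan(low='a', high=0, scalar='high')               # require a <= 0
#   GreaterThan(low=0, high='a', scalar='low', strict=True)   # require a > 0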
class TestGreaterThan():
def test__validate_scalar(self):
"""Test the ``_validate_scalar`` method.
This method validates the inputs and transforms them into
the correct format.
Input:
- scalar_column = 0
- column_names = 'b'
Output:
- column_names == ['b']
"""
# Setup
scalar_column = 0
column_names = 'b'
scalar = 'high'
# Run
out = GreaterThan._validate_scalar(scalar_column, column_names, scalar)
# Assert
assert out == ['b']
def test__validate_scalar_list(self):
"""Test the ``_validate_scalar`` method.
This method validates the inputs and transforms them into
the correct format.
Input:
- scalar_column = 0
- column_names = ['b']
Output:
- column_names == ['b']
"""
# Setup
scalar_column = 0
column_names = ['b']
scalar = 'low'
# Run
out = GreaterThan._validate_scalar(scalar_column, column_names, scalar)
# Assert
assert out == ['b']
def test__validate_scalar_error(self):
"""Test the ``_validate_scalar`` method.
This method raises an error when the scalar column is a list.
Input:
- scalar_column = 0
- column_names = 'b'
Side effect:
- Raise error since the scalar is a list
"""
# Setup
scalar_column = [0]
column_names = 'b'
scalar = 'high'
# Run / Assert
with pytest.raises(TypeError):
GreaterThan._validate_scalar(scalar_column, column_names, scalar)
def test__validate_inputs_high_is_scalar(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = 'a'
- high = 3
- scalar = 'high'
Output:
- low == ['a']
- high == 3
- constraint_columns = ('a')
"""
# Setup / Run
low, high, constraint_columns = GreaterThan._validate_inputs(
low='a', high=3, scalar='high', drop=None)
# Assert
assert low == ['a']
assert high == 3
assert constraint_columns == ('a',)
def test__validate_inputs_low_is_scalar(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = 3
- high = 'b'
- scalar = 'low'
- drop = None
Output:
- low == 3
- high == ['b']
- constraint_columns = ('b')
"""
# Setup / Run
low, high, constraint_columns = GreaterThan._validate_inputs(
low=3, high='b', scalar='low', drop=None)
# Assert
assert low == 3
assert high == ['b']
assert constraint_columns == ('b',)
def test__validate_inputs_scalar_none(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = 'a'
- high = 3 # where 3 is a column name
- scalar = None
- drop = None
Output:
- low == ['a']
- high == [3]
- constraint_columns = ('a', 3)
"""
# Setup / Run
low, high, constraint_columns = GreaterThan._validate_inputs(
low='a', high=3, scalar=None, drop=None)
# Assert
assert low == ['a']
assert high == [3]
assert constraint_columns == ('a', 3)
def test__validate_inputs_scalar_none_lists(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = ['a']
- high = ['b', 'c']
- scalar = None
- drop = None
Output:
- low == ['a']
- high == ['b', 'c']
- constraint_columns = ('a', 'b', 'c')
"""
# Setup / Run
low, high, constraint_columns = GreaterThan._validate_inputs(
low=['a'], high=['b', 'c'], scalar=None, drop=None)
# Assert
assert low == ['a']
assert high == ['b', 'c']
assert constraint_columns == ('a', 'b', 'c')
def test__validate_inputs_scalar_none_two_lists(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = ['a', 0]
- high = ['b', 'c']
- scalar = None
- drop = None
Side effect:
- Raise error because both high and low are more than one column
"""
# Run / Assert
with pytest.raises(ValueError):
GreaterThan._validate_inputs(low=['a', 0], high=['b', 'c'], scalar=None, drop=None)
def test__validate_inputs_scalar_unknown(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = 'a'
- high = 'b'
- scalar = 'unknown'
- drop = None
Side effect:
- Raise error because scalar is unknown
"""
# Run / Assert
with pytest.raises(ValueError):
GreaterThan._validate_inputs(low='a', high='b', scalar='unknown', drop=None)
def test__validate_inputs_drop_error_low(self):
"""Test the ``_validate_inputs`` method.
Make sure the method raises an error if ``drop``==``scalar``
when ``scalar`` is not ``None``.
Input:
- low = 2
- high = 'b'
- scalar = 'low'
- drop = 'low'
Side effect:
- Raise error because ``drop`` is the same as ``scalar``
"""
# Run / Assert
with pytest.raises(ValueError):
GreaterThan._validate_inputs(low=2, high='b', scalar='low', drop='low')
def test__validate_inputs_drop_error_high(self):
"""Test the ``_validate_inputs`` method.
Make sure the method raises an error if ``drop``==``scalar``
when ``scalar`` is not ``None``.
Input:
- low = 'a'
- high = 3
- scalar = 'high'
- drop = 'high'
Side effect:
- Raise error because ``drop`` is the same as ``scalar``
"""
# Run / Assert
with pytest.raises(ValueError):
GreaterThan._validate_inputs(low='a', high=3, scalar='high', drop='high')
def test__validate_inputs_drop_success(self):
"""Test the ``_validate_inputs`` method.
Make sure the method raises an error if ``drop``==``scalar``
when ``scalar`` is not ``None``.
Input:
- low = 'a'
- high = 'b'
- scalar = 'high'
- drop = 'low'
Output:
- low = ['a']
- high = 0
- constraint_columns == ('a')
"""
# Run / Assert
low, high, constraint_columns = GreaterThan._validate_inputs(
low='a', high=0, scalar='high', drop='low')
assert low == ['a']
assert high == 0
assert constraint_columns == ('a',)
def test___init___(self):
"""Test the ``GreaterThan.__init__`` method.
The passed arguments should be stored as attributes.
Input:
- low = 'a'
- high = 'b'
Side effects:
- instance._low == 'a'
- instance._high == 'b'
- instance._strict == False
"""
# Run
instance = GreaterThan(low='a', high='b')
# Asserts
assert instance._low == ['a']
assert instance._high == ['b']
assert instance._strict is False
assert instance._scalar is None
assert instance._drop is None
assert instance.constraint_columns == ('a', 'b')
def test___init__sets_rebuild_columns_if_not_reject_sampling(self):
"""Test the ``GreaterThan.__init__`` method.
The rebuild columns should only be set if the ``handling_strategy``
is not ``reject_sampling``.
Side effects:
- instance.rebuild_columns are set
"""
# Run
instance = GreaterThan(low='a', high='b', handling_strategy='transform')
# Assert
assert instance.rebuild_columns == ['b']
def test___init__does_not_set_rebuild_columns_reject_sampling(self):
"""Test the ``GreaterThan.__init__`` method.
The rebuild columns should not be set if the ``handling_strategy``
is ``reject_sampling``.
Side effects:
- instance.rebuild_columns are empty
"""
# Run
instance = GreaterThan(low='a', high='b', handling_strategy='reject_sampling')
# Assert
assert instance.rebuild_columns == ()
def test___init___high_is_scalar(self):
"""Test the ``GreaterThan.__init__`` method.
The passed arguments should be stored as attributes. Make sure ``scalar``
is set to ``'high'``.
Input:
- low = 'a'
- high = 0
- strict = True
- drop = 'low'
- scalar = 'high'
Side effects:
- instance._low == 'a'
- instance._high == 0
- instance._strict == True
- instance._drop = 'low'
- instance._scalar == 'high'
"""
# Run
instance = GreaterThan(low='a', high=0, strict=True, drop='low', scalar='high')
# Asserts
assert instance._low == ['a']
assert instance._high == 0
assert instance._strict is True
assert instance._scalar == 'high'
assert instance._drop == 'low'
assert instance.constraint_columns == ('a',)
def test___init___low_is_scalar(self):
"""Test the ``GreaterThan.__init__`` method.
The passed arguments should be stored as attributes. Make sure ``scalar``
is set to ``'low'``.
Input:
- low = 0
- high = 'a'
- strict = True
- drop = 'high'
- scalar = 'low'
Side effects:
- instance._low == 0
- instance._high == 'a'
- instance._strict == True
- instance._drop = 'high'
- instance._scalar == 'low'
"""
# Run
instance = GreaterThan(low=0, high='a', strict=True, drop='high', scalar='low')
# Asserts
assert instance._low == 0
assert instance._high == ['a']
assert instance._strict is True
assert instance._scalar == 'low'
assert instance._drop == 'high'
assert instance.constraint_columns == ('a',)
def test___init___strict_is_false(self):
"""Test the ``GreaterThan.__init__`` method.
Ensure that ``operator`` is set to ``np.greater_equal``
when ``strict`` is set to ``False``.
Input:
- low = 'a'
- high = 'b'
- strict = False
"""
# Run
instance = GreaterThan(low='a', high='b', strict=False)
# Assert
assert instance.operator == np.greater_equal
def test___init___strict_is_true(self):
"""Test the ``GreaterThan.__init__`` method.
Ensure that ``operator`` is set to ``np.greater``
when ``strict`` is set to ``True``.
Input:
- low = 'a'
- high = 'b'
- strict = True
"""
# Run
instance = GreaterThan(low='a', high='b', strict=True)
# Assert
assert instance.operator == np.greater
def test__init__get_columns_to_reconstruct_default(self):
"""Test the ``GreaterThan._get_columns_to_reconstruct`` method.
This method returns:
- ``_high`` if drop is "high"
- ``_low`` if drop is "low"
- ``_low`` if scalar is "high"
- ``_high`` otherwise
Setup:
- low = 'a'
- high = 'b'
Side effects:
- self._columns_to_reconstruct == ['b']
"""
# Setup
instance = GreaterThan(low='a', high='b')
assert instance._columns_to_reconstruct == ['b']
def test__init__get_columns_to_reconstruct_drop_high(self):
"""Test the ``GreaterThan._get_columns_to_reconstruct`` method.
This method returns:
- ``_high`` if drop is "high"
- ``_low`` if drop is "low"
- ``_low`` if scalar is "high"
- ``_high`` otherwise
Setup:
- low = 'a'
- high = 'b'
- drop = 'high'
Side effects:
- self._columns_to_reconstruct == ['b']
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='high')
assert instance._columns_to_reconstruct == ['b']
def test__init__get_columns_to_reconstruct_drop_low(self):
"""Test the ``GreaterThan._get_columns_to_reconstruct`` method.
This method returns:
- ``_high`` if drop is "high"
- ``_low`` if drop is "low"
- ``_low`` if scalar is "high"
- ``_high`` otherwise
Setup:
- low = 'a'
- high = 'b'
- drop = 'low'
Side effects:
- self._columns_to_reconstruct == ['a']
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='low')
assert instance._columns_to_reconstruct == ['a']
def test__init__get_columns_to_reconstruct_scalar_high(self):
"""Test the ``GreaterThan._get_columns_to_reconstruct`` method.
This method returns:
- ``_high`` if drop is "high"
- ``_low`` if drop is "low"
- ``_low`` if scalar is "high"
- ``_high`` otherwise
Setup:
- low = 'a'
- high = 0
- scalar = 'high'
Side effects:
- self._columns_to_reconstruct == ['a']
"""
# Setup
instance = GreaterThan(low='a', high=0, scalar='high')
assert instance._columns_to_reconstruct == ['a']
def test__get_value_column_list(self):
"""Test the ``GreaterThan._get_value`` method.
This method returns a scalar or a ndarray of values
depending on the type of the ``field``.
Input:
- Table with given data.
- field = 'low'
"""
# Setup
instance = GreaterThan(low='a', high='b')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
out = instance._get_value(table_data, 'low')
# Assert
expected = table_data[['a']].values
np.testing.assert_array_equal(out, expected)
def test__get_value_scalar(self):
"""Test the ``GreaterThan._get_value`` method.
This method returns a scalar or a ndarray of values
depending on the type of the ``field``.
Input:
- Table with given data.
- field = 'low'
- scalar = 'low'
"""
# Setup
instance = GreaterThan(low=3, high='b', scalar='low')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
out = instance._get_value(table_data, 'low')
# Assert
expected = 3
assert out == expected
def test__get_diff_columns_name_low_is_scalar(self):
"""Test the ``GreaterThan._get_diff_columns_name`` method.
The returned names should be the given column names with a '#' token appended.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low=0, high=['a', 'b#'], scalar='low')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b#': [4, 5, 6]
})
out = instance._get_diff_columns_name(table_data)
# Assert
expected = ['a#', 'b##']
assert out == expected
def test__get_diff_columns_name_high_is_scalar(self):
"""Test the ``GreaterThan._get_diff_columns_name`` method.
The returned names should be the given column names with a '#' token appended.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=0, scalar='high')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6]
})
out = instance._get_diff_columns_name(table_data)
# Assert
expected = ['a#', 'b#']
assert out == expected
def test__get_diff_columns_name_scalar_is_none(self):
"""Test the ``GreaterThan._get_diff_columns_name`` method.
The returned name should be the two column names joined by a '#' token.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low='a', high='b#', scalar=None)
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b#': [4, 5, 6]
})
out = instance._get_diff_columns_name(table_data)
# Assert
expected = ['b##a']
assert out == expected
def test__get_diff_columns_name_scalar_is_none_multi_column_low(self):
"""Test the ``GreaterThan._get_diff_columns_name`` method.
Each returned name should be a pair of column names joined by a '#' token.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low=['a#', 'c'], high='b', scalar=None)
table_data = pd.DataFrame({
'a#': [1, 2, 4],
'b': [4, 5, 6],
'c#': [7, 8, 9]
})
out = instance._get_diff_columns_name(table_data)
# Assert
expected = ['a##b', 'c#b']
assert out == expected
def test__get_diff_columns_name_scalar_is_none_multi_column_high(self):
"""Test the ``GreaterThan._get_diff_columns_name`` method.
Each returned name should be a pair of column names joined by a '#' token.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low=0, high=['b', 'c'], scalar=None)
table_data = pd.DataFrame({
0: [1, 2, 4],
'b': [4, 5, 6],
'c#': [7, 8, 9]
})
out = instance._get_diff_columns_name(table_data)
# Assert
expected = ['b#0', 'c#0']
assert out == expected
def test__check_columns_exist_success(self):
"""Test the ``GreaterThan._check_columns_exist`` method.
This method raises an error if the specified columns in
``low`` or ``high`` do not exist.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run / Assert
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6]
})
instance._check_columns_exist(table_data, 'low')
instance._check_columns_exist(table_data, 'high')
def test__check_columns_exist_error(self):
"""Test the ``GreaterThan._check_columns_exist`` method.
This method raises an error if the specified columns in
``low`` or ``high`` do not exist.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low='a', high='c')
# Run / Assert
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6]
})
instance._check_columns_exist(table_data, 'low')
with pytest.raises(KeyError):
instance._check_columns_exist(table_data, 'high')
def test__fit_only_one_datetime_arg(self):
"""Test the ``Between._fit`` method by passing in only one arg as datetime.
If only one of the high / low args is a datetime type, expect a ValueError.
Input:
- low is an int column
- high is a datetime
Output:
- n/a
Side Effects:
- ValueError
"""
# Setup
instance = GreaterThan(low='a', high=pd.to_datetime('2021-01-01'), scalar='high')
# Run and assert
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
with pytest.raises(ValueError):
instance._fit(table_data)
def test__fit__low_is_not_found_and_scalar_is_none(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should raise an error if
the ``low`` is set to a value not seen in ``table_data``.
Input:
- Table without ``low`` in columns.
Side Effect:
- KeyError.
"""
# Setup
instance = GreaterThan(low=3, high='b')
# Run / Assert
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
with pytest.raises(KeyError):
instance._fit(table_data)
def test__fit__high_is_not_found_and_scalar_is_none(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should raise an error if
the ``high`` is set to a value not seen in ``table_data``.
Input:
- Table without ``high`` in columns.
Side Effect:
- KeyError.
"""
# Setup
instance = GreaterThan(low='a', high=3)
# Run / Assert
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
with pytest.raises(KeyError):
instance._fit(table_data)
def test__fit__low_is_not_found_scalar_is_high(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should raise an error if
the ``low`` is set to a value not seen in ``table_data``.
Input:
- Table without ``low`` in columns.
Side Effect:
- KeyError.
"""
# Setup
instance = GreaterThan(low='c', high=3, scalar='high')
# Run / Assert
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
with pytest.raises(KeyError):
instance._fit(table_data)
def test__fit__high_is_not_found_scalar_is_high(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should raise an error if
the ``high`` is set to a value not seen in ``table_data``.
Input:
- Table without ``high`` in columns.
Side Effect:
- KeyError.
"""
# Setup
instance = GreaterThan(low=3, high='c', scalar='low')
# Run / Assert
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
with pytest.raises(KeyError):
instance._fit(table_data)
def test__fit__columns_to_reconstruct_drop_high(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_columns_to_reconstruct``
to ``instance._high`` if ``instance._drop`` is `high`.
Input:
- Table with two columns.
Side Effect:
- ``_columns_to_reconstruct`` is ``instance._high``
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='high')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._columns_to_reconstruct == ['b']
def test__fit__columns_to_reconstruct_drop_low(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_columns_to_reconstruct``
to ``instance._low`` if ``instance._drop`` is `low`.
Input:
- Table with two columns.
Side Effect:
- ``_columns_to_reconstruct`` is ``instance._low``
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='low')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._columns_to_reconstruct == ['a']
def test__fit__columns_to_reconstruct_default(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_columns_to_reconstruct``
to `high` by default.
Input:
- Table with two columns.
Side Effect:
- ``_columns_to_reconstruct`` is ``instance._high``
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._columns_to_reconstruct == ['b']
def test__fit__columns_to_reconstruct_high_is_scalar(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_columns_to_reconstruct``
to `low` if ``instance._scalar`` is ``'high'``.
Input:
- Table with two columns.
Side Effect:
- ``_columns_to_reconstruct`` is ``instance._low``
"""
# Setup
instance = GreaterThan(low='a', high='b', scalar='high')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._columns_to_reconstruct == ['a']
def test__fit__columns_to_reconstruct_low_is_scalar(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_columns_to_reconstruct``
to `high` if ``instance._scalar`` is ``'low'``.
Input:
- Table with two columns.
Side Effect:
- ``_columns_to_reconstruct`` is ``instance._high``
"""
# Setup
instance = GreaterThan(low='a', high='b', scalar='low')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._columns_to_reconstruct == ['b']
def test__fit__diff_columns_one_column(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_diff_columns``
to the one column in ``instance.constraint_columns`` plus a
token if there is only one column in that set.
Input:
- Table with one column.
Side Effect:
- ``_diff_columns`` == ['a#']
"""
# Setup
instance = GreaterThan(low='a', high=3, scalar='high')
# Run
table_data = pd.DataFrame({'a': [1, 2, 3]})
instance._fit(table_data)
# Asserts
assert instance._diff_columns == ['a#']
def test__fit__diff_columns_multiple_columns(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_diff_columns``
to the two columns in ``instance.constraint_columns`` separated
by a token if both columns are in that set.
Input:
- Table with two column.
Side Effect:
- ``_diff_columns`` == ['b#a']
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._diff_columns == ['b#a']
def test__fit_int(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_scalar`` is ``None``.
Input:
- Table that contains two constrained columns with the high one
being made of integers.
Side Effect:
- The _dtype attribute gets `int` as the value even if the low
column has a different dtype.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
instance._fit(table_data)
# Asserts
assert all([dtype.kind == 'i' for dtype in instance._dtype])
def test__fit_float(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_scalar`` is ``None``.
Input:
- Table that contains two constrained columns with the high one
being made of float values.
Side Effect:
- The _dtype attribute gets `float` as the value even if the low
column has a different dtype.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9]
})
instance._fit(table_data)
# Asserts
assert all([dtype.kind == 'f' for dtype in instance._dtype])
def test__fit_datetime(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_scalar`` is ``None``.
Input:
- Table that contains two constrained columns of datetimes.
Side Effect:
- The _dtype attribute gets `datetime` as the value.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01']),
'b': pd.to_datetime(['2020-01-02'])
})
instance._fit(table_data)
# Asserts
assert all([dtype.kind == 'M' for dtype in instance._dtype])
def test__fit_type__high_is_scalar(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should learn and store the
``dtype`` of the ``low`` column as the ``_dtype`` attribute
if ``_scalar`` is ``'high'``.
Input:
- Table that contains two constrained columns with the low one
being made of floats.
Side Effect:
- The _dtype attribute gets `float` as the value.
"""
# Setup
instance = GreaterThan(low='a', high=3, scalar='high')
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
instance._fit(table_data)
# Asserts
assert all([dtype.kind == 'f' for dtype in instance._dtype])
def test__fit_type__low_is_scalar(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_scalar`` is ``'low'``.
Input:
- Table that contains two constrained columns with the high one
being made of floats.
Side Effect:
- The _dtype attribute gets `float` as the value.
"""
# Setup
instance = GreaterThan(low=3, high='b', scalar='low')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9]
})
instance._fit(table_data)
# Asserts
assert all([dtype.kind == 'f' for dtype in instance._dtype])
def test__fit_high_is_scalar_multi_column(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should learn and store the
``dtype`` of the ``low`` columns as the ``_dtype`` attribute
when ``high`` is a scalar and ``low`` is a list of columns.
Input:
- Table that contains two constrained columns with different dtypes.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=0, scalar='high')
dtype_int = pd.Series([1]).dtype
dtype_float = np.dtype('float')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4., 5., 6.]
})
instance._fit(table_data)
# Assert
expected_diff_columns = ['a#', 'b#']
expected_dtype = pd.Series([dtype_int, dtype_float], index=table_data.columns)
assert instance._diff_columns == expected_diff_columns
pd.testing.assert_series_equal(instance._dtype, expected_dtype)
def test__fit_low_is_scalar_multi_column(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should learn and store the
``dtype`` of the ``high`` columns as the ``_dtype`` attribute
when ``low`` is a scalar and ``high`` is a list of columns.
Input:
- Table that contains two constrained columns with different dtypes.
"""
# Setup
instance = GreaterThan(low=0, high=['a', 'b'], scalar='low')
dtype_int = pd.Series([1]).dtype
dtype_float = np.dtype('float')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4., 5., 6.]
})
instance._fit(table_data)
# Assert
expected_diff_columns = ['a#', 'b#']
expected_dtype = pd.Series([dtype_int, dtype_float], index=table_data.columns)
assert instance._diff_columns == expected_diff_columns
pd.testing.assert_series_equal(instance._dtype, expected_dtype)
def test_is_valid_strict_false(self):
"""Test the ``GreaterThan.is_valid`` method with strict False.
If strict is False, equal values should count as valid.
Input:
- Table with a strictly valid row, a strictly invalid row and
a row that has the same value for both high and low.
Output:
- False should be returned for the strictly invalid row and True
for the other two.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=False)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, True, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_strict_true(self):
"""Test the ``GreaterThan.is_valid`` method with strict True.
If strict is True, equal values should count as invalid.
Input:
- Table with a strictly valid row, a strictly invalid row and
a row that has the same value for both high and low.
Output:
- True should be returned for the strictly valid row and False
for the other two.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, False, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_low_is_scalar_high_is_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If low is a scalar, and high is a column name, then
the values in that column should all be higher than
``instance._low``.
Input:
- Table with values above and below low.
Output:
- True should be returned for the rows where the high
column is above low.
"""
# Setup
instance = GreaterThan(low=3, high='b', strict=False, scalar='low')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, False, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_high_is_scalar_low_is_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If high is a scalar, and low is a column name, then
the values in that column should all be lower than
``instance._high``.
Input:
- Table with values above and below high.
Output:
- True should be returned for the rows where the low
column is below high.
"""
# Setup
instance = GreaterThan(low='a', high=2, strict=False, scalar='high')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, True, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_high_is_scalar_multi_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If high is a scalar, and low is multi column, then
the values in those columns should all be lower than
``instance._high``.
Input:
- Table with values above and below high.
Output:
- True should be returned for the rows where all the low
columns are below high.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=2, strict=False, scalar='high')
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [False, True, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_low_is_scalar_multi_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If low is a scalar, and high is multi column, then
the values in those columns should all be higher than
``instance._low``.
Input:
- Table with values above and below low.
Output:
- True should be returned for the rows where all the high
columns are above low.
"""
# Setup
instance = GreaterThan(low=2, high=['a', 'b'], strict=False, scalar='low')
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [False, True, True]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_scalar_is_none_multi_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If scalar is ``None``, and high is multi column, then
the values in those columns should all be higher than
the value in the low column.
Input:
- Table with values above and below low.
Output:
- True should be returned for the rows where all the high
columns are above the low column.
"""
# Setup
instance = GreaterThan(low='b', high=['a', 'c'], strict=False)
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
# Run
out = instance.is_valid(table_data)
# Assert
expected_out = [False, True, True]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_high_is_datetime(self):
"""Test the ``GreaterThan.is_valid`` method.
If high is a datetime and low is a column,
the values in that column should all be lower than
``instance._high``.
Input:
- Table with values above and below `high`.
Output:
- True should be returned for the rows where the low
column is below `high`.
"""
# Setup
high_dt = pd.to_datetime('8/31/2021')
instance = GreaterThan(low='a', high=high_dt, strict=False, scalar='high')
table_data = pd.DataFrame({
'a': [datetime(2020, 5, 17), datetime(2020, 2, 1), datetime(2021, 9, 1)],
'b': [4, 2, 2],
})
# Run
out = instance.is_valid(table_data)
# Assert
expected_out = [True, True, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_low_is_datetime(self):
"""Test the ``GreaterThan.is_valid`` method.
If low is a datetime and high is a column,
the values in that column should all be higher than
``instance._low``.
Input:
- Table with values above and below `low`.
Output:
- True should be returned for the rows where the high
column is above `low`.
"""
# Setup
low_dt = pd.to_datetime('8/31/2021')
instance = GreaterThan(low=low_dt, high='a', strict=False, scalar='low')
table_data = pd.DataFrame({
'a': [datetime(2021, 9, 17), datetime(2021, 7, 1), datetime(2021, 9, 1)],
'b': [4, 2, 2],
})
# Run
out = instance.is_valid(table_data)
# Assert
expected_out = [True, False, True]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_two_cols_with_nans(self):
"""Test the ``GreaterThan.is_valid`` method with nan values.
If there is a NaN row, expect that `is_valid` returns True.
Input:
- Table with a NaN row
Output:
- True should be returned for the NaN row.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
# Run
table_data = pd.DataFrame({
'a': [1, None, 3],
'b': [4, None, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, True, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_two_cols_with_one_nan(self):
"""Test the ``GreaterThan.is_valid`` method with nan values.
If there is a row in which we compare one NaN value with one
non-NaN value, expect that `is_valid` returns True.
Input:
- Table with a row that contains only one NaN value.
Output:
- True should be returned for the row with the NaN value.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
# Run
table_data = pd.DataFrame({
'a': [1, None, 3],
'b': [4, 5, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, True, False]
np.testing.assert_array_equal(expected_out, out)
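# Summary of the ``is_valid`` behaviour covered by the tests above, written as
# a hedged sketch of the check these expectations imply (an assumption, not
# necessarily the exact implementation):
#
#     valid = (high > low) if strict else (high >= low)
#     valid |= high.isna() | low.isna()   # rows containing NaNs count as valid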
def test__transform_int_drop_none(self):
"""Test the ``GreaterThan._transform`` method passing a high column of type int.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
- Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._diff_columns = ['a#b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
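# A minimal sketch of the distance encoding that the ``_transform`` tests in
# this block rely on (an assumption drawn from the expected outputs, not the
# library's literal code):
#
#     table_data['a#b'] = np.log(table_data['b'] - table_data['a'] + 1)
#
# With a constant distance of 3 every row therefore becomes np.log(4).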
def test__transform_int_drop_high(self):
"""Test the ``GreaterThan._transform`` method passing a high column of type int.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1. It should also drop the high column.
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4) and the high column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._diff_columns = ['a#b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test__transform_int_drop_low(self):
"""Test the ``GreaterThan._transform`` method passing a high column of type int.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1. It should also drop the low column.
Setup:
- ``_drop`` is set to ``low``.
Input:
- Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4) and the low column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='low')
instance._diff_columns = ['a#b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test__transform_float_drop_none(self):
"""Test the ``GreaterThan._transform`` method passing a high column of type float.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
- Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._diff_columns = ['a#b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test__transform_datetime_drop_none(self):
"""Test the ``GreaterThan._transform`` method passing a high column of type datetime.
If the columns are of type datetime, ``_transform`` is expected
to convert the timedelta distance into numeric before applying
the +1 and logarithm.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
- Table with values at a distance of exactly 1 second.
Output:
- Same table with a diff column of the logarithms
of the distance in nanoseconds + 1, which is np.log(1_000_000_001).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._diff_columns = ['a#b']
instance._is_datetime = True
# Run
table_data = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
pd.testing.assert_frame_equal(out, expected_out)
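# For datetime columns the same encoding applies after converting the
# timedelta to nanoseconds (an assumption consistent with the expected frame
# above): a one-second difference becomes 1_000_000_000 nanoseconds, so the
# stored value is np.log(1_000_000_000 + 1), e.g.
#
#     delta_ns = (table_data['b'] - table_data['a']).dt.total_seconds() * 1e9
#     table_data['a#b'] = np.log(delta_ns + 1)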
def test_transform_not_all_columns_provided(self):
"""Test the ``GreaterThan.transform`` method.
If some of the columns needed for the transform are missing, it will raise
a ``MissingConstraintColumnError``.
Input:
- Table data (pandas.DataFrame)
Output:
- Raises ``MissingConstraintColumnError``.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, fit_columns_model=False)
# Run/Assert
with pytest.raises(MissingConstraintColumnError):
instance.transform(pd.DataFrame({'a': ['a', 'b', 'c']}))
def test__transform_high_is_scalar(self):
"""Test the ``GreaterThan._transform`` method with high as scalar.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high scalar value and the low column and create a diff column
with the logarithm of the distance + 1.
Setup:
- ``_high`` is set to 5 and ``_scalar`` is ``'high'``.
Input:
- Table with one low column and two dummy columns.
Output:
- Same table with a diff column of the logarithms of the distances + 1:
np.log(5), np.log(4) and np.log(3).
"""
# Setup
instance = GreaterThan(low='a', high=5, strict=True, scalar='high')
instance._diff_columns = ['a#b']
instance.constraint_columns = ['a']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(5), np.log(4), np.log(3)],
})
pd.testing.assert_frame_equal(out, expected_out)
def test__transform_low_is_scalar(self):
"""Test the ``GreaterThan._transform`` method with high as scalar.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high scalar value and the low column and create a diff column
with the logarithm of the distance + 1.
Setup:
- ``_high`` is set to 5 and ``_scalar`` is ``'low'``.
Input:
- Table with one low column and two dummy columns.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4).
"""
# Setup
instance = GreaterThan(low=2, high='b', strict=True, scalar='low')
instance._diff_columns = ['a#b']
instance.constraint_columns = ['b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(3), np.log(4), np.log(5)],
})
pd.testing.assert_frame_equal(out, expected_out)
def test__transform_high_is_scalar_multi_column(self):
"""Test the ``GreaterThan._transform`` method.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high scalar and each of the low columns and store the
logarithm of the distance + 1 in the diff columns.
Input:
- Table with given data.
Output:
- Same table with additional columns of the logarithms + 1.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=3, strict=True, scalar='high')
instance._diff_columns = ['a#', 'b#']
instance.constraint_columns = ['a', 'b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#': [np.log(3), np.log(2), np.log(1)],
'b#': [np.log(0), np.log(-1), np.log(-2)],
})
pd.testing.assert_frame_equal(out, expected)
def test__transform_low_is_scalar_multi_column(self):
"""Test the ``GreaterThan._transform`` method.
The ``GreaterThan._transform`` method is expected to compute the distance
between each of the high columns and the low scalar and store the
logarithm of the distance + 1 in the diff columns.
Input:
- Table with given data.
Output:
- Same table with additional columns of the logarithms + 1.
"""
# Setup
instance = GreaterThan(low=3, high=['a', 'b'], strict=True, scalar='low')
instance._diff_columns = ['a#', 'b#']
instance.constraint_columns = ['a', 'b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#': [np.log(-1), np.log(0), np.log(1)],
'b#': [np.log(2), np.log(3), np.log(4)],
})
pd.testing.assert_frame_equal(out, expected)
def test__transform_scalar_is_none_multi_column(self):
"""Test the ``GreaterThan._transform`` method.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high column and each of the low columns and store the
logarithm of the distance + 1 in the diff columns.
Input:
- Table with given data.
Output:
- Same table with additional columns of the logarithms + 1.
"""
# Setup
instance = GreaterThan(low=['a', 'c'], high='b', strict=True)
instance._diff_columns = ['a#', 'c#']
instance.constraint_columns = ['a', 'c']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#': [np.log(4)] * 3,
'c#': [np.log(-2)] * 3,
})
pd.testing.assert_frame_equal(out, expected)
def test_reverse_transform_int_drop_high(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype int.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low column
- convert the output to integers
- add back the dropped column
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with a diff column that contains the constant np.log(4).
Output:
- Same table with the high column replaced by the low one + 3, as int
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._dtype = [pd.Series([1]).dtype] # exact dtype (32 or 64) depends on OS
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 3],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'c': [7, 8, 9],
'b': [4, 5, 6],
})
pd.testing.assert_frame_equal(out, expected_out)
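# A hedged sketch of the reconstruction exercised by the ``reverse_transform``
# tests below (an assumption based on the expected outputs, not the library's
# literal code):
#
#     diff = np.exp(transformed['a#b']) - 1                         # undo the log(x + 1)
#     transformed['b'] = (transformed['a'] + diff).astype(instance._dtype[0])
#
# i.e. the dropped or invalid side is rebuilt from the other side plus the
# decoded distance, then cast back to the stored dtype.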
def test_reverse_transform_float_drop_high(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype float.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low column
- convert the output to float values
- add back the dropped column
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with a diff column that contains the constant np.log(4).
Output:
- Same table with the high column replaced by the low one + 3, as float values
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._dtype = [np.dtype('float')]
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': [1.1, 2.2, 3.3],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1.1, 2.2, 3.3],
'c': [7, 8, 9],
'b': [4.1, 5.2, 6.3],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_datetime_drop_high(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype datetime.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- convert the distance to a timedelta
- add the low column
- convert the output to datetimes
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with a diff column that contains the constant np.log(1_000_000_001).
Output:
- Same table with the high column replaced by the low one + one second
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._dtype = [np.dtype('<M8[ns]')]
instance._diff_columns = ['a#b']
instance._is_datetime = True
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'c': [1, 2],
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01'])
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_int_drop_low(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype int.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- subtract from the high column
- convert the output to integers
- add back the dropped column
Setup:
- ``_drop`` is set to ``low``.
Input:
- Table with a diff column that contains the constant np.log(4).
Output:
- Same table with the low column replaced by the high one - 3, as int
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='low')
instance._dtype = [pd.Series([1]).dtype] # exact dtype (32 or 64) depends on OS
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['a']
# Run
transformed = pd.DataFrame({
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'b': [4, 5, 6],
'c': [7, 8, 9],
'a': [1, 2, 3],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_datetime_drop_low(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype datetime.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- convert the distance to a timedelta
- subtract from the high column
- convert the output to datetimes
Setup:
- ``_drop`` is set to ``low``.
Input:
- Table with a diff column that contains the constant np.log(1_000_000_001).
Output:
- Same table with the low column replaced by the high one - one second
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='low')
instance._dtype = [np.dtype('<M8[ns]')]
instance._diff_columns = ['a#b']
instance._is_datetime = True
instance._columns_to_reconstruct = ['a']
# Run
transformed = pd.DataFrame({
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00'])
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_int_drop_none(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype int.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low column when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
Input:
- Table with a diff column that contains the constant np.log(4).
The table should have one invalid row where the low column is
higher than the high column.
Output:
- Same table with the high column replaced by the low one + 3 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._dtype = [pd.Series([1]).dtype] # exact dtype (32 or 64) depends on OS
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 1, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_datetime_drop_none(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype datetime.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- convert the distance to a timedelta
- add the low column when the row is invalid
- convert the output to datetimes
Setup:
- ``_drop`` is set to ``None``.
Input:
- Table with a diff column that contains the constant np.log(1_000_000_001).
The table should have one invalid row where the low column is
higher than the high column.
Output:
- Same table with the high column replaced by the low one + one second
for all invalid rows, and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._dtype = [np.dtype('<M8[ns]')]
instance._diff_columns = ['a#b']
instance._is_datetime = True
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-01T00:00:01']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2]
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_low_is_scalar(self):
"""Test the ``GreaterThan.reverse_transform`` method with low as a scalar.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low value when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
- ``_low`` is set to an int and ``_scalar`` is ``'low'``.
Input:
- Table with a diff column that contains the constant np.log(4).
The table should have one invalid row where the low value is
higher than the high column.
Output:
- Same table with the high column replaced by the low value + 3 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low=3, high='b', strict=True, scalar='low')
instance._dtype = [pd.Series([1]).dtype] # exact dtype (32 or 64) depends on OS
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 1, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 6, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_high_is_scalar(self):
"""Test the ``GreaterThan.reverse_transform`` method with high as a scalar.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- subtract from the high value when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
- ``_high`` is set to an int and ``_scalar`` is ``'high'``.
Input:
- Table with a diff column that contains the constant np.log(4).
The table should have one invalid row where the low column is
higher than the high value.
Output:
- Same table with the low column replaced by the high one - 3 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high=3, strict=True, scalar='high')
instance._dtype = [pd.Series([1]).dtype] # exact dtype (32 or 64) depends on OS
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['a']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 0],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_high_is_scalar_multi_column(self):
"""Test the ``GreaterThan.reverse_transform`` method with high as a scalar.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- subtract from the high value when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
- ``_high`` is set to an int and ``_scalar`` is ``'high'``.
- ``_low`` is set to multiple columns.
Input:
- Table with a diff column that contains the constant np.log(4)/np.log(5).
The table should have one invalid row where the low column is
higher than the high value.
Output:
- Same table with the low column replaced by the high one - 3/-4 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=3, strict=True, scalar='high')
dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._dtype = [dtype, dtype]
instance._diff_columns = ['a#', 'b#']
instance._columns_to_reconstruct = ['a', 'b']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 4],
'b': [0, 5, 6],
'c': [7, 8, 9],
'a#': [np.log(4)] * 3,
'b#': [np.log(5)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 0, 0],
'b': [0, -1, -1],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_low_is_scalar_multi_column(self):
"""Test the ``GreaterThan.reverse_transform`` method with low as a scalar.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low value when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
- ``_low`` is set to an int and ``_scalar`` is ``'low'``.
- ``_high`` is set to multiple columns.
Input:
- Table with a diff column that contains the constant np.log(4)/np.log(5).
The table should have one invalid row where the low value is
higher than the high column.
Output:
- Same table with the high column replaced by the low value +3/+4 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low=3, high=['a', 'b'], strict=True, scalar='low')
dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._dtype = [dtype, dtype]
instance._diff_columns = ['a#', 'b#']
instance._columns_to_reconstruct = ['a', 'b']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#': [np.log(4)] * 3,
'b#': [np.log(5)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [6, 6, 4],
'b': [7, 7, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_scalar_is_none_multi_column(self):
"""Test the ``GreaterThan.reverse_transform`` method with low as a scalar.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low value when the row is invalid
- convert the output to integers
Setup:
- ``_low`` = ['a', 'c'].
- ``_high`` = ['b'].
Input:
- Table with two diff columns that both contain the constant np.log(1).
Output:
- Same table with the original ``a``, ``b`` and ``c`` values kept and the
diff columns dropped.
"""
# Setup
instance = GreaterThan(low=['a', 'c'], high=['b'], strict=True)
dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._dtype = [dtype, dtype]
instance._diff_columns = ['a#', 'c#']
instance._columns_to_reconstruct = ['a', 'c']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#': [np.log(1)] * 3,
'c#': [np.log(1)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_multi_column_positive(self):
"""Test the ``GreaterThan.reverse_transform`` method for positive constraint.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- subtract from the high value when the row is invalid
- convert the output to integers
Input:
- Table with given data.
Output:
- Same table with replaced rows and dropped columns.
"""
# Setup
instance = GreaterThan(low=0, high=['a', 'b'], strict=True, scalar='low')
dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._dtype = [dtype, dtype]
instance._diff_columns = ['a#', 'b#']
instance._columns_to_reconstruct = ['a', 'b']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, -1],
'c': [7, 8, 9],
'a#': [np.log(2), np.log(3), np.log(4)],
'b#': [np.log(5), np.log(6), np.log(0)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 0],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_multi_column_negative(self):
"""Test the ``GreaterThan.reverse_transform`` method for negative constraint.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- subtract from the high value when the row is invalid
- convert the output to integers
Input:
- Table with given data.
Output:
- Same table with replaced rows and dropped columns.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=0, strict=True, scalar='high')
dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._dtype = [dtype, dtype]
instance._diff_columns = ['a#', 'b#']
instance._columns_to_reconstruct = ['a', 'b']
# Run
transformed = pd.DataFrame({
'a': [-1, -2, 1],
'b': [-4, -5, -1],
'c': [7, 8, 9],
'a#': [np.log(2), np.log(3), np.log(0)],
'b#': [np.log(5), np.log(6), np.log(2)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [-1, -2, 0],
'b': [-4, -5, -1],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
class TestPositive():
def test__init__(self):
"""
Test the ``Positive.__init__`` method.
The method is expected to set the ``_low`` instance variable
to 0, the ``_scalar`` variable to ``'low'``. The rest of the
parameters should be passed. Check that ``_drop`` is set to
``None`` when ``drop`` is ``False``.
Input:
- strict = True
- columns = 'a'
- drop = False
Side effects:
- instance._low == 0
- instance._high == ['a']
- instance._strict == True
- instance._scalar == 'low'
- instance._drop = None
"""
# Run
instance = Positive(columns='a', strict=True, drop=False)
# Asserts
assert instance._low == 0
assert instance._high == ['a']
assert instance._strict is True
assert instance._scalar == 'low'
assert instance._drop is None
def test__init__drop_true(self):
"""
Test the ``Positive.__init__`` method with drop is ``True``.
Check that ``_drop`` is set to 'high' when ``drop`` is ``True``.
Input:
- strict = True
- columns = 'a'
- drop = True
Side effects:
- instance._low == 0
- instance._high == ['a']
- instance._strict == True
- instance._scalar == 'low'
- instance._drop = 'high'
"""
# Run
instance = Positive(columns='a', strict=True, drop=True)
# Asserts
assert instance._low == 0
assert instance._high == ['a']
assert instance._strict is True
assert instance._scalar == 'low'
assert instance._drop == 'high'
class TestNegative():
def test__init__(self):
"""
Test the ``Negative.__init__`` method.
The method is expected to set the ``_high`` instance variable
to 0, the ``_scalar`` variable to ``'high'``. The rest of the
parameters should be passed. Check that ``_drop`` is set to
``None`` when ``drop`` is ``False``.
Input:
- strict = True
- columns = 'a'
- drop = False
Side effects:
- instance._low == ['a']
- instance._high == 0
- instance._strict == True
- instance._scalar = 'high'
- instance._drop = None
"""
# Run
instance = Negative(columns='a', strict=True, drop=False)
# Asserts
assert instance._low == ['a']
assert instance._high == 0
assert instance._strict is True
assert instance._scalar == 'high'
assert instance._drop is None
def test__init__drop_true(self):
"""
Test the ``Negative.__init__`` method with drop is ``True``.
Check that ``_drop`` is set to 'low' when ``drop`` is ``True``.
Input:
- strict = True
- columns = 'a'
- drop = True
Side effects:
- instance._low == ['a']
- instance._high == 0
- instance._strict == True
- instance._scalar = 'high'
- instance._drop = 'low'
"""
# Run
instance = Negative(columns='a', strict=True, drop=True)
# Asserts
assert instance._low == ['a']
assert instance._high == 0
assert instance._strict is True
assert instance._scalar == 'high'
assert instance._drop == 'low'
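# The assertions above suggest that ``Positive(columns='a')`` behaves like
# ``GreaterThan(low=0, high=['a'], scalar='low')`` and ``Negative(columns='a')``
# like ``GreaterThan(low=['a'], high=0, scalar='high')``, with ``drop=True``
# mapping to dropping the column side of the comparison. This is an inference
# from the tested attributes, not a description of the constructors' code.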
def new_column(data):
"""Formula to be used for the ``TestColumnFormula`` class."""
if data['a'] is None or data['b'] is None:
return None
return data['a'] + data['b']
class TestColumnFormula():
def test___init__(self):
"""Test the ``ColumnFormula.__init__`` method.
It is expected to create a new Constraint instance,
import the formula to use for the computation, and
set the specified constraint column.
Input:
- column = 'col'
- formula = new_column
"""
# Setup
column = 'col'
# Run
instance = ColumnFormula(column=column, formula=new_column)
# Assert
assert instance._column == column
assert instance._formula == new_column
assert instance.constraint_columns == ('col', )
def test___init__sets_rebuild_columns_if_not_reject_sampling(self):
"""Test the ``ColumnFormula.__init__`` method.
The rebuild columns should only be set if the ``handling_strategy``
is not ``reject_sampling``.
Side effects:
- instance.rebuild_columns are set
"""
# Setup
column = 'col'
# Run
instance = ColumnFormula(column=column, formula=new_column, handling_strategy='transform')
# Assert
assert instance.rebuild_columns == (column,)
def test___init__does_not_set_rebuild_columns_reject_sampling(self):
"""Test the ``ColumnFormula.__init__`` method.
The rebuild columns should not be set if the ``handling_strategy``
is ``reject_sampling``.
Side effects:
- instance.rebuild_columns are empty
"""
# Setup
column = 'col'
# Run
instance = ColumnFormula(column=column, formula=new_column,
handling_strategy='reject_sampling')
# Assert
assert instance.rebuild_columns == ()
def test_is_valid_valid(self):
"""Test the ``ColumnFormula.is_valid`` method for a valid data.
If the data fulfills the formula, result is a series of ``True`` values.
Input:
- Table data fulfilling the formula (pandas.DataFrame)
Output:
- Series of ``True`` values (pandas.Series)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, True])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_non_valid(self):
"""Test the ``ColumnFormula.is_valid`` method for a non-valid data.
If the data does not fulfill the formula, result is a series of ``False`` values.
Input:
- Table data not fulfilling the formula (pandas.DataFrame)
Output:
- Series of ``False`` values (pandas.Series)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [1, 2, 3]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([False, False, False])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_with_nans(self):
"""Test the ``ColumnFormula.is_valid`` method for with a formula that produces nans.
If the data fulfills the formula, result is a series of ``True`` values.
Input:
- Table data fulfilling the formula (pandas.DataFrame)
Output:
- Series of ``True`` values (pandas.Series)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, None],
'c': [5, 7, None]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, True])
pd.testing.assert_series_equal(expected_out, out)
def test__transform(self):
"""Test the ``ColumnFormula._transform`` method.
It is expected to drop the indicated column from the table.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data without the indicated column (pandas.DataFrame)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_without_dropping_column(self):
"""Test the ``ColumnFormula._transform`` method without dropping the column.
If ``drop_column`` is ``False``, expect the constraint column not to be dropped.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with the indicated column (pandas.DataFrame)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column, drop_column=False)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_missing_column(self):
"""Test the ``ColumnFormula._transform`` method when the constraint column is missing.
When ``_transform`` is called with data that does not contain the constraint column,
expect to return the data as-is.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data, unchanged (pandas.DataFrame)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'd': [5, 7, 9]
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'd': [5, 7, 9]
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform(self):
"""Test the ``ColumnFormula.reverse_transform`` method.
It is expected to compute the indicated column by applying the given formula.
Input:
- Table data with the column with incorrect values (pandas.DataFrame)
Output:
- Table data with the computed column (pandas.DataFrame)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [1, 1, 1]
})
out = instance.reverse_transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
pd.testing.assert_frame_equal(expected_out, out)
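# A minimal sketch of what ``reverse_transform`` is expected to do here
# (an assumption matching the test above, not the library's literal code):
#
#     table_data['c'] = new_column(table_data)   # recompute the column via the formula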
class TestRounding():
def test___init__(self):
"""Test the ``Rounding.__init__`` method.
It is expected to create a new Constraint instance
and set the rounding args.
Input:
- columns = ['b', 'c']
- digits = 2
"""
# Setup
columns = ['b', 'c']
digits = 2
# Run
instance = Rounding(columns=columns, digits=digits)
# Assert
assert instance._columns == columns
assert instance._digits == digits
def test___init__invalid_digits(self):
"""Test the ``Rounding.__init__`` method with an invalid argument.
Pass in an invalid ``digits`` argument, and expect a ValueError.
Input:
- columns = ['b', 'c']
- digits = 20
"""
# Setup
columns = ['b', 'c']
digits = 20
# Run
with pytest.raises(ValueError):
Rounding(columns=columns, digits=digits)
def test___init__invalid_tolerance(self):
"""Test the ``Rounding.__init__`` method with an invalid argument.
Pass in an invalid ``tolerance`` argument, and expect a ValueError.
Input:
- columns = ['b', 'c']
- digits = 2
- tolerance = 0.1
"""
# Setup
columns = ['b', 'c']
digits = 2
tolerance = 0.1
# Run
with pytest.raises(ValueError):
Rounding(columns=columns, digits=digits, tolerance=tolerance)
def test_is_valid_positive_digits(self):
"""Test the ``Rounding.is_valid`` method for a positive digits argument.
Input:
- Table data where some rows match the desired decimal places and some do not (pandas.DataFrame)
Output:
- Series with ``True`` for the matching rows and ``False`` otherwise (pandas.Series)
"""
# Setup
columns = ['b', 'c']
digits = 2
tolerance = 1e-3
instance = Rounding(columns=columns, digits=digits, tolerance=tolerance)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [4.12, 5.51, None, 6.941, 1.129],
'c': [5.315, 7.12, 1.12, 9.131, 12.329],
'd': ['a', 'b', 'd', 'e', None],
'e': [123.31598, -1.12001, 1.12453, 8.12129, 1.32923]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([False, True, False, True, True])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_negative_digits(self):
"""Test the ``Rounding.is_valid`` method for a negative digits argument.
Input:
- Table data where some rows match the desired decimal places and some do not (pandas.DataFrame)
Output:
- Series with ``True`` for the matching rows and ``False`` otherwise (pandas.Series)
"""
# Setup
columns = ['b']
digits = -2
tolerance = 1
instance = Rounding(columns=columns, digits=digits, tolerance=tolerance)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [401, 500, 6921, 799, None],
'c': [5.3134, 7.1212, 9.1209, 101.1234, None],
'd': ['a', 'b', 'd', 'e', 'f']
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, False, True, False])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_zero_digits(self):
"""Test the ``Rounding.is_valid`` method for a zero digits argument.
Input:
- Table data where some rows match the desired decimal places and some do not (pandas.DataFrame)
Output:
- Series with ``True`` for the matching rows and ``False`` otherwise (pandas.Series)
"""
# Setup
columns = ['b', 'c']
digits = 0
tolerance = 1e-4
instance = Rounding(columns=columns, digits=digits, tolerance=tolerance)
# Run
table_data = pd.DataFrame({
'a': [1, 2, None, 3, 4],
'b': [4, 5.5, 1.2, 6.0001, 5.99999],
'c': [5, 7.12, 1.31, 9.00001, 4.9999],
'd': ['a', 'b', None, 'd', 'e'],
'e': [2.1254, 17.12123, 124.12, 123.0112, -9.129434]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, False, False, True, True])
pd.testing.assert_series_equal(expected_out, out)
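# A hedged sketch of the validity check implied by the three ``is_valid``
# tests above (an assumption, not necessarily the exact implementation):
#
#     rounded = table_data[columns].round(digits)
#     valid = ((table_data[columns] - rounded).abs() <= tolerance).all(axis=1)
#
# Rows containing NaN in a checked column come out as invalid.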
def test_reverse_transform_positive_digits(self):
"""Test the ``Rounding.reverse_transform`` method with positive digits.
Expect that the columns are rounded to the specified integer digit.
Input:
- Table data with the column with incorrect values (pandas.DataFrame)
Output:
- Table data with the computed column (pandas.DataFrame)
"""
# Setup
columns = ['b', 'c']
digits = 3
instance = Rounding(columns=columns, digits=digits)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, None, 4],
'b': [4.12345, None, 5.100, 6.0001, 1.7999],
'c': [1.1, 1.234, 9.13459, 4.3248, 6.1312],
'd': ['a', 'b', 'd', 'e', None]
})
out = instance.reverse_transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3, None, 4],
'b': [4.123, None, 5.100, 6.000, 1.800],
'c': [1.100, 1.234, 9.135, 4.325, 6.131],
'd': ['a', 'b', 'd', 'e', None]
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_negative_digits(self):
"""Test the ``Rounding.reverse_transform`` method with negative digits.
Expect that the columns are rounded to the specified integer digit.
Input:
- Table data with the column with incorrect values (pandas.DataFrame)
Output:
- Table data with the computed column (pandas.DataFrame)
"""
# Setup
columns = ['b']
digits = -3
instance = Rounding(columns=columns, digits=digits)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [41234.5, None, 5000, 6001, 5928],
'c': [1.1, 1.23423, 9.13459, 12.12125, 18.12152],
'd': ['a', 'b', 'd', 'e', 'f']
})
out = instance.reverse_transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [41000.0, None, 5000.0, 6000.0, 6000.0],
'c': [1.1, 1.23423, 9.13459, 12.12125, 18.12152],
'd': ['a', 'b', 'd', 'e', 'f']
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_zero_digits(self):
"""Test the ``Rounding.reverse_transform`` method with zero digits.
Expect that the columns are rounded to the specified integer digit.
Input:
- Table data with the column with incorrect values (pandas.DataFrame)
Output:
- Table data with the computed column (pandas.DataFrame)
"""
# Setup
columns = ['b', 'c']
digits = 0
instance = Rounding(columns=columns, digits=digits)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [4.12345, None, 5.0, 6.01, 7.9],
'c': [1.1, 1.0, 9.13459, None, 8.89],
'd': ['a', 'b', 'd', 'e', 'f']
})
out = instance.reverse_transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [4.0, None, 5.0, 6.0, 8.0],
'c': [1.0, 1.0, 9.0, None, 9.0],
'd': ['a', 'b', 'd', 'e', 'f']
})
pd.testing.assert_frame_equal(expected_out, out)
def transform(data, low, high):
"""Transform to be used for the TestBetween class."""
data = (data - low) / (high - low) * 0.95 + 0.025
return np.log(data / (1.0 - data))
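# Worked example of the helper above: a value exactly in the middle of the
# [low, high] interval scales to 0.5, whose logit is 0, e.g.
# transform(pd.Series([0.5]), 0.0, 1.0) -> pd.Series([0.0]).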
class TestBetween():
def test___init__sets_rebuild_columns_if_not_reject_sampling(self):
"""Test the ``Between.__init__`` method.
The rebuild columns should only be set if the ``handling_strategy``
is not ``reject_sampling``.
Side effects:
- instance.rebuild_columns are set
"""
# Setup
column = 'col'
# Run
instance = Between(column=column, low=10, high=20, handling_strategy='transform')
# Assert
assert instance.rebuild_columns == (column,)
def test___init__does_not_set_rebuild_columns_reject_sampling(self):
"""Test the ``Between.__init__`` method.
The rebuild columns should not be set if the ``handling_strategy``
is ``reject_sampling``.
Side effects:
- instance.rebuild_columns are empty
"""
# Setup
column = 'col'
# Run
instance = Between(column=column, low=10, high=20, handling_strategy='reject_sampling')
# Assert
assert instance.rebuild_columns == ()
def test_fit_only_one_datetime_arg(self):
"""Test the ``Between.fit`` method by passing in only one arg as datetime.
If only one of the bound parameters is a datetime type, expect a ValueError.
Input:
- low is a float scalar
- high is a datetime
Output:
- n/a
Side Effects:
- ValueError
"""
# Setup
column = 'a'
low = 0.0
high = pd.to_datetime('2021-01-01')
instance = Between(column=column, low=low, high=high)
# Run and assert
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [4, 5, 6],
})
with pytest.raises(ValueError):
instance.fit(table_data)
def test_transform_scalar_scalar(self):
"""Test the ``Between.transform`` method by passing ``low`` and ``high`` as scalars.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 0.0
high = 1.0
instance = Between(column=column, low=low, high=high, high_is_scalar=True,
low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [4, 5, 6],
})
instance.fit(table_data)
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [4, 5, 6],
'a#0.0#1.0': transform(table_data[column], low, high)
})
pd.testing.assert_frame_equal(expected_out, out)
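# Note on the transformed column name checked above: it appears to be built by
# joining the constrained column with the low and high bounds using '#'
# (e.g. 'a#0.0#1.0' here, and 'a#0.0#b' / 'a#b#1.0' in the column-bound tests
# below). This is an observation from the expected frames, not a guaranteed
# naming contract.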
def test__transform_scalar_column(self):
"""Test the ``Between._transform`` method with ``low`` as scalar and ``high`` as a column.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 0.0
high = 'b'
instance = Between(column=column, low=low, high=high, low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [0.5, 1, 6],
})
instance.fit(table_data)
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [0.5, 1, 6],
'a#0.0#b': transform(table_data[column], low, table_data[high])
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_column_scalar(self):
"""Test the ``Between._transform`` method with ``low`` as a column and ``high`` as scalar.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = 1.0
instance = Between(column=column, low=low, high=high, high_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [0, -1, 0.5],
})
instance.fit(table_data)
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [0, -1, 0.5],
'a#b#1.0': transform(table_data[column], table_data[low], high)
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_column_column(self):
"""Test the ``Between._transform`` method by passing ``low`` and ``high`` as columns.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = 'c'
instance = Between(column=column, low=low, high=high)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [0, -1, 0.5],
'c': [0.5, 1, 6]
})
instance.fit(table_data)
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [0, -1, 0.5],
'c': [0.5, 1, 6],
'a#b#c': transform(table_data[column], table_data[low], table_data[high])
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_datetime_datetime(self):
"""Test the ``Between._transform`` method by passing ``low`` and ``high`` as datetimes.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
- High and Low as datetimes
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = pd.to_datetime('1900-01-01')
high = pd.to_datetime('2021-01-01')
instance = Between(column=column, low=low, high=high, high_is_scalar=True,
low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [
pd.to_datetime('2020-09-03'),
pd.to_datetime('2020-08-01'),
pd.to_datetime('2020-08-03'),
],
'b': [4, 5, 6],
})
instance.fit(table_data)
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [4, 5, 6],
'a#1900-01-01T00:00:00.000000000#2021-01-01T00:00:00.000000000': transform(
table_data[column], low, high)
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_datetime_column(self):
"""Test the ``Between._transform`` method with ``low`` as datetime and ``high`` as a column.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = pd.to_datetime('1900-01-01')
high = 'b'
instance = Between(column=column, low=low, high=high, low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [
pd.to_datetime('2020-09-03'),
pd.to_datetime('2020-08-01'),
pd.to_datetime('2020-08-03'),
],
'b': [
pd.to_datetime('2020-10-03'),
pd.to_datetime('2020-11-01'),
pd.to_datetime('2020-11-03'),
],
})
instance.fit(table_data)
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [
pd.to_datetime('2020-10-03'),
pd.to_datetime('2020-11-01'),
pd.to_datetime('2020-11-03'),
],
'a#1900-01-01T00:00:00.000000000#b': transform(
table_data[column], low, table_data[high])
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_column_datetime(self):
"""Test the ``Between._transform`` method with ``low`` as a column and ``high`` as datetime.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = pd.to_datetime('2021-01-01')
instance = Between(column=column, low=low, high=high, high_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [
pd.to_datetime('2020-09-03'),
pd.to_datetime('2020-08-01'),
pd.to_datetime('2020-08-03'),
],
'b': [
pd.to_datetime('2020-01-03'),
pd.to_datetime('2020-02-01'),
pd.to_datetime('2020-02-03'),
],
})
instance.fit(table_data)
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [
pd.to_datetime('2020-01-03'),
pd.to_datetime('2020-02-01'),
pd.to_datetime('2020-02-03'),
],
'a#b#2021-01-01T00:00:00.000000000': transform(
table_data[column], table_data[low], high)
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_column_column_datetime(self):
"""Test the ``Between._transform`` method with ``low`` and ``high`` as datetime columns.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = 'c'
instance = Between(column=column, low=low, high=high)
# Run
table_data = pd.DataFrame({
'a': [
pd.to_datetime('2020-09-03'),
pd.to_datetime('2020-08-01'),
pd.to_datetime('2020-08-03'),
],
'b': [
pd.to_datetime('2020-01-03'),
pd.to_datetime('2020-02-01'),
pd.to_datetime('2020-02-03'),
],
'c': [
pd.to_datetime('2020-10-03'),
pd.to_datetime('2020-11-01'),
pd.to_datetime('2020-11-03'),
]
})
instance.fit(table_data)
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [
pd.to_datetime('2020-01-03'),
pd.to_datetime('2020-02-01'),
pd.to_datetime('2020-02-03'),
],
'c': [
pd.to_datetime('2020-10-03'),
pd.to_datetime('2020-11-01'),
pd.to_datetime('2020-11-03'),
],
'a#b#c': transform(table_data[column], table_data[low], table_data[high])
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_scalar_scalar(self):
"""Test ``Between.reverse_transform`` with ``low`` and ``high`` as scalars.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
Output:
        - Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 0.0
high = 1.0
instance = Between(column=column, low=low, high=high, high_is_scalar=True,
low_is_scalar=True)
table_data = pd.DataFrame({
'b': [4, 5, 6],
'a': [0.1, 0.5, 0.9]
})
# Run
instance.fit(table_data)
transformed = pd.DataFrame({
'b': [4, 5, 6],
'a#0.0#1.0': transform(table_data[column], low, high)
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = table_data
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_scalar_column(self):
"""Test ``Between.reverse_transform`` with ``low`` as scalar and ``high`` as a column.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
Output:
        - Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 0.0
high = 'b'
instance = Between(column=column, low=low, high=high, low_is_scalar=True)
table_data = pd.DataFrame({
'b': [0.5, 1, 6],
'a': [0.1, 0.5, 0.9]
})
# Run
instance.fit(table_data)
transformed = pd.DataFrame({
'b': [0.5, 1, 6],
'a#0.0#b': transform(table_data[column], low, table_data[high])
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = table_data
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_column_scalar(self):
"""Test ``Between.reverse_transform`` with ``low`` as a column and ``high`` as scalar.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
Output:
        - Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = 1.0
instance = Between(column=column, low=low, high=high, high_is_scalar=True)
table_data = pd.DataFrame({
'b': [0, -1, 0.5],
'a': [0.1, 0.5, 0.9]
})
# Run
instance.fit(table_data)
transformed = pd.DataFrame({
'b': [0, -1, 0.5],
'a#b#1.0': transform(table_data[column], table_data[low], high)
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = table_data
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_column_column(self):
"""Test ``Between.reverse_transform`` with ``low`` and ``high`` as columns.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
Output:
        - Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = 'c'
instance = Between(column=column, low=low, high=high)
table_data = pd.DataFrame({
'b': [0, -1, 0.5],
'c': [0.5, 1, 6],
'a': [0.1, 0.5, 0.9]
})
# Run
instance.fit(table_data)
transformed = pd.DataFrame({
'b': [0, -1, 0.5],
'c': [0.5, 1, 6],
'a#b#c': transform(table_data[column], table_data[low], table_data[high])
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = table_data
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_datetime_datetime(self):
"""Test ``Between.reverse_transform`` with ``low`` and ``high`` as datetime.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
- High and low as datetimes
Output:
        - Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = pd.to_datetime('1900-01-01')
high = pd.to_datetime('2021-01-01')
instance = Between(column=column, low=low, high=high, high_is_scalar=True,
low_is_scalar=True)
table_data = pd.DataFrame({
'b': [4, 5, 6],
'a': [
pd.to_datetime('2020-09-03'),
pd.to_datetime('2020-08-01'),
pd.to_datetime('2020-08-03'),
],
})
# Run
instance.fit(table_data)
transformed = pd.DataFrame({
'b': [4, 5, 6],
'a#1900-01-01T00:00:00.000000000#2021-01-01T00:00:00.000000000': transform(
table_data[column], low, high)
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = table_data
pd.testing.assert_series_equal(expected_out['b'], out['b'])
pd.testing.assert_series_equal(expected_out['a'], out['a'].astype('datetime64[ms]'))
def test_reverse_transform_datetime_column(self):
"""Test ``Between.reverse_transform`` with ``low`` as datetime and ``high`` as a column.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
Output:
        - Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = pd.to_datetime('1900-01-01')
high = 'b'
instance = Between(column=column, low=low, high=high, low_is_scalar=True)
table_data = pd.DataFrame({
'b': [
pd.to_datetime('2020-10-03'),
pd.to_datetime('2020-11-01'),
pd.to_datetime('2020-11-03'),
],
'a': [
pd.to_datetime('2020-09-03'),
pd.to_datetime('2020-08-02'),
pd.to_datetime('2020-08-03'),
]
})
# Run
instance.fit(table_data)
transformed = pd.DataFrame({
'b': [
pd.to_datetime('2020-10-03'),
pd.to_datetime('2020-11-01'),
pd.to_datetime('2020-11-03'),
],
'a#1900-01-01T00:00:00.000000000#b': transform(
table_data[column], low, table_data[high])
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = table_data
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_column_datetime(self):
"""Test ``Between.reverse_transform`` with ``low`` as a column and ``high`` as datetime.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
Output:
        - Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = pd.to_datetime('2021-01-01')
instance = Between(column=column, low=low, high=high, high_is_scalar=True)
table_data = pd.DataFrame({
'b': [
pd.to_datetime('2020-01-03'),
pd.to_datetime('2020-02-01'),
pd.to_datetime('2020-02-03'),
],
'a': [
pd.to_datetime('2020-09-03'),
pd.to_datetime('2020-08-03'),
pd.to_datetime('2020-08-04'),
],
})
# Run
instance.fit(table_data)
transformed = pd.DataFrame({
'b': [
pd.to_datetime('2020-01-03'),
pd.to_datetime('2020-02-01'),
pd.to_datetime('2020-02-03'),
],
'a#b#2021-01-01T00:00:00.000000000': transform(
table_data[column], table_data[low], high)
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = table_data
pd.testing.assert_series_equal(expected_out['b'], out['b'])
pd.testing.assert_series_equal(expected_out['a'], out['a'].astype('datetime64[ms]'))
def test_reverse_transform_column_column_datetime(self):
"""Test ``Between.reverse_transform`` with ``low`` and ``high`` as datetime columns.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
Output:
        - Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = 'c'
instance = Between(column=column, low=low, high=high)
table_data = pd.DataFrame({
'b': [
pd.to_datetime('2020-01-03'),
pd.to_datetime('2020-02-01'),
pd.to_datetime('2020-02-03'),
],
'c': [
pd.to_datetime('2020-10-03'),
pd.to_datetime('2020-11-01'),
pd.to_datetime('2020-11-03'),
],
'a': [
pd.to_datetime('2020-09-03'),
pd.to_datetime('2020-08-01'),
pd.to_datetime('2020-08-03'),
],
})
# Run
instance.fit(table_data)
transformed = pd.DataFrame({
'b': [
pd.to_datetime('2020-01-03'),
pd.to_datetime('2020-02-01'),
pd.to_datetime('2020-02-03'),
],
'c': [
pd.to_datetime('2020-10-03'),
pd.to_datetime('2020-11-01'),
pd.to_datetime('2020-11-03'),
],
'a#b#c': transform(table_data[column], table_data[low], table_data[high])
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = table_data
pd.testing.assert_frame_equal(expected_out, out)
def test_is_valid_strict_true(self):
"""Test the ``Between.is_valid`` method with strict True.
If strict is True, equal values should count as invalid.
Input:
- Table with a valid row, a strictly invalid row and an
invalid row. (pandas.DataFrame)
Output:
- True should be returned for the valid row and False
for the other two. (pandas.Series)
"""
# Setup
column = 'a'
low = 0.0
high = 1.0
instance = Between(column=column, low=low, high=high, strict=True, high_is_scalar=True,
low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 1, 3],
})
instance.fit(table_data)
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, False, False])
pd.testing.assert_series_equal(expected_out, out, check_names=False)
def test_is_valid_strict_false(self):
"""Test the ``Between.is_valid`` method with strict False.
If strict is False, equal values should count as valid.
Input:
- Table with a valid row, a strictly invalid row and an
invalid row. (pandas.DataFrame)
Output:
- True should be returned for the first two rows, and False
for the last one (pandas.Series)
"""
# Setup
column = 'a'
low = 0.0
high = 1.0
instance = Between(column=column, low=low, high=high, strict=False, high_is_scalar=True,
low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 1, 3],
})
instance.fit(table_data)
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, False])
pd.testing.assert_series_equal(expected_out, out, check_names=False)
def test_is_valid_scalar_column(self):
"""Test the ``Between.is_valid`` method with ``low`` as scalar and ``high`` as a column.
        It is expected to return whether the constraint ``column`` is between the
``low`` and ``high`` values.
Input:
- Table data where the last value is greater than ``high``. (pandas.DataFrame)
Output:
        - True should be returned for the first two rows, False
for the last one. (pandas.Series)
"""
# Setup
column = 'a'
low = 0.0
high = 'b'
instance = Between(column=column, low=low, high=high, low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [0.5, 1, 0.6],
})
instance.fit(table_data)
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, False])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_column_scalar(self):
"""Test the ``Between.is_valid`` method with ``low`` as a column and ``high`` as scalar.
        It is expected to return whether the constraint ``column`` is between the
``low`` and ``high`` values.
Input:
- Table data where the second value is smaller than ``low`` and
last value is greater than ``high``. (pandas.DataFrame)
Output:
- True should be returned for the first row, False
for the last two. (pandas.Series)
"""
# Setup
column = 'a'
low = 'b'
high = 1.0
instance = Between(column=column, low=low, high=high, high_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 1.9],
'b': [-0.5, 1, 0.6],
})
instance.fit(table_data)
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, False, False])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_column_column(self):
"""Test the ``Between.is_valid`` method with ``low`` and ``high`` as columns.
        It is expected to return whether the constraint ``column`` is between the
``low`` and ``high`` values.
Input:
- Table data where the last value is greater than ``high``. (pandas.DataFrame)
Output:
        - True should be returned for the first two rows, False
for the last one. (pandas.Series)
"""
# Setup
column = 'a'
low = 'b'
high = 'c'
instance = Between(column=column, low=low, high=high)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [0, -1, 0.5],
'c': [0.5, 1, 0.6]
})
instance.fit(table_data)
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, False])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_low_high_nans(self):
"""Test the ``Between.is_valid`` method with nan values in low and high columns.
If one of `low` or `high` is NaN, expect it to be ignored in the comparison.
If both are NaN or the constraint column is NaN, return True.
Input:
- Table with a NaN row
Output:
- True should be returned for the NaN row.
"""
# Setup
instance = Between(column='a', low='b', high='c')
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9, 1.0],
'b': [0, None, None, 0.4],
'c': [0.5, None, 0.6, None]
})
instance.fit(table_data)
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, False, True])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_column_nans(self):
"""Test the ``Between.is_valid`` method with nan values in constraint column.
        If the constraint column is NaN, expect that `is_valid` returns True.
Input:
- Table with a NaN row
Output:
- True should be returned for the NaN row.
"""
# Setup
instance = Between(column='a', low='b', high='c')
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, None],
'b': [0, 0.1, 0.5],
'c': [0.5, 1.5, 0.6]
})
instance.fit(table_data)
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, True])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_high_scalar_low_nans(self):
"""Test the ``Between.is_valid`` method with ``high`` as scalar and ``low`` containing NaNs.
The NaNs in ``low`` should be ignored.
Input:
- Table with a NaN row
Output:
- The NaN values should be ignored when making comparisons.
"""
# Setup
column = 'a'
low = 'b'
high = 1.0
instance = Between(column=column, low=low, high=high, high_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 1.9],
'b': [-0.5, None, None],
})
instance.fit(table_data)
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, False])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_low_high_nans_datetime(self):
"""Test the ``Between.is_valid`` method with nan values in low and high datetime columns.
If one of `low` or `high` is NaN, expect it to be ignored in the comparison.
If both are NaN or the constraint column is NaN, return True.
Input:
        - Table with rows containing NaNs.
Output:
- True should be returned for the NaN row.
"""
# Setup
instance = Between(column='a', low='b', high='c')
# Run
table_data = pd.DataFrame({
'a': [
pd.to_datetime('2020-09-13'),
pd.to_datetime('2020-08-12'),
pd.to_datetime('2020-08-13'),
pd.to_datetime('2020-08-14'),
],
'b': [
pd.to_datetime('2020-09-03'),
None,
None,
pd.to_datetime('2020-10-03'),
],
'c': [
pd.to_datetime('2020-10-03'),
pd.to_datetime('2020-11-01'),
None,
None,
]
})
instance.fit(table_data)
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, True, False])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_column_nans_datetime(self):
"""Test the ``Between.is_valid`` method with nan values in the constraint column.
If there is a row containing NaNs, expect that `is_valid` returns True.
Input:
        - Table with rows containing NaNs.
Output:
- True should be returned for the NaN row.
"""
# Setup
instance = Between(column='a', low='b', high='c')
# Run
table_data = pd.DataFrame({
'a': [
None,
pd.to_datetime('2020-08-12'),
pd.to_datetime('2020-08-13'),
],
'b': [
pd.to_datetime('2020-09-03'),
pd.to_datetime('2020-08-02'),
pd.to_datetime('2020-08-03'),
],
'c': [
pd.to_datetime('2020-10-03'),
pd.to_datetime('2020-11-01'),
pd.to_datetime('2020-11-01'),
]
})
instance.fit(table_data)
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, True])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_high_datetime_low_nans(self):
"""Test the ``Between.is_valid`` method with ``high`` as datetime and ``low`` with NaNs.
The NaNs in ``low`` should be ignored.
Input:
- Table with a NaN row
Output:
- The NaN values should be ignored when making comparisons.
"""
# Setup
column = 'a'
low = 'b'
high = pd.to_datetime('2020-08-13')
instance = Between(column=column, low=low, high=high, high_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [
pd.to_datetime('2020-08-12'),
pd.to_datetime('2020-08-12'),
pd.to_datetime('2020-08-14'),
],
'b': [
pd.to_datetime('2020-06-03'),
None,
None,
],
})
instance.fit(table_data)
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, False])
pd.testing.assert_series_equal(expected_out, out)
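# A minimal end-to-end sketch of the ``Between`` flow exercised above: ``fit``,
# then ``transform`` (which replaces the constrained column with a logit-scaled
# ``column#low#high`` column), then ``reverse_transform`` to recover the input.
# ``Between`` and ``pd`` come from this module's existing imports; the data
# values below are illustrative only.
def _example_between_round_trip():
    data = pd.DataFrame({'a': [0.1, 0.5, 0.9], 'b': [4, 5, 6]})
    constraint = Between(column='a', low=0.0, high=1.0,
                         low_is_scalar=True, high_is_scalar=True)
    constraint.fit(data)
    transformed = constraint.transform(data)       # columns: ['b', 'a#0.0#1.0']
    recovered = constraint.reverse_transform(transformed)
    return transformed, recovered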
class TestOneHotEncoding():
def test_reverse_transform(self):
"""Test the ``OneHotEncoding.reverse_transform`` method.
It is expected to, for each of the appropriate rows, set the column
with the largest value to one and set all other columns to zero.
Input:
- Table data with any numbers (pandas.DataFrame)
Output:
- Table data where the appropriate rows are one hot (pandas.DataFrame)
"""
# Setup
instance = OneHotEncoding(columns=['a', 'b'])
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.8],
'b': [0.8, 0.1, 0.9],
'c': [1, 2, 3]
})
out = instance.reverse_transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [0.0, 1.0, 0.0],
'b': [1.0, 0.0, 1.0],
'c': [1, 2, 3]
})
pd.testing.assert_frame_equal(expected_out, out)
def test_is_valid(self):
"""Test the ``OneHotEncoding.is_valid`` method.
``True`` when for the rows where the data is one hot, ``False`` otherwise.
Input:
- Table data (pandas.DataFrame)
Output:
- Series of ``True`` and ``False`` values (pandas.Series)
"""
# Setup
instance = OneHotEncoding(columns=['a', 'b', 'c'])
# Run
table_data = pd.DataFrame({
'a': [1.0, 1.0, 0.0, 1.0],
'b': [0.0, 1.0, 0.0, 0.5],
'c': [0.0, 2.0, 0.0, 0.0],
'd': [1, 2, 3, 4]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, False, False, False])
pd.testing.assert_series_equal(expected_out, out)
def test__sample_constraint_columns_proper(self):
"""Test the ``OneHotEncoding._sample_constraint_columns`` method.
Expected to return a table with the appropriate complementary column ``b``,
since column ``a`` is entirely defined by the ``condition`` table.
Input:
- Table data (pandas.DataFrame)
Output:
- Table where ``a`` is the same as in ``condition``
        and ``b`` is complementary (pandas.DataFrame)
"""
# Setup
data = pd.DataFrame({
'a': [1.0, 0.0] * 5,
'b': [0.0, 1.0] * 5,
})
instance = OneHotEncoding(columns=['a', 'b'])
instance.fit(data)
# Run
condition = pd.DataFrame({
'a': [1.0, 0.0, 0.0] * 5,
})
out = instance._sample_constraint_columns(condition)
# Assert
expected_out = pd.DataFrame({
'a': [1.0, 0.0, 0.0] * 5,
'b': [0.0, 1.0, 1.0] * 5,
})
pd.testing.assert_frame_equal(expected_out, out)
def test__sample_constraint_columns_one_one(self):
"""Test the ``OneHotEncoding._sample_constraint_columns`` method.
        Since the condition column contains a one in every row, all other
        columns are expected to be set to zero.
Input:
- Table data (pandas.DataFrame)
Output:
        - Table where the first column contains ones and the other columns contain zeros (pandas.DataFrame)
"""
# Setup
data = pd.DataFrame({
'a': [1.0, 0.0] * 5,
'b': [0.0, 1.0] * 5,
'c': [0.0, 0.0] * 5
})
instance = OneHotEncoding(columns=['a', 'b', 'c'])
instance.fit(data)
# Run
condition = pd.DataFrame({
'a': [1.0] * 10
})
out = instance._sample_constraint_columns(condition)
# Assert
expected_out = pd.DataFrame({
'a': [1.0] * 10,
'b': [0.0] * 10,
'c': [0.0] * 10
})
pd.testing.assert_frame_equal(expected_out, out)
def test__sample_constraint_columns_two_ones(self):
"""Test the ``OneHotEncoding._sample_constraint_columns`` method.
Expected to raise a ``ValueError``, since the condition contains two ones
in a single row.
Input:
- Table data (pandas.DataFrame)
Raise:
- ``ValueError``
"""
# Setup
data = pd.DataFrame({
'a': [1.0, 0.0] * 5,
'b': [0.0, 1.0] * 5,
'c': [0.0, 0.0] * 5
})
instance = OneHotEncoding(columns=['a', 'b', 'c'])
instance.fit(data)
# Run
condition = pd.DataFrame({
'a': [1.0] * 10,
'b': [1.0] * 10,
'c': [0.0] * 10
})
# Assert
with pytest.raises(ValueError):
instance._sample_constraint_columns(condition)
def test__sample_constraint_columns_non_binary(self):
"""Test the ``OneHotEncoding._sample_constraint_columns`` method.
Expected to raise a ``ValueError``, since the condition contains a non binary value.
Input:
- Table data (pandas.DataFrame)
Raise:
- ``ValueError``
"""
# Setup
data = pd.DataFrame({
'a': [1.0, 0.0] * 5,
'b': [0.0, 1.0] * 5,
'c': [0.0, 0.0] * 5
})
instance = OneHotEncoding(columns=['a', 'b', 'c'])
instance.fit(data)
# Run
condition = pd.DataFrame({
'a': [0.5] * 10
})
# Assert
with pytest.raises(ValueError):
instance._sample_constraint_columns(condition)
def test__sample_constraint_columns_all_zeros(self):
"""Test the ``OneHotEncoding._sample_constraint_columns`` method.
Expected to raise a ``ValueError``, since the condition contains only zeros.
Input:
- Table data (pandas.DataFrame)
Raise:
- ``ValueError``
"""
# Setup
data = pd.DataFrame({
'a': [1, 0] * 5,
'b': [0, 1] * 5,
'c': [0, 0] * 5
})
instance = OneHotEncoding(columns=['a', 'b', 'c'])
instance.fit(data)
# Run
condition = pd.DataFrame({
'a': [0.0] * 10,
'b': [0.0] * 10,
'c': [0.0] * 10
})
# Assert
with pytest.raises(ValueError):
instance._sample_constraint_columns(condition)
def test__sample_constraint_columns_valid_condition(self):
"""Test the ``OneHotEncoding._sample_constraint_columns`` method.
Expected to generate a table where every column satisfies the ``condition``.
Input:
- Table data (pandas.DataFrame)
Output:
        - Table satisfying the ``condition`` (pandas.DataFrame)
"""
# Setup
data = pd.DataFrame({
'a': [1.0, 0.0] * 5,
'b': [0.0, 1.0] * 5,
'c': [0.0, 0.0] * 5
})
instance = OneHotEncoding(columns=['a', 'b', 'c'])
instance.fit(data)
# Run
condition = pd.DataFrame({
'a': [0.0] * 10,
'b': [1.0] * 10,
'c': [0.0] * 10
})
out = instance._sample_constraint_columns(condition)
# Assert
expected_out = pd.DataFrame({
'a': [0.0] * 10,
'b': [1.0] * 10,
'c': [0.0] * 10
})
pd.testing.assert_frame_equal(expected_out, out)
def test__sample_constraint_columns_one_zero(self):
"""Test the ``OneHotEncoding._sample_constraint_columns`` method.
        Since the condition fixes only one column (to zero), the remaining columns
        are expected to be sampled from any valid possibility. Since the ``b`` column
        in ``data`` contains all the ones, it is expected to return a table where
        only ``b`` has ones.
Input:
- Table data (pandas.DataFrame)
Output:
        - Table where ``b`` is all ones and the other columns are all zeros (pandas.DataFrame)
"""
# Setup
data = pd.DataFrame({
'a': [0.0, 0.0] * 5,
'b': [1.0, 1.0] * 5,
'c': [0.0, 0.0] * 5
})
instance = OneHotEncoding(columns=['a', 'b', 'c'])
instance.fit(data)
# Run
condition = pd.DataFrame({
'c': [0.0] * 10
})
out = instance._sample_constraint_columns(condition)
# Assert
expected_out = pd.DataFrame({
'c': [0.0] * 10,
'a': [0.0] * 10,
'b': [1.0] * 10
})
pd.testing.assert_frame_equal(expected_out, out)
def test__sample_constraint_columns_one_zero_alt(self):
"""Test the ``OneHotEncoding._sample_constraint_columns`` method.
        Since the condition fixes only one column (to zero), the remaining columns
        are expected to be sampled from any valid possibility.
Input:
- Table data (pandas.DataFrame)
Output:
        - Table where ``c`` is all zeros and exactly one of ``a`` or ``b`` is one in each row (pandas.DataFrame)
"""
# Setup
data = pd.DataFrame({
'a': [1.0, 0.0] * 5,
'b': [0.0, 1.0] * 5,
'c': [0.0, 0.0] * 5
})
instance = OneHotEncoding(columns=['a', 'b', 'c'])
instance.fit(data)
# Run
condition = pd.DataFrame({
'c': [0.0] * 10
})
out = instance._sample_constraint_columns(condition)
# Assert
assert (out['c'] == 0.0).all()
assert ((out['b'] == 1.0) ^ (out['a'] == 1.0)).all()
def test_sample_constraint_columns_list_of_conditions(self):
"""Test the ``OneHotEncoding._sample_constraint_columns`` method.
Expected to generate a table satisfying the ``condition``.
Input:
- Table data (pandas.DataFrame)
Output:
- Table satisfying the ``condition`` (pandas.DataFrame)
"""
# Setup
data = pd.DataFrame({
'a': [1.0, 0.0] * 5,
'b': [0.0, 1.0] * 5,
'c': [0.0, 0.0] * 5
})
instance = OneHotEncoding(columns=['a', 'b', 'c'])
instance.fit(data)
# Run
condition = pd.DataFrame({
'a': [0.0, 1.0] * 5,
'c': [0.0, 0.0] * 5
})
out = instance._sample_constraint_columns(condition)
# Assert
expected_output = pd.DataFrame({
'a': [0.0, 1.0] * 5,
'c': [0.0, 0.0] * 5,
'b': [1.0, 0.0] * 5
})
pd.testing.assert_frame_equal(out, expected_output)
def test_sample_constraint_columns_negative_values(self):
"""Test the ``OneHotEncoding._sample_constraint_columns`` method.
Expected to raise a ``ValueError``, since condition is not a one hot vector.
        This tests that a ``ValueError`` is raised even if the values in a row sum to one.
Input:
- Table data (pandas.DataFrame)
Raise:
- ``ValueError``
"""
# Setup
data = pd.DataFrame({
'a': [1.0] * 10,
'b': [-1.0] * 10,
'c': [1.0] * 10
})
instance = OneHotEncoding(columns=['a', 'b', 'c'])
instance.fit(data)
# Run
condition = pd.DataFrame({
'a': [1.0] * 10,
'b': [-1.0] * 10,
'c': [1.0] * 10
})
# Assert
with pytest.raises(ValueError):
instance._sample_constraint_columns(condition)
def test_sample_constraint_columns_all_zeros_but_one(self):
"""Test the ``OneHotEncoding._sample_constraint_columns`` method.
Expected to generate a table where column ``a`` is filled with ones,
and ``b`` and ``c`` filled with zeros.
Input:
- Table data (pandas.DataFrame)
Output:
- Table satisfying the ``condition`` (pandas.DataFrame)
"""
# Setup
data = pd.DataFrame({
'a': [1.0, 0.0] * 5,
'b': [0.0, 1.0] * 5,
'c': [0.0, 0.0] * 5
})
instance = OneHotEncoding(columns=['a', 'b', 'c'])
instance.fit(data)
# Run
condition = pd.DataFrame({
'a': [0.0] * 10,
'c': [0.0] * 10
})
out = instance._sample_constraint_columns(condition)
# Assert
expected_output = pd.DataFrame({
'a': [0.0] * 10,
'c': [0.0] * 10,
'b': [1.0] * 10
})
pd.testing.assert_frame_equal(out, expected_output)
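# The ``OneHotEncoding.is_valid`` tests above encode a simple row-wise rule: a
# row is valid only when exactly one of the constrained columns equals 1.0 and
# the remaining ones equal 0.0.  The helper below is our own sketch of that rule
# for illustration, not the library implementation.
def _example_one_hot_check(table_data, columns):
    subset = table_data[columns]
    exactly_one_one = (subset == 1.0).sum(axis=1) == 1
    rest_are_zero = (subset == 0.0).sum(axis=1) == len(columns) - 1
    return exactly_one_one & rest_are_zero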
class TestUnique():
def test___init__(self):
"""Test the ``Unique.__init__`` method.
The ``columns`` should be set to those provided and the
``handling_strategy`` should be set to ``'reject_sampling'``.
Input:
- column names to keep unique.
Output:
- Instance with ``columns`` set and ``transform``
and ``reverse_transform`` methods set to ``instance._identity``.
"""
# Run
instance = Unique(columns=['a', 'b'])
# Assert
assert instance.columns == ['a', 'b']
assert instance.fit_columns_model is False
assert instance.transform == instance._identity
assert instance.reverse_transform == instance._identity
def test___init__one_column(self):
"""Test the ``Unique.__init__`` method.
The ``columns`` should be set to a list even if a string is
provided.
Input:
- string that is the name of a column.
Output:
- Instance with ``columns`` set to list of one element.
"""
# Run
instance = Unique(columns='a')
# Assert
assert instance.columns == ['a']
def test_is_valid(self):
"""Test the ``Unique.is_valid`` method.
This method should return a pd.Series where the index
        of the first occurrence of a unique combination of ``instance.columns``
        is set to ``True``, and every other occurrence is set to ``False``.
Input:
- DataFrame with multiple of the same combinations of columns.
Output:
        - Series with the index of the first occurrences set to ``True``.
"""
# Setup
instance = Unique(columns=['a', 'b', 'c'])
# Run
data = pd.DataFrame({
'a': [1, 1, 2, 2, 3, 4],
'b': [5, 5, 6, 6, 7, 8],
'c': [9, 9, 10, 10, 12, 13]
})
valid = instance.is_valid(data)
# Assert
expected = pd.Series([True, False, True, False, True, True])
pd.testing.assert_series_equal(valid, expected)
def test_is_valid_custom_index_same_values(self):
"""Test the ``Unique.is_valid`` method.
This method should return a pd.Series where the index
        of the first occurrence of a unique combination of ``instance.columns``
        is set to ``True``, and every other occurrence is set to ``False``.
Input:
- DataFrame with multiple of the same combinations of columns.
- DataFrame has a custom index column which is set to 0 for rows.
Output:
        - Series with the index of the first occurrences set to ``True``.
Github Issue:
- Problem is described in: https://github.com/sdv-dev/SDV/issues/616
"""
# Setup
instance = Unique(columns=['a', 'b', 'c'])
# Run
data = pd.DataFrame({
'a': [1, 1, 2, 2, 3],
'b': [5, 5, 6, 6, 7],
'c': [8, 8, 9, 9, 10]
}, index=[0, 0, 0, 0, 0])
valid = instance.is_valid(data)
# Assert
expected = pd.Series([True, False, True, False, True], index=[0, 0, 0, 0, 0])
pd.testing.assert_series_equal(valid, expected)
def test_is_valid_custom_index_not_sorted(self):
"""Test the ``Unique.is_valid`` method.
This method should return a pd.Series where the index
        of the first occurrence of a unique combination of ``instance.columns``
        is set to ``True``, and every other occurrence is set to ``False``.
Input:
- DataFrame with multiple of the same combinations of columns.
- DataFrame has a custom index column which is set in an unsorted way.
Output:
        - Series with the index of the first occurrences set to ``True``.
Github Issue:
- Problem is described in: https://github.com/sdv-dev/SDV/issues/617
"""
# Setup
instance = Unique(columns=['a', 'b', 'c'])
# Run
data = pd.DataFrame({
'a': [1, 1, 2, 2, 3],
'b': [5, 5, 6, 6, 7],
'c': [8, 8, 9, 9, 10]
}, index=[2, 1, 3, 5, 4])
valid = instance.is_valid(data)
# Assert
        expected = pd.Series([True, False, True, False, True], index=[2, 1, 3, 5, 4])
        pd.testing.assert_series_equal(valid, expected)
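# The ``Unique.is_valid`` tests above describe a first-occurrence rule: the first
# row of each unique combination of the constrained columns is valid, and every
# later duplicate is not.  The helper below is our own pandas sketch of that rule
# (not necessarily how the library implements it); it preserves custom indexes,
# matching the GitHub-issue cases tested above.
def _example_unique_check(data, columns):
    return ~data.duplicated(subset=columns, keep='first')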
"""
Unit tests for base boost capability.
"""
# Author: <NAME>
# License: MIT
import unittest
import sklearn.ensemble
import pandas as pd
from sklearn.datasets import load_boston, load_linnerud
from sklearn.linear_model import Ridge
from sklearn.preprocessing import StandardScaler
from physlearn import ModifiedPipeline, Regressor
from physlearn.datasets import load_benchmark
from physlearn.loss import LOSS_FUNCTIONS
from physlearn.supervised import ShapInterpret
class TestBaseBoost(unittest.TestCase):
def test_benchmark(self):
_, X_test, _, y_test = load_benchmark(return_split=True)
reg = Regressor()
score = reg.score(y_test, X_test)
self.assertEqual(score['mae'].mean().round(decimals=2), 1.34)
self.assertEqual(score['mse'].mean().round(decimals=2), 4.19)
self.assertEqual(score['rmse'].mean().round(decimals=2), 1.88)
self.assertEqual(score['r2'].mean().round(decimals=2), 0.99)
self.assertEqual(score['ev'].mean().round(decimals=2), 0.99)
def test_pipeline_score_uniform_average(self):
X_train, X_test, y_train, y_test = load_benchmark(return_split=True)
line_search_options = dict(init_guess=1, opt_method='minimize',
method='Nelder-Mead', tol=1e-7,
options={"maxiter": 10000},
niter=None, T=None, loss='lad',
regularization=0.1)
base_boosting_options = dict(n_regressors=3, boosting_loss='lad',
line_search_options=line_search_options)
pipe = ModifiedPipeline(steps=[('scaler', StandardScaler()), ('reg', Ridge())],
base_boosting_options=base_boosting_options)
pipe.fit(X_train, y_train)
score = pipe.score(X_test, y_test, multioutput='uniform_average')
self.assertEqual(score['mae'].mean().round(decimals=2), 1.19)
self.assertEqual(score['mse'].mean().round(decimals=2), 3.49)
self.assertEqual(score['rmse'].mean().round(decimals=2), 1.87)
self.assertEqual(score['r2'].mean().round(decimals=2), 0.99)
self.assertEqual(score['ev'].mean().round(decimals=2), 0.99)
def test_return_incumbent(self):
X_train, X_test, y_train, y_test = load_benchmark(return_split=True)
linear_basis_fn = 'ridge'
n_regressors = 1
boosting_loss = 'ls'
line_search_options = dict(init_guess=1, opt_method='minimize',
method='Nelder-Mead', tol=1e-7,
options={"maxiter": 10000},
niter=None, T=None, loss='lad',
regularization=0.1)
base_boosting_options = dict(n_regressors=n_regressors,
boosting_loss=boosting_loss,
line_search_options=line_search_options)
index = 3
reg = Regressor(regressor_choice=linear_basis_fn, params=dict(alpha=0.1),
target_index=index, base_boosting_options=base_boosting_options)
reg.baseboostcv(X_train.iloc[:10, :], y_train.iloc[:10, :])
self.assertHasAttr(reg, 'return_incumbent_')
def test_baseboostcv_score(self):
X_train, X_test, y_train, y_test = load_benchmark(return_split=True)
stack = dict(regressors=['ridge', 'lgbmregressor'],
final_regressor='ridge')
line_search_options = dict(init_guess=1, opt_method='minimize',
method='Nelder-Mead', tol=1e-7,
options={"maxiter": 10000},
niter=None, T=None, loss='lad',
regularization=0.1)
base_boosting_options = dict(n_regressors=3,
boosting_loss='ls',
line_search_options=line_search_options)
reg = Regressor(regressor_choice='stackingregressor', target_index=0,
stacking_options=dict(layers=stack),
base_boosting_options=base_boosting_options)
y_pred = reg.baseboostcv(X_train, y_train).predict(X_test)
score = reg.score(y_test, y_pred)
self.assertNotHasAttr(reg, 'return_incumbent_')
self.assertGreaterEqual(score['mae'].values, 0.0)
self.assertGreaterEqual(score['mse'].values, 0.0)
self.assertLess(score['mae'].values, 2.0)
self.assertLess(score['mse'].values, 6.2)
def test_squared_error(self):
X, y = load_boston(return_X_y=True)
loss = LOSS_FUNCTIONS['ls']()
score = loss(y=y, raw_predictions=X[:, 0]).round(decimals=2)
sklearn_loss = sklearn.ensemble._gb_losses.LOSS_FUNCTIONS['ls']()
sklearn_score = sklearn_loss(y=y, raw_predictions=X[:, 0]).round(decimals=2)
self.assertEqual(score, sklearn_score)
score = loss(y=pd.Series(y), raw_predictions=X[:, 0]).round(decimals=2)
self.assertEqual(score, sklearn_score)
score = loss(y=pd.Series(y),
raw_predictions=pd.DataFrame(X).iloc[:, 0]).round(decimals=2)
self.assertEqual(score, sklearn_score)
score = loss(y=y,
raw_predictions=pd.DataFrame(X).iloc[:, 0]).round(decimals=2)
self.assertEqual(score, sklearn_score)
X, y = load_linnerud(return_X_y=True)
for i in range(y.shape[1]):
score = loss(y=y[:, i], raw_predictions=X[:, 0]).round(decimals=2)
sklearn_score = sklearn_loss(y=y[:, i],
raw_predictions=X[:, 0]).round(decimals=2)
self.assertEqual(score, sklearn_score)
score = loss(y=pd.DataFrame(y).iloc[:, i],
raw_predictions=X[:, 0]).round(decimals=2)
self.assertEqual(score, sklearn_score)
score = loss(y=pd.DataFrame(y).iloc[:, i],
raw_predictions=pd.DataFrame(X).iloc[:, 0]).round(decimals=2)
self.assertEqual(score, sklearn_score)
score = loss(y=y[:, i],
raw_predictions=pd.DataFrame(X).iloc[:, 0]).round(decimals=2)
self.assertEqual(score, sklearn_score)
def test_absolute_error(self):
X, y = load_boston(return_X_y=True)
loss = LOSS_FUNCTIONS['lad']()
score = loss(y=y, raw_predictions=X[:, 0]).round(decimals=2)
sklearn_loss = sklearn.ensemble._gb_losses.LOSS_FUNCTIONS['lad']()
sklearn_score = sklearn_loss(y=y, raw_predictions=X[:, 0]).round(decimals=2)
self.assertEqual(score, sklearn_score)
score = loss(y=pd.Series(y), raw_predictions=X[:, 0]).round(decimals=2)
self.assertEqual(score, sklearn_score)
score = loss(y=pd.Series(y),
raw_predictions=pd.DataFrame(X).iloc[:, 0]).round(decimals=2)
self.assertEqual(score, sklearn_score)
score = loss(y=y,
raw_predictions=pd.DataFrame(X).iloc[:, 0]).round(decimals=2)
self.assertEqual(score, sklearn_score)
X, y = load_linnerud(return_X_y=True)
for i in range(y.shape[1]):
score = loss(y=y[:, i], raw_predictions=X[:, 0]).round(decimals=2)
sklearn_score = sklearn_loss(y=y[:, i],
raw_predictions=X[:, 0]).round(decimals=2)
self.assertEqual(score, sklearn_score)
score = loss(y=pd.DataFrame(y).iloc[:, i],
raw_predictions=X[:, 0]).round(decimals=2)
self.assertEqual(score, sklearn_score)
score = loss(y=pd.DataFrame(y).iloc[:, i],
raw_predictions=pd.DataFrame(X).iloc[:, 0]).round(decimals=2)
self.assertEqual(score, sklearn_score)
score = loss(y=y[:, i],
                         raw_predictions=pd.DataFrame(X).iloc[:, 0]).round(decimals=2)
            self.assertEqual(score, sklearn_score)
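# The loss tests above all follow one pattern: evaluate the physlearn loss and
# the corresponding scikit-learn gradient-boosting loss on the same target and
# prediction, then require the rounded scores to agree.  The helper below is a
# condensed sketch of that pattern using the imports already present in this
# module; the helper name is ours.
def _example_loss_parity(y, raw_predictions, key='ls'):
    ours = LOSS_FUNCTIONS[key]()(y=y, raw_predictions=raw_predictions)
    theirs = sklearn.ensemble._gb_losses.LOSS_FUNCTIONS[key]()(
        y=y, raw_predictions=raw_predictions)
    return ours.round(decimals=2) == theirs.round(decimals=2)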
from linearmodels.compat.statsmodels import Summary
import warnings
import numpy as np
from numpy.linalg import pinv
from numpy.testing import assert_allclose, assert_equal
import pandas as pd
from pandas.testing import assert_series_equal
import pytest
import scipy.linalg
from statsmodels.tools.tools import add_constant
from linearmodels.datasets import card
from linearmodels.iv import IV2SLS, IVGMM, IVGMMCUE, IVLIML
from linearmodels.iv.model import _OLS
from linearmodels.iv.results import compare
from linearmodels.shared.hypotheses import WaldTestStatistic
from linearmodels.shared.utility import AttrDict
@pytest.fixture(scope="module")
def data():
n, q, k, p = 1000, 2, 5, 3
rs = np.random.RandomState(12345)
clusters = rs.randint(0, 10, n)
rho = 0.5
r = scipy.linalg.toeplitz([1] + (rho + np.linspace(0.1, -0.1, 8)).tolist())
r[-1, 2:] = 0
r[2:, -1] = 0
r[-1, -1] = 1
v = rs.multivariate_normal(np.zeros(r.shape[0]), r, n)
x = v[:, :k]
z = v[:, k : k + p]
e = v[:, [-1]]
params = np.arange(1, k + 1) / k
params = params[:, None]
y = x @ params + e
exog_instr = np.column_stack((x[:, q:], z))
xhat = exog_instr @ np.linalg.pinv(exog_instr) @ x
nobs, nvar = x.shape
s2 = e.T @ e / nobs
s2_debiased = e.T @ e / (nobs - nvar)
v = xhat.T @ xhat / nobs
vinv = np.linalg.inv(v)
kappa = 0.99
vk = (x.T @ x * (1 - kappa) + kappa * xhat.T @ xhat) / nobs
return AttrDict(
nobs=nobs,
e=e,
x=x,
y=y,
z=z,
xhat=xhat,
params=params,
s2=s2,
s2_debiased=s2_debiased,
clusters=clusters,
nvar=nvar,
v=v,
vinv=vinv,
vk=vk,
kappa=kappa,
dep=y,
exog=x[:, q:],
endog=x[:, :q],
instr=z,
)
def get_all(v):
attr = [d for d in dir(v) if not d.startswith("_")]
for a in attr:
val = getattr(v, a)
if a in ("conf_int", "durbin", "wu_hausman", "c_stat"):
val()
def test_rank_deficient_exog_exception(data):
exog = data.exog.copy()
exog[:, :2] = 1
with pytest.raises(ValueError):
IV2SLS(data.dep, exog, data.endog, data.instr)
def test_rank_deficient_endog_exception(data):
endog = data.endog.copy()
endog[:, :2] = 1
with pytest.raises(ValueError):
IV2SLS(data.dep, data.exog, endog, data.instr)
with pytest.raises(ValueError):
IV2SLS(data.dep, data.exog, data.exog, data.instr)
def test_invalid_weights_exception(data):
weights = np.zeros_like(data.dep)
with pytest.raises(ValueError):
IV2SLS(data.dep, data.exog, data.endog, data.instr, weights=weights)
def test_rank_deficient_instr_exception(data):
instr = data.instr.copy()
instr[:, :2] = 1
with pytest.raises(ValueError):
IV2SLS(data.dep, data.exog, data.endog, instr)
with pytest.raises(ValueError):
IV2SLS(data.dep, data.exog, data.endog, data.exog)
def test_kappa_error_exception(data):
with pytest.raises(ValueError):
IVLIML(data.dep, data.exog, data.endog, data.instr, kappa=np.array([1]))
def test_fuller_error_exception(data):
with pytest.raises(ValueError):
IVLIML(data.dep, data.exog, data.endog, data.instr, fuller=np.array([1]))
def test_kappa_fuller_warning_exception(data):
with warnings.catch_warnings(record=True) as w:
IVLIML(data.dep, data.exog, data.endog, data.instr, kappa=0.99, fuller=1)
assert len(w) == 1
def test_string_cat_exception(data):
instr = data.instr.copy()
n = data.instr.shape[0]
cat = pd.Series(["a"] * (n // 2) + ["b"] * (n // 2))
    instr = pd.DataFrame(instr)
from main_app_v3 import forecast_cases, forecast_cases_active
import pandas as pd
import numpy as np
import os
import warnings
warnings.filterwarnings("ignore")
from bs4 import BeautifulSoup as bs
from datetime import date
from selenium import webdriver
def update(region_dict, driver):
df = pd.read_html(driver.page_source)[0]
df.columns = ["slno", "Region", "Active_Cases", "Active_New", "Cured/Discharged", "Cured_New", "Death", "Deaths_New"]
df["Date"] = [date.today().strftime("%d-%m-%Y") for i in range(len(df))]
df.drop(columns=["slno", "Active_New", "Cured_New", "Deaths_New"], inplace=True)
df = df.iloc[:-4,:]
df.iloc[-1,0] = "India"
df["Confirmed_Cases"] = [df.iloc[i,1:-1].astype(int).sum() for i in range(len(df))]
cols = list(df.columns)
cols = [i.replace(" ","_") for i in cols]
df.columns = cols
#df["Date"] = [date.today().strftime('%Y-%m-%d') for i in range(len(df))]
#df.index = pd.to_datetime(df.Date,format='%Y-%m-%d')
df["Date"] = [date.today().strftime('%d-%m-%Y') for i in range(len(df))]
df.index = pd.to_datetime(df.Date,format='%d-%m-%Y')
for i,region in enumerate(region_dict):
region_dict[region] = region_dict[region].append(df[df.Region == region], sort=False, ignore_index=True)
try:
region_dict[region].index = pd.to_datetime(region_dict[region].Date,format='%d-%m-%Y')
except:
region_dict[region].index = pd.to_datetime(region_dict[region].Date,format='%Y-%m-%d')
return region_dict
def update_database():
df = pd.read_csv("COVID_Database.csv")
cols = list(df.columns)
cols = [i.replace(" ","_") for i in cols]
df.columns = cols
states_df = df.groupby('Region')
state_df = [pd.DataFrame(states_df.get_group(state)) for state in states_df.groups]
#GOOGLE_CHROME_PATH = '/app/.apt/usr/bin/google_chrome'
CHROMEDRIVER_PATH = '/app/.chromedriver/bin/chromedriver'
#selenium setup
chrome_options = webdriver.ChromeOptions()
#chrome_options.binary_location = GOOGLE_CHROME_PATH
chrome_options.add_argument("--headless")
chrome_options.add_argument("--disable-dev-shm-usage")
chrome_options.add_argument("--disable-gpu")
chrome_options.add_argument("--no-sandbox")
try:
driver=webdriver.Chrome(CHROMEDRIVER_PATH,chrome_options=chrome_options)
except:
driver=webdriver.Chrome(r"C:\Users\Tejas\Downloads\chromedriver_win32\chromedriver.exe",chrome_options=chrome_options)
#driver = webdriver.Chrome(executable_path=os.environ.get("CHROMEDRIVER_PATH"), chrome_options=chrome_options)
driver.get("https://www.mohfw.gov.in/")
region_dict = {}
for region in states_df.groups:
region_dict[region] = pd.DataFrame(states_df.get_group(region))
try:
region_dict[region].index = pd.to_datetime(region_dict[region].Date,format='%Y-%m-%d')
except:
region_dict[region].index = pd.to_datetime(region_dict[region].Date,format='%d-%m-%Y')
region_dict = update(region_dict, driver)
# break
driver.quit()
df = pd.DataFrame()
for region in region_dict.keys():
df = df.append(region_dict[region], sort=False, ignore_index=True)
cols = list(df.columns)
cols = [i.replace(" ","_") for i in cols]
df.columns = cols
df.drop_duplicates(inplace=True)
df.to_csv("COVID_Database.csv", index=False)
return "Database Updated"
def forecasted_database():
df = pd.read_csv("COVID_Database.csv")
df["Forecasted"] = ["" for i in range(len(df))]
regions = sorted(list(df.Region.value_counts().keys()))
df_old = pd.read_csv("Forecast.csv")
forecast_df = pd.DataFrame(columns=['Date', 'Region', 'Confirmed_Cases', 'Active_Cases', 'Cured/Discharged',
'Death', 'Forecasted', 'Forecasted_high', 'Forecasted_low', 'Confirmed_Cases_Daywise', 'Active_Cases_Daywise'])
forecasts = []
forecasts_high = []
forecasts_low = []
forecasts_daywise = []
forecasts_daywise_high = []
forecasts_daywise_low = []
days = []
region_x = []
############
forecasts_active = []
forecasts_active_high = []
forecasts_active_low = []
forecasts_active_daywise = []
forecasts_active_daywise_high = []
forecasts_active_daywise_low = []
n=31
for region in regions:
try:
forecast,std_error,confidence_interval = forecast_cases(region, n=n, return_all=True)
forecast_old = df_old.Forecasted_Cases[df_old.Region==region].iloc[0]
forecast_old = forecast[0]-forecast_old
forecasts_daywise.append(forecast_old)
forecasts_daywise_high.append(forecast_old+(forecast_old*0.05))
forecasts_daywise_low.append(forecast_old-(forecast_old*0.05))
forecast_high = confidence_interval[:,1]
forecast_low = confidence_interval[:,0]
forecasts.extend(forecast)
forecasts_high.extend(forecast_high)
forecasts_low.extend(forecast_low)
forecasts_daywise.extend(pd.Series(forecast).diff(periods=1).iloc[1:])
forecasts_daywise_high.extend(pd.Series(forecast_high).diff(periods=1).iloc[1:])
forecasts_daywise_low.extend(pd.Series(forecast_low).diff(periods=1).iloc[1:])
days.extend(pd.date_range(date.today(), periods=n).strftime('%d-%m-%Y'))
region_x.extend([region for i in range(n)])
######################
forecast_active,std_error_active,confidence_interval_active = forecast_cases_active(region, n=n, return_all=True)
forecast_active_old = df_old.Forecasted_Active_Cases[df_old.Region==region].iloc[0]
forecast_active_old = forecast_active[0]-forecast_active_old
forecasts_active_daywise.append(forecast_active_old)
forecasts_active_daywise_high.append(forecast_active_old+(forecast_active_old*0.05))
forecasts_active_daywise_low.append(forecast_active_old-(forecast_active_old*0.05))
forecast_active_high = confidence_interval_active[:,1]
forecast_active_low = confidence_interval_active[:,0]
forecasts_active.extend(forecast_active)
forecasts_active_high.extend(forecast_active_high)
forecasts_active_low.extend(forecast_active_low)
forecasts_active_daywise.extend(pd.Series(forecast_active).diff(periods=1).iloc[1:])
            forecasts_active_daywise_high.extend(pd.Series(forecast_active_high).diff(periods=1).iloc[1:])
# -*- coding: utf-8 -*-
"""
Generalized Additive Models
Author: <NAME>
Author: <NAME>
created on 08/07/2015
"""
from collections.abc import Iterable
import copy # check if needed when dropping python 2.7
import numpy as np
from scipy import optimize
import pandas as pd
import statsmodels.base.wrapper as wrap
from statsmodels.discrete.discrete_model import Logit
from statsmodels.genmod.generalized_linear_model import (
GLM, GLMResults, GLMResultsWrapper, _check_convergence)
import statsmodels.regression.linear_model as lm
# import statsmodels.regression._tools as reg_tools # TODO: use this for pirls
from statsmodels.tools.sm_exceptions import (PerfectSeparationError,
ValueWarning)
from statsmodels.tools.decorators import cache_readonly
from statsmodels.tools.data import _is_using_pandas
from statsmodels.tools.linalg import matrix_sqrt
from statsmodels.base._penalized import PenalizedMixin
from statsmodels.gam.gam_penalties import MultivariateGamPenalty
from statsmodels.gam.gam_cross_validation.gam_cross_validation import (
MultivariateGAMCVPath)
from statsmodels.gam.gam_cross_validation.cross_validators import KFold
def _transform_predict_exog(model, exog, design_info=None):
"""transform exog for predict using design_info
Note: this is copied from base.model.Results.predict and converted to
standalone function with additional options.
"""
is_pandas = _is_using_pandas(exog, None)
exog_index = exog.index if is_pandas else None
if design_info is None:
design_info = getattr(model.data, 'design_info', None)
if design_info is not None and (exog is not None):
from patsy import dmatrix
if isinstance(exog, pd.Series):
# we are guessing whether it should be column or row
if (hasattr(exog, 'name') and isinstance(exog.name, str) and
exog.name in design_info.describe()):
# assume we need one column
exog = pd.DataFrame(exog)
else:
# assume we need a row
exog = pd.DataFrame(exog).T
orig_exog_len = len(exog)
is_dict = isinstance(exog, dict)
exog = dmatrix(design_info, exog, return_type="dataframe")
if orig_exog_len > len(exog) and not is_dict:
import warnings
if exog_index is None:
warnings.warn('nan values have been dropped', ValueWarning)
else:
exog = exog.reindex(exog_index)
exog_index = exog.index
if exog is not None:
exog = np.asarray(exog)
if exog.ndim == 1 and (model.exog.ndim == 1 or
model.exog.shape[1] == 1):
exog = exog[:, None]
exog = np.atleast_2d(exog) # needed in count model shape[1]
return exog, exog_index
class GLMGamResults(GLMResults):
"""Results class for generalized additive models, GAM.
This inherits from GLMResults.
    Warning: some inherited methods might not correctly take account of the
    penalization.
    All methods related to the log-likelihood function return the penalized
    values.
Attributes
----------
edf
list of effective degrees of freedom for each column of the design
matrix.
hat_matrix_diag
diagonal of hat matrix
gcv
generalized cross-validation criterion computed as
``gcv = scale / (1. - hat_matrix_trace / self.nobs)**2``
cv
cross-validation criterion computed as
``cv = ((resid_pearson / (1 - hat_matrix_diag))**2).sum() / nobs``
Notes
-----
status: experimental
"""
def __init__(self, model, params, normalized_cov_params, scale, **kwds):
# this is a messy way to compute edf and update scale
# need several attributes to compute edf
self.model = model
self.params = params
self.normalized_cov_params = normalized_cov_params
self.scale = scale
edf = self.edf.sum()
self.df_model = edf - 1 # assume constant
# need to use nobs or wnobs attribute
self.df_resid = self.model.endog.shape[0] - edf
# we are setting the model df for the case when super is using it
# df in model will be incorrect state when alpha/pen_weight changes
self.model.df_model = self.df_model
self.model.df_resid = self.df_resid
mu = self.fittedvalues
self.scale = scale = self.model.estimate_scale(mu)
super(GLMGamResults, self).__init__(model, params,
normalized_cov_params, scale,
**kwds)
def _tranform_predict_exog(self, exog=None, exog_smooth=None,
transform=True):
"""Transform original explanatory variables for prediction
Parameters
----------
exog : array_like, optional
The values for the linear explanatory variables.
exog_smooth : array_like
values for the variables in the smooth terms
transform : bool, optional
If transform is False, then ``exog`` is returned unchanged and
            ``exog_smooth`` is ignored. It is assumed that exog contains the full
design matrix for the predict observations.
If transform is True, then the basis representation of the smooth
            term will be constructed from the provided ``exog_smooth``.
Returns
-------
exog_transformed : ndarray
design matrix for the prediction
"""
exog_index = None
if transform is False:
# the following allows that either or both exog are not None
if exog_smooth is None:
# exog could be None or array
ex = exog
else:
if exog is None:
ex = exog_smooth
else:
ex = np.column_stack((exog, exog_smooth))
else:
# transform exog_linear if needed
if exog is not None and hasattr(self.model, 'design_info_linear'):
exog, exog_index = _transform_predict_exog(
self.model, exog, self.model.design_info_linear)
# create smooth basis
if exog_smooth is not None:
ex_smooth = self.model.smoother.transform(exog_smooth)
if exog is None:
ex = ex_smooth
else:
# TODO: there might be problems if exog_smooth is 1-D
ex = np.column_stack((exog, ex_smooth))
else:
ex = exog
return ex, exog_index
def predict(self, exog=None, exog_smooth=None, transform=True, **kwargs):
""""
compute prediction
Parameters
----------
exog : array_like, optional
The values for the linear explanatory variables
exog_smooth : array_like
values for the variables in the smooth terms
transform : bool, optional
If transform is True, then the basis representation of the smooth
term will be constructed from the provided ``exog_smooth``.
kwargs :
Some models can take additional arguments or keywords, see the
predict method of the model for the details.
Returns
-------
prediction : ndarray, pandas.Series or pandas.DataFrame
predicted values
"""
ex, exog_index = self._tranform_predict_exog(exog=exog,
exog_smooth=exog_smooth,
transform=transform)
predict_results = super(GLMGamResults, self).predict(ex,
transform=False,
**kwargs)
if exog_index is not None and not hasattr(
predict_results, 'predicted_values'):
if predict_results.ndim == 1:
return pd.Series(predict_results, index=exog_index)
else:
return | pd.DataFrame(predict_results, index=exog_index) | pandas.DataFrame |
import os,glob
import pandas as pd
path = "C:/Users/Fayr/Documents/GitHub/spotifyML/datasets"
files = glob.glob(os.path.join(path, '*.csv'))
df_from_each_file = (pd.read_csv(f, sep=',').iloc[0:30, :] for f in files)
df_merged = | pd.concat(df_from_each_file, ignore_index=True) | pandas.concat |
import json
import requests
import re
import os.path
import datetime
import configparser
from unicodedata import normalize
import pandas as pd
import numpy as np
from pandas.io.json import json_normalize
"""
Get your API access token
1. Create a Yelp account.
2. Create an app (https://www.yelp.com/developers/v3/manage_app).
3. Run this command in your terminal to get your access_token:
curl -X POST -F 'client_id=YOUR_CLIENT_ID' -F 'client_secret=YOUR_CLIENT_SECRET' https://api.yelp.com/oauth2/token
4. Get your 'access_token' from the response and add it to the config.ini file.
"""
def companies():
# Loading reimbursements
docs = pd.read_csv(REIMBURSEMENTS_DATASET_PATH,
low_memory=False,
dtype={'cnpj_cpf': np.str})
# Filtering only congressperson meals
meal_docs = docs[docs.subquota_description == 'Congressperson meal']
# Storing only unique CNPJs
meal_cnpjs = meal_docs['cnpj_cpf'].unique()
# Loading companies
all_companies = pd.read_csv(COMPANIES_DATASET_PATH,
low_memory=False,
dtype={'trade_name': np.str})
all_companies = all_companies[all_companies['trade_name'].notnull()]
# Cleaning up companies CNPJs
all_companies['clean_cnpj'] = \
all_companies['cnpj'].map(lambda cnpj: cnpj.replace(
'.', '').replace('/', '').replace('-', ''))
# Filtering only companies that are in meal reimbursements
return all_companies[all_companies['clean_cnpj'].isin(meal_cnpjs)]
def find_newest_file(name):
date_regex = re.compile(r'\d{4}-\d{2}-\d{2}')
matches = (date_regex.findall(f) for f in os.listdir(DATA_DIR))
dates = sorted(set([l[0] for l in matches if l]), reverse=True)
for date in dates:
filename = '{}-{}.xz'.format(date, name)
filepath = os.path.join(DATA_DIR, filename)
if os.path.isfile(filepath):
return filepath
def remaining_companies(fetched_companies, companies):
return companies[~companies['cnpj'].isin(fetched_companies['cnpj'])]
def load_companies_dataset():
if os.path.exists(YELP_DATASET_PATH):
return | pd.read_csv(YELP_DATASET_PATH) | pandas.read_csv |
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import nose
import numpy as np
import pandas as pd
from pandas import (Index, Series, DataFrame, Timestamp, isnull, notnull,
bdate_range, date_range, _np_version_under1p7)
import pandas.core.common as com
from pandas.compat import StringIO, lrange, range, zip, u, OrderedDict, long
from pandas import compat, to_timedelta, tslib
from pandas.tseries.timedeltas import _coerce_scalar_to_timedelta_type as ct
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
assert_almost_equal,
ensure_clean)
import pandas.util.testing as tm
def _skip_if_numpy_not_friendly():
# not friendly for < 1.7
if _np_version_under1p7:
raise nose.SkipTest("numpy < 1.7")
class TestTimedeltas(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
pass
def test_numeric_conversions(self):
_skip_if_numpy_not_friendly()
self.assertEqual(ct(0), np.timedelta64(0,'ns'))
self.assertEqual(ct(10), np.timedelta64(10,'ns'))
self.assertEqual(ct(10,unit='ns'), np.timedelta64(10,'ns').astype('m8[ns]'))
self.assertEqual(ct(10,unit='us'), np.timedelta64(10,'us').astype('m8[ns]'))
self.assertEqual(ct(10,unit='ms'), np.timedelta64(10,'ms').astype('m8[ns]'))
self.assertEqual(ct(10,unit='s'), np.timedelta64(10,'s').astype('m8[ns]'))
self.assertEqual(ct(10,unit='d'), np.timedelta64(10,'D').astype('m8[ns]'))
def test_timedelta_conversions(self):
_skip_if_numpy_not_friendly()
self.assertEqual(ct(timedelta(seconds=1)), np.timedelta64(1,'s').astype('m8[ns]'))
self.assertEqual(ct(timedelta(microseconds=1)), np.timedelta64(1,'us').astype('m8[ns]'))
self.assertEqual(ct(timedelta(days=1)), np.timedelta64(1,'D').astype('m8[ns]'))
def test_short_format_converters(self):
_skip_if_numpy_not_friendly()
def conv(v):
return v.astype('m8[ns]')
self.assertEqual(ct('10'), np.timedelta64(10,'ns'))
self.assertEqual(ct('10ns'), np.timedelta64(10,'ns'))
self.assertEqual(ct('100'), np.timedelta64(100,'ns'))
self.assertEqual(ct('100ns'), np.timedelta64(100,'ns'))
self.assertEqual(ct('1000'), np.timedelta64(1000,'ns'))
self.assertEqual(ct('1000ns'), np.timedelta64(1000,'ns'))
self.assertEqual(ct('1000NS'), np.timedelta64(1000,'ns'))
self.assertEqual(ct('10us'), np.timedelta64(10000,'ns'))
self.assertEqual(ct('100us'), np.timedelta64(100000,'ns'))
self.assertEqual(ct('1000us'), np.timedelta64(1000000,'ns'))
self.assertEqual(ct('1000Us'), np.timedelta64(1000000,'ns'))
self.assertEqual(ct('1000uS'), np.timedelta64(1000000,'ns'))
self.assertEqual(ct('1ms'), np.timedelta64(1000000,'ns'))
self.assertEqual(ct('10ms'), np.timedelta64(10000000,'ns'))
self.assertEqual(ct('100ms'), np.timedelta64(100000000,'ns'))
self.assertEqual(ct('1000ms'), np.timedelta64(1000000000,'ns'))
self.assertEqual(ct('-1s'), -np.timedelta64(1000000000,'ns'))
self.assertEqual(ct('1s'), np.timedelta64(1000000000,'ns'))
self.assertEqual(ct('10s'), np.timedelta64(10000000000,'ns'))
self.assertEqual(ct('100s'), np.timedelta64(100000000000,'ns'))
self.assertEqual(ct('1000s'), np.timedelta64(1000000000000,'ns'))
self.assertEqual(ct('1d'), conv(np.timedelta64(1,'D')))
self.assertEqual(ct('-1d'), -conv(np.timedelta64(1,'D')))
self.assertEqual(ct('1D'), conv(np.timedelta64(1,'D')))
self.assertEqual(ct('10D'), conv(np.timedelta64(10,'D')))
self.assertEqual(ct('100D'), conv(np.timedelta64(100,'D')))
self.assertEqual(ct('1000D'), conv(np.timedelta64(1000,'D')))
self.assertEqual(ct('10000D'), conv(np.timedelta64(10000,'D')))
# space
self.assertEqual(ct(' 10000D '), conv(np.timedelta64(10000,'D')))
self.assertEqual(ct(' - 10000D '), -conv(np.timedelta64(10000,'D')))
# invalid
self.assertRaises(ValueError, ct, '1foo')
self.assertRaises(ValueError, ct, 'foo')
def test_full_format_converters(self):
_skip_if_numpy_not_friendly()
def conv(v):
return v.astype('m8[ns]')
d1 = np.timedelta64(1,'D')
self.assertEqual(ct('1days'), conv(d1))
self.assertEqual(ct('1days,'), conv(d1))
self.assertEqual(ct('- 1days,'), -conv(d1))
self.assertEqual(ct('00:00:01'), conv(np.timedelta64(1,'s')))
self.assertEqual(ct('06:00:01'), conv(np.timedelta64(6*3600+1,'s')))
self.assertEqual(ct('06:00:01.0'), conv(np.timedelta64(6*3600+1,'s')))
self.assertEqual(ct('06:00:01.01'), conv(np.timedelta64(1000*(6*3600+1)+10,'ms')))
self.assertEqual(ct('- 1days, 00:00:01'), -conv(d1+np.timedelta64(1,'s')))
self.assertEqual(ct('1days, 06:00:01'), conv(d1+np.timedelta64(6*3600+1,'s')))
self.assertEqual(ct('1days, 06:00:01.01'), conv(d1+np.timedelta64(1000*(6*3600+1)+10,'ms')))
# invalid
self.assertRaises(ValueError, ct, '- 1days, 00')
def test_nat_converters(self):
_skip_if_numpy_not_friendly()
self.assertEqual(to_timedelta('nat',box=False), tslib.iNaT)
self.assertEqual(to_timedelta('nan',box=False), tslib.iNaT)
def test_to_timedelta(self):
_skip_if_numpy_not_friendly()
def conv(v):
return v.astype('m8[ns]')
d1 = np.timedelta64(1,'D')
self.assertEqual(to_timedelta('1 days 06:05:01.00003',box=False), conv(d1+np.timedelta64(6*3600+5*60+1,'s')+np.timedelta64(30,'us')))
self.assertEqual(to_timedelta('15.5us',box=False), conv(np.timedelta64(15500,'ns')))
# empty string
result = to_timedelta('',box=False)
self.assertEqual(result, tslib.iNaT)
result = to_timedelta(['', ''])
self.assert_(isnull(result).all())
# pass thru
result = to_timedelta(np.array([np.timedelta64(1,'s')]))
expected = np.array([np.timedelta64(1,'s')])
tm.assert_almost_equal(result,expected)
# ints
result = np.timedelta64(0,'ns')
expected = to_timedelta(0,box=False)
self.assertEqual(result, expected)
# Series
expected = Series([timedelta(days=1), timedelta(days=1, seconds=1)])
result = to_timedelta(Series(['1d','1days 00:00:01']))
tm.assert_series_equal(result, expected)
# with units
result = Series([ np.timedelta64(0,'ns'), np.timedelta64(10,'s').astype('m8[ns]') ],dtype='m8[ns]')
expected = to_timedelta([0,10],unit='s')
tm.assert_series_equal(result, expected)
# single element conversion
v = timedelta(seconds=1)
result = to_timedelta(v,box=False)
expected = np.timedelta64(timedelta(seconds=1))
self.assertEqual(result, expected)
v = np.timedelta64(timedelta(seconds=1))
result = to_timedelta(v,box=False)
expected = np.timedelta64(timedelta(seconds=1))
self.assertEqual(result, expected)
def test_to_timedelta_via_apply(self):
_skip_if_numpy_not_friendly()
# GH 5458
expected = Series([np.timedelta64(1,'s')])
result = Series(['00:00:01']).apply(to_timedelta)
tm.assert_series_equal(result, expected)
result = Series([to_timedelta('00:00:01')])
tm.assert_series_equal(result, expected)
def test_timedelta_ops(self):
_skip_if_numpy_not_friendly()
# GH4984
# make sure ops return timedeltas
s = Series([Timestamp('20130101') + timedelta(seconds=i*i) for i in range(10) ])
td = s.diff()
result = td.mean()[0]
# TODO This should have returned a scalar to begin with. Hack for now.
expected = to_timedelta(timedelta(seconds=9))
tm.assert_almost_equal(result, expected)
result = td.quantile(.1)
# This properly returned a scalar.
expected = to_timedelta('00:00:02.6')
tm.assert_almost_equal(result, expected)
result = td.median()[0]
# TODO This should have returned a scalar to begin with. Hack for now.
expected = to_timedelta('00:00:08')
tm.assert_almost_equal(result, expected)
# GH 6462
# consistency in returned values for sum
result = td.sum()[0]
expected = to_timedelta('00:01:21')
tm.assert_almost_equal(result, expected)
def test_to_timedelta_on_missing_values(self):
_skip_if_numpy_not_friendly()
# GH5438
timedelta_NaT = np.timedelta64('NaT')
actual = pd.to_timedelta(Series(['00:00:01', np.nan]))
expected = Series([np.timedelta64(1000000000, 'ns'), timedelta_NaT], dtype='<m8[ns]')
assert_series_equal(actual, expected)
actual = pd.to_timedelta(Series(['00:00:01', pd.NaT]))
assert_series_equal(actual, expected)
actual = pd.to_timedelta(np.nan)
self.assertEqual(actual, timedelta_NaT)
actual = | pd.to_timedelta(pd.NaT) | pandas.to_timedelta |
import os.path
import pickle
import sys
import pandas as pd
import numpy as np
from tqdm import tqdm
import behavelet
import utils2p
import utils2p.synchronization
import flydf
root_dir=os.path.abspath("../..")+'/'
# print(root_dir)
# root_dir="/mnt/data/CLC/"
annotation_dir=root_dir+"Ascending_neuron_screen_analysis_pipeline/01_behavior_annotation_for_behavior_classifier/"
output_dir = root_dir+"Ascending_neuron_screen_analysis_pipeline/output/Fig2_S4-GLM_jangles_legs_beh_DFF/"
position_columns = [
"LF_leg Coxa x",
"LF_leg Coxa y",
"LF_leg Coxa z",
"LF_leg Femur x",
"LF_leg Femur y",
"LF_leg Femur z",
"LF_leg Tibia x",
"LF_leg Tibia y",
"LF_leg Tibia z",
"LF_leg Tarsus x",
"LF_leg Tarsus y",
"LF_leg Tarsus z",
"LF_leg Claw x",
"LF_leg Claw y",
"LF_leg Claw z",
"LM_leg Coxa x",
"LM_leg Coxa y",
"LM_leg Coxa z",
"LM_leg Femur x",
"LM_leg Femur y",
"LM_leg Femur z",
"LM_leg Tibia x",
"LM_leg Tibia y",
"LM_leg Tibia z",
"LM_leg Tarsus x",
"LM_leg Tarsus y",
"LM_leg Tarsus z",
"LM_leg Claw x",
"LM_leg Claw y",
"LM_leg Claw z",
"LH_leg Coxa x",
"LH_leg Coxa y",
"LH_leg Coxa z",
"LH_leg Femur x",
"LH_leg Femur y",
"LH_leg Femur z",
"LH_leg Tibia x",
"LH_leg Tibia y",
"LH_leg Tibia z",
"LH_leg Tarsus x",
"LH_leg Tarsus y",
"LH_leg Tarsus z",
"LH_leg Claw x",
"LH_leg Claw y",
"LH_leg Claw z",
"RF_leg Coxa x",
"RF_leg Coxa y",
"RF_leg Coxa z",
"RF_leg Femur x",
"RF_leg Femur y",
"RF_leg Femur z",
"RF_leg Tibia x",
"RF_leg Tibia y",
"RF_leg Tibia z",
"RF_leg Tarsus x",
"RF_leg Tarsus y",
"RF_leg Tarsus z",
"RF_leg Claw x",
"RF_leg Claw y",
"RF_leg Claw z",
"RM_leg Coxa x",
"RM_leg Coxa y",
"RM_leg Coxa z",
"RM_leg Femur x",
"RM_leg Femur y",
"RM_leg Femur z",
"RM_leg Tibia x",
"RM_leg Tibia y",
"RM_leg Tibia z",
"RM_leg Tarsus x",
"RM_leg Tarsus y",
"RM_leg Tarsus z",
"RM_leg Claw x",
"RM_leg Claw y",
"RM_leg Claw z",
"RH_leg Coxa x",
"RH_leg Coxa y",
"RH_leg Coxa z",
"RH_leg Femur x",
"RH_leg Femur y",
"RH_leg Femur z",
"RH_leg Tibia x",
"RH_leg Tibia y",
"RH_leg Tibia z",
"RH_leg Tarsus x",
"RH_leg Tarsus y",
"RH_leg Tarsus z",
"RH_leg Claw x",
"RH_leg Claw y",
"RH_leg Claw z",
]
angle_columns = [
"LF_leg yaw",
"LF_leg pitch",
"LF_leg roll",
"LF_leg th_fe",
"LF_leg th_ti",
"LF_leg roll_tr",
"LF_leg th_ta",
"LM_leg yaw",
"LM_leg pitch",
"LM_leg roll",
"LM_leg th_fe",
"LM_leg th_ti",
"LM_leg roll_tr",
"LM_leg th_ta",
"LH_leg yaw",
"LH_leg pitch",
"LH_leg roll",
"LH_leg th_fe",
"LH_leg th_ti",
"LH_leg roll_tr",
"LH_leg th_ta",
"RF_leg yaw",
"RF_leg pitch",
"RF_leg roll",
"RF_leg th_fe",
"RF_leg th_ti",
"RF_leg roll_tr",
"RF_leg th_ta",
"RM_leg yaw",
"RM_leg pitch",
"RM_leg roll",
"RM_leg th_fe",
"RM_leg th_ti",
"RM_leg roll_tr",
"RM_leg th_ta",
"RH_leg yaw",
"RH_leg pitch",
"RH_leg roll",
"RH_leg th_fe",
"RH_leg th_ti",
"RH_leg roll_tr",
"RH_leg th_ta",
]
def exp_generator_paper_trials(path):
df = pd.read_pickle(path)
for i, row in df.iterrows():
date = str(row["Date"])
genotype = row["Genotype"]
fly = f"fly{row['Fly']}"
trial = f"{row['Trial']:03}"
yield (date, genotype, fly, trial)
def exp_generator_annotations(path):
df = pd.read_csv(path)
df = df[["Date", "Genotype", "Fly", "Trial"]]
df = df.drop_duplicates()
for i, row in df.iterrows():
date = str(row["Date"])
genotype = row["Genotype"]
fly = f"fly{row['Fly']}"
trial = f"{row['Trial']:03}"
yield (date, genotype, fly, trial)
def beh_dir(date, genotype, fly, trial):
return root_dir+f"Ascending_neuron_screen_analysis_pipeline/00_behavior_data_preprocess/df3dResults_ballRot_captureMeta/{date - 20000000}_{genotype}/Fly{fly}/CO2xzGG/behData_{trial:03d}/"
def sync_dir(date, genotype, fly, trial):
Gal4=genotype.split('-')[0]
return root_dir+f"Ascending_neuron_screen_analysis_pipeline/03_general_2P_exp/{Gal4}/2P/{date}/{genotype}-fly{fly}/{genotype}-fly{fly}-sync-{trial:03d}"
def dir_2p(date, genotype, fly, trial):
Gal4=genotype.split('-')[0]
return root_dir+f"Ascending_neuron_screen_analysis_pipeline/03_general_2P_exp/{Gal4}/2P/{date}/{genotype}-fly{fly}/{genotype}-fly{fly}-{trial:03d}"
def clc_output_dir(date, genotype, fly, trial):
return root_dir+f"Ascending_neuron_screen_analysis_pipeline/03_general_2P_exp/{genotype.split('-')[0]}/2P/{date}/{genotype}-fly{fly}/{genotype}-fly{fly}-{trial:03d}/output"
def get_processed_sync_lines(date, genotype, fly, trial):
Gal4=genotype.split('-')[0]
dir_beh = beh_dir(date, genotype, fly, trial)
dir_sync = sync_dir(date, genotype, fly, trial)
dir2p = dir_2p(date, genotype, fly, trial)
h5_path = utils2p.find_sync_file(dir_sync)
co2_line, cam_line, opt_flow_line, frame_counter, capture_on = utils2p.synchronization.get_lines_from_h5_file(h5_path, ["CO2_Stim", "Basler", "OpFlow", "Frame Counter", "Capture On"])
try:
capture_json = utils2p.find_seven_camera_metadata_file(dir_beh)
except FileNotFoundError:
capture_json = None
metadata_2p = utils2p.find_metadata_file(dir2p)
metadata = utils2p.Metadata(metadata_2p)
cam_line = utils2p.synchronization.process_cam_line(cam_line, capture_json)
opt_flow_line = utils2p.synchronization.process_optical_flow_line(opt_flow_line)
n_flyback_frames = metadata.get_n_flyback_frames()
n_steps = metadata.get_n_z()
frame_counter = utils2p.synchronization.process_frame_counter(frame_counter, steps_per_frame=n_flyback_frames + n_steps)
co2_line = utils2p.synchronization.process_stimulus_line(co2_line)
mask = np.logical_and(capture_on, frame_counter >= 0)
mask = np.logical_and(mask, cam_line >= 0)
co2_line, cam_line, opt_flow_line, frame_counter = utils2p.synchronization.crop_lines(mask, [co2_line, cam_line, opt_flow_line, frame_counter])
optical_flow_path = utils2p.find_optical_flow_file(dir_beh)
optical_flow = utils2p.load_optical_flow(optical_flow_path, 0, 0, 0, 0)
# Ensure all optical flow data was saved and remove frames with missing data
mask = (opt_flow_line < len(optical_flow["time_stamps"]))
co2_line, cam_line, opt_flow_line, frame_counter = utils2p.synchronization.crop_lines(mask, [co2_line, cam_line, opt_flow_line, frame_counter])
return co2_line, cam_line, opt_flow_line, frame_counter
def optical_flow_regressors(frame_counter, opt_flow_line, optical_flow):
pitch = utils2p.synchronization.reduce_during_2p_frame(frame_counter, optical_flow["vel_pitch"][opt_flow_line], np.mean)
roll = utils2p.synchronization.reduce_during_2p_frame(frame_counter, optical_flow["vel_roll"][opt_flow_line], np.mean)
yaw = utils2p.synchronization.reduce_during_2p_frame(frame_counter, optical_flow["vel_yaw"][opt_flow_line], np.mean)
return pitch, roll, yaw
def add_rest_metric(df, columns, name):
n = flydf.number_of_epochs(df)
rest_metric_df = | pd.DataFrame() | pandas.DataFrame |
import os
import joblib
import numpy as np
import pandas as pd
from joblib import Parallel
from joblib import delayed
from pytz import timezone
from sklearn.decomposition import KernelPCA
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import MinMaxScaler
from Fuzzy_clustering.version2.common_utils.logging import create_logger
from Fuzzy_clustering.version2.dataset_manager.common_utils import my_scorer
from Fuzzy_clustering.version2.dataset_manager.common_utils import stack_3d
from Fuzzy_clustering.version2.dataset_manager.common_utils import stack_daily_nwps
class DatasetCreatorPCA:
def __init__(self, project, data=None, n_jobs=1, test=False, dates=None):
if test is None:
raise NotImplementedError('test is None for short-term, not implemented for PCA')
self.data = data
self.is_for_test = test
self.project_name = project['_id']
self.static_data = project['static_data']
self.path_nwp_project = self.static_data['pathnwp']
self.path_data = self.static_data['path_data']
self.areas = self.static_data['areas']
self.area_group = self.static_data['area_group']
self.nwp_model = self.static_data['NWP_model']
self.nwp_resolution = self.static_data['NWP_resolution']
self.location = self.static_data['location']
self.compress = True if self.nwp_resolution == 0.05 else False
self.n_jobs = n_jobs
self.variables = self.static_data['data_variables']
self.logger = create_logger(logger_name=f"log_{self.static_data['project_group']}",
abs_path=self.path_nwp_project,
logger_path=f"log_{self.static_data['project_group']}.log", write_type='a')
if self.data is not None:
self.dates = self.check_dates()
elif dates is not None:
self.dates = dates
def check_dates(self):
# Extract dates of power measurements.
start_date = pd.to_datetime(self.data.index[0].strftime('%d%m%y'), format='%d%m%y')
end_date = pd.to_datetime(self.data.index[-1].strftime('%d%m%y'), format='%d%m%y')
dates = pd.date_range(start_date, end_date)
data_dates = pd.to_datetime(np.unique(self.data.index.strftime('%d%m%y')), format='%d%m%y')
dates = [d for d in dates if d in data_dates]
self.logger.info('Dates checked. Number of time samples: %s', str(len(dates)))
return pd.DatetimeIndex(dates)
def make_dataset_res(self):
nwp_3d_pickle = 'nwps_3d_test.pickle' if self.is_for_test else 'nwps_3d.pickle'
dataset_cnn_pickle = 'dataset_cnn_test.pickle' if self.is_for_test else 'dataset_cnn.pickle'
nwp_3d_pickle = os.path.join(self.path_data, nwp_3d_pickle)
dataset_cnn_pickle = os.path.join(self.path_data, dataset_cnn_pickle)
if not (os.path.exists(nwp_3d_pickle) and os.path.exists(dataset_cnn_pickle)):
data, x_3d = self.get_3d_dataset()
else:
data = joblib.load(nwp_3d_pickle)
x_3d = joblib.load(dataset_cnn_pickle) # FIXME: unused variable
data_path = self.path_data
if not isinstance(self.areas, dict):
self.dataset_for_single_farm(data, data_path)
else:
dates_stack = []
for t in self.dates:
p_dates = pd.date_range(t + | pd.DateOffset(hours=25) | pandas.DateOffset |
import importlib
from hydroDL.master import basins
from hydroDL.app import waterQuality
from hydroDL import kPath, utils
from hydroDL.model import trainTS
from hydroDL.data import gageII, usgs
from hydroDL.post import axplot, figplot
from sklearn.linear_model import LinearRegression
from hydroDL.data import usgs, gageII, gridMET, ntn, transform
import torch
import os
import json
import numpy as np
import pandas as pd
import time
import matplotlib.pyplot as plt
startDate = pd.datetime(1979, 1, 1)
endDate = pd.datetime(2020, 1, 1)
sn = 1
wqData = waterQuality.DataModelWQ('nbW')
siteNoLst = wqData.siteNoLst
t0 = time.time()
addF = False
# for addF in [True, False]:
if addF is True:
saveFolder = os.path.join(
kPath.dirWQ, 'modelStat', 'WRTDS-F', 'B16')
else:
saveFolder = os.path.join(
kPath.dirWQ, 'modelStat', 'WRTDS', 'B16')
if not os.path.exists(saveFolder):
os.mkdir(saveFolder)
for kk, siteNo in enumerate(siteNoLst):
print('{}/{} {:.2f}'.format(
kk, len(siteNoLst), time.time()-t0))
saveName = os.path.join(saveFolder, siteNo)
# if os.path.exists(saveName):
# continue
varF = gridMET.varLst+ntn.varLst
varC = usgs.varC
varQ = usgs.varQ
varLst = varF+varC+varQ
df = waterQuality.readSiteTS(siteNo, varLst=varLst, freq='W')
dfX = pd.DataFrame({'date': df.index}).set_index('date')
dfX = dfX.join(np.log(df['00060']+sn)).rename(
columns={'00060': 'logQ'})
if addF is True:
dfX = dfX.join(df[varF])
yr = dfX.index.year.values
t = yr+dfX.index.dayofyear.values/365
dfX['sinT'] = np.sin(2*np.pi*t)
dfX['cosT'] = np.cos(2*np.pi*t)
ind = np.where(yr <= 2016)[0]
dfYP = | pd.DataFrame(index=df.index, columns=varC) | pandas.DataFrame |
import pickle
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler , Normalizer
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from scipy.stats import norm
from scipy import stats
from sklearn import metrics
import warnings
warnings.filterwarnings('ignore')
df = | pd.read_csv("./Cleaned-Data.csv") | pandas.read_csv |
from gams import *
import pandas as pd
import DataBase
import COE
import regex_gms
from DB2Gams import *
class am_base(gams_model_py):
def __init__(self,tree,gams_settings=None):
self.tree = tree
super().__init__(self.tree.database,gsettings=gams_settings, blocks_text = None, functions = None, groups = {}, exceptions = [], exceptions_load = [], components = {}, export_files = None)
def apply_type(self,type_):
return eval(f"COE.{type_}()")
def run_abatement_model(self,repo=os.getcwd(),type_='CES',export_settings=False):
self.define_groups()
self.define_blocks(type_=type_)
self.run_default(repo,export_settings=export_settings)
def define_groups(self,p='p',q='q',mu='mu',sigma='sigma'):
self.p = p
self.q = q
self.mu = mu
self.sigma = sigma
if p not in self.database:
self.database[p] = pd.Series(1,index=self.database[self.tree.setname],name=p)
if q not in self.database:
self.database[q] = pd.Series(1,index=self.database[self.tree.setname],name=q)
if mu not in self.database:
self.database[mu] = pd.Series(0.5,index=self.database[self.tree.mapname],name=mu)
if sigma not in self.database:
self.database[sigma] = pd.Series(0.5, index = self.database[self.tree.aggname],name=sigma)
self.group_tech = {sigma: {'conditions': self.database.get(self.tree.aggname).to_str},
mu : {'conditions': self.database.get(self.tree.mapname).to_str}}
self.group_exo = {p: {'conditions': self.database.get(self.tree.inpname).to_str},
q: {'conditions': self.database.get(self.tree.outname).to_str}}
self.group_endo= {p: {'conditions': self.database.get(self.tree.aggname).to_str},
q: {'conditions': self.database.get(self.tree.sector).to_str+' and not '+self.database.get(self.tree.outname).to_str}}
self.add_group_to_groups(self.group_tech,self.model.name+'_tech')
self.add_group_to_groups(self.group_exo ,self.model.name+'_exo')
self.add_group_to_groups(self.group_endo,self.model.name+'_endo')
self.model.g_endo = [self.model.name+'_endo']
self.model.g_exo = [self.model.name+'_tech', self.model.name+'_exo']
# Arrange variables in types, with alias' etc. that are used to write equations:
n2nn = {self.tree.setname: self.tree.alias}
n2nnn = {self.tree.setname: self.tree.alias2}
nn2n = {self.tree.alias : self.tree.setname}
self.write_vars = { 'q': {'base' : self.database.get(self.q).to_str,
'alias' : self.database.get(self.q,alias_domains=n2nn).to_str,
'alias2': self.database.get(self.q,alias_domains=n2nnn).to_str},
'p': {'base' : self.database.get(self.p).to_str,
'alias' : self.database.get(self.p,alias_domains=n2nn).to_str,
'alias2': self.database.get(self.p,alias_domains=n2nnn).to_str},
'mu':{'base' : self.database.get(self.mu).to_str,
'alias' : self.database.get(self.mu,alias_domains={**n2nn,**nn2n}).to_str,
'alias2': self.database.get(self.mu,alias_domains=n2nnn).to_str},
'sigma':{'base' : self.database.get(self.sigma).to_str,
'alias' : self.database.get(self.sigma,alias_domains=n2nn).to_str,
'alias2': self.database.get(self.sigma,alias_domains=n2nnn).to_str,
'level': self.database.get(self.sigma,level='.l').to_str},
'inputs': { 'base' : self.tree.setname,
'alias' : self.tree.alias,
'alias2': self.tree.alias2},
'in2aggs': {'base' : self.database.get(self.tree.mapname).to_str,
'alias' : self.database.get(self.tree.mapname,alias_domains={**n2nn,**nn2n}).to_str,
'alias2': self.database.get(self.tree.mapname,alias_domains=n2nnn).to_str}
}
def define_blocks(self,type_):
functype = self.apply_type(type_)
self.blocks = """
$BLOCK M_{mname}
{demand_equation}
{price_equation}
$ENDBLOCK
""".format( mname = self.model.name,
demand_equation = functype.equation('demand',f"E_{self.model.name}_q",
self.groups[self.model.name+'_endo'][self.q]['domains'],
self.groups[self.model.name+'_endo'][self.q]['conditions'],
self.write_vars['p'],self.write_vars['q'],self.write_vars['mu'],
self.write_vars['sigma'],self.write_vars['inputs'],self.write_vars['in2aggs']),
price_equation = functype.equation('price_index',f"E_{self.model.name}_p",
self.groups[self.model.name+'_endo'][self.p]['domains'],
self.groups[self.model.name+'_endo'][self.p]['conditions'],
self.write_vars['p'],self.write_vars['q'],self.write_vars['mu'],
self.write_vars['sigma'],self.write_vars['inputs'],self.write_vars['in2aggs']))
self.model.blocks = ['M_'+self.model.name]
class am_base_v2(gams_model_py):
"""
abatement model with quantities/prices defined over different sets.
"""
def __init__(self,tree,gams_settings=None):
self.tree = tree
super().__init__(self.tree.database,gsettings=gams_settings, blocks_text = None, functions = None, groups = {}, exceptions = [], exceptions_load = [], components = {}, export_files = None)
def apply_type(self,type_):
return eval(f"COE.{type_}()")
def run_abatement_model(self,repo=os.getcwd(),type_='CES',export_settings=False,add_aggregates=False):
self.define_groups()
self.define_blocks(type_=type_)
if add_aggregates is True:
self.add_aggregates()
self.run_default(repo,export_settings=export_settings)
def define_groups(self,p='p',q='q',mu='mu',sigma='sigma',eta='eta'):
self.p = p
self.q = q
self.mu = mu
self.sigma = sigma
self.eta = eta
if p not in self.database:
self.database[p] = pd.Series(1,index=self.database[self.tree.p_all],name=p)
if q not in self.database:
self.database[q] = pd.Series(1,index=self.database[self.tree.q_all],name=q)
if mu not in self.database:
self.database[mu] = | pd.Series(0.5,index=self.database[self.tree.mapname],name=mu) | pandas.Series |
#!/usr/bin/env python3
#
# Copyright 2019 <NAME> <<EMAIL>>
#
# This file is part of Salus
# (see https://github.com/SymbioticLab/Salus).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 2 07:38:29 2018
@author: peifeng
"""
from __future__ import print_function, absolute_import, division
import re
from datetime import datetime
from collections import defaultdict
import multiprocessing as mp
from pathlib import Path
import pandas as pd
#import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
import plotutils as pu
import compmem as cm
# 2018-09-01 06:22:21.180029: Step 341, loss=3.60 (33.8 examples/sec; 0.740 sec/batch)
ptn_iter = re.compile(r"""(?P<timestamp>.+): \s [sS]tep \s (?P<Step>\d+),\s
(loss|perplexity) .* \(
(?P<Speed>[\d.]+) \s examples/sec; \s
(?P<Duration>[\d.]+) \s sec/batch\)?""", re.VERBOSE)
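# --- Hedged self-check (added for illustration; not part of the original script):
# --- the sample log line quoted in the comment above parses into these named groups.
def _example_parse_sample_line():
    sample = ("2018-09-01 06:22:21.180029: Step 341, loss=3.60 "
              "(33.8 examples/sec; 0.740 sec/batch)")
    m = ptn_iter.match(sample)
    assert m is not None
    assert m.groupdict()["Step"] == "341"
    assert m.groupdict()["Speed"] == "33.8"
    assert m.groupdict()["Duration"] == "0.740"
    return m.groupdict()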
def parse_iterations(path):
path = Path(path)
iterations = []
with path.open() as f:
for line in f:
line = line.rstrip('\n')
m = ptn_iter.match(line)
if m:
iterations.append(m.groupdict())
df = pd.DataFrame(iterations)
if len(df) == 0:
print(f'File {path} is empty??')
assert len(df) > 0
df['timestamp'] = pd.to_datetime(df['timestamp'])
df['Speed'] = pd.to_numeric(df['Speed'])
df['Step'] = pd.to_numeric(df.Step)
df['Duration'] = | pd.to_numeric(df.Duration) | pandas.to_numeric |
from flask import Flask, render_template, jsonify, request
from flask_pymongo import PyMongo
from flask_cors import CORS, cross_origin
import json
import copy
import warnings
import re
import pandas as pd
pd.set_option('use_inf_as_na', True)
import numpy as np
from joblib import Memory
from xgboost import XGBClassifier
from sklearn import model_selection
from bayes_opt import BayesianOptimization
from sklearn.model_selection import cross_validate
from sklearn.model_selection import cross_val_predict
from sklearn.preprocessing import OneHotEncoder
from sklearn.metrics import classification_report
from sklearn.feature_selection import mutual_info_classif
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import f_classif
from sklearn.feature_selection import RFECV
from sklearn.linear_model import LogisticRegression
from eli5.sklearn import PermutationImportance
from joblib import Parallel, delayed
import multiprocessing
from statsmodels.stats.outliers_influence import variance_inflation_factor
from statsmodels.tools.tools import add_constant
# this block of code is for the connection between the server, the database, and the client (plus routing)
# access MongoDB
app = Flask(__name__)
app.config["MONGO_URI"] = "mongodb://localhost:27017/mydb"
mongo = PyMongo(app)
cors = CORS(app, resources={r"/data/*": {"origins": "*"}})
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/Reset', methods=["GET", "POST"])
def reset():
global DataRawLength
global DataResultsRaw
global previousState
previousState = []
global StanceTest
StanceTest = False
global filterActionFinal
filterActionFinal = ''
global keySpecInternal
keySpecInternal = 1
global RANDOM_SEED
RANDOM_SEED = 42
global keyData
keyData = 0
global keepOriginalFeatures
keepOriginalFeatures = []
global XData
XData = []
global yData
yData = []
global XDataNoRemoval
XDataNoRemoval = []
global XDataNoRemovalOrig
XDataNoRemovalOrig = []
global XDataStored
XDataStored = []
global yDataStored
yDataStored = []
global finalResultsData
finalResultsData = []
global detailsParams
detailsParams = []
global algorithmList
algorithmList = []
global ClassifierIDsList
ClassifierIDsList = ''
global RetrieveModelsList
RetrieveModelsList = []
global allParametersPerfCrossMutr
allParametersPerfCrossMutr = []
global all_classifiers
all_classifiers = []
global crossValidation
crossValidation = 8
#crossValidation = 5
#crossValidation = 3
global resultsMetrics
resultsMetrics = []
global parametersSelData
parametersSelData = []
global target_names
target_names = []
global keyFirstTime
keyFirstTime = True
global target_namesLoc
target_namesLoc = []
global featureCompareData
featureCompareData = []
global columnsKeep
columnsKeep = []
global columnsNewGen
columnsNewGen = []
global columnsNames
columnsNames = []
global fileName
fileName = []
global listofTransformations
listofTransformations = ["r","b","zs","mms","l2","l1p","l10","e2","em1","p2","p3","p4"]
return 'The reset was done!'
# retrieve data from client and select the correct data set
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/ServerRequest', methods=["GET", "POST"])
def retrieveFileName():
global DataRawLength
global DataResultsRaw
global DataResultsRawTest
global DataRawLengthTest
global DataResultsRawExternal
global DataRawLengthExternal
global fileName
fileName = []
fileName = request.get_data().decode('utf8').replace("'", '"')
global keySpecInternal
keySpecInternal = 1
global filterActionFinal
filterActionFinal = ''
global dataSpacePointsIDs
dataSpacePointsIDs = []
global RANDOM_SEED
RANDOM_SEED = 42
global keyData
keyData = 0
global keepOriginalFeatures
keepOriginalFeatures = []
global XData
XData = []
global XDataNoRemoval
XDataNoRemoval = []
global XDataNoRemovalOrig
XDataNoRemovalOrig = []
global previousState
previousState = []
global yData
yData = []
global XDataStored
XDataStored = []
global yDataStored
yDataStored = []
global finalResultsData
finalResultsData = []
global ClassifierIDsList
ClassifierIDsList = ''
global algorithmList
algorithmList = []
global detailsParams
detailsParams = []
# Initializing models
global RetrieveModelsList
RetrieveModelsList = []
global resultsList
resultsList = []
global allParametersPerfCrossMutr
allParametersPerfCrossMutr = []
global HistoryPreservation
HistoryPreservation = []
global all_classifiers
all_classifiers = []
global crossValidation
crossValidation = 8
#crossValidation = 5
#crossValidation = 3
global parametersSelData
parametersSelData = []
global StanceTest
StanceTest = False
global target_names
target_names = []
global keyFirstTime
keyFirstTime = True
global target_namesLoc
target_namesLoc = []
global featureCompareData
featureCompareData = []
global columnsKeep
columnsKeep = []
global columnsNewGen
columnsNewGen = []
global columnsNames
columnsNames = []
global listofTransformations
listofTransformations = ["r","b","zs","mms","l2","l1p","l10","e2","em1","p2","p3","p4"]
DataRawLength = -1
DataRawLengthTest = -1
data = json.loads(fileName)
if data['fileName'] == 'HeartC':
CollectionDB = mongo.db.HeartC.find()
target_names.append('Healthy')
target_names.append('Diseased')
elif data['fileName'] == 'biodegC':
StanceTest = True
CollectionDB = mongo.db.biodegC.find()
CollectionDBTest = mongo.db.biodegCTest.find()
CollectionDBExternal = mongo.db.biodegCExt.find()
target_names.append('Non-biodegr.')
target_names.append('Biodegr.')
elif data['fileName'] == 'BreastC':
CollectionDB = mongo.db.breastC.find()
elif data['fileName'] == 'DiabetesC':
CollectionDB = mongo.db.diabetesC.find()
target_names.append('Negative')
target_names.append('Positive')
elif data['fileName'] == 'MaterialC':
CollectionDB = mongo.db.MaterialC.find()
target_names.append('Cylinder')
target_names.append('Disk')
target_names.append('Flatellipsold')
target_names.append('Longellipsold')
target_names.append('Sphere')
elif data['fileName'] == 'ContraceptiveC':
CollectionDB = mongo.db.ContraceptiveC.find()
target_names.append('No-use')
target_names.append('Long-term')
target_names.append('Short-term')
elif data['fileName'] == 'VehicleC':
CollectionDB = mongo.db.VehicleC.find()
target_names.append('Van')
target_names.append('Car')
target_names.append('Bus')
elif data['fileName'] == 'WineC':
CollectionDB = mongo.db.WineC.find()
target_names.append('Fine')
target_names.append('Superior')
target_names.append('Inferior')
else:
CollectionDB = mongo.db.IrisC.find()
DataResultsRaw = []
for index, item in enumerate(CollectionDB):
item['_id'] = str(item['_id'])
item['InstanceID'] = index
DataResultsRaw.append(item)
DataRawLength = len(DataResultsRaw)
DataResultsRawTest = []
DataResultsRawExternal = []
if (StanceTest):
for index, item in enumerate(CollectionDBTest):
item['_id'] = str(item['_id'])
item['InstanceID'] = index
DataResultsRawTest.append(item)
DataRawLengthTest = len(DataResultsRawTest)
for index, item in enumerate(CollectionDBExternal):
item['_id'] = str(item['_id'])
item['InstanceID'] = index
DataResultsRawExternal.append(item)
DataRawLengthExternal = len(DataResultsRawExternal)
dataSetSelection()
return 'Everything is okay'
# Retrieve data set from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/SendtoSeverDataSet', methods=["GET", "POST"])
def sendToServerData():
uploadedData = request.get_data().decode('utf8').replace("'", '"')
uploadedDataParsed = json.loads(uploadedData)
DataResultsRaw = uploadedDataParsed['uploadedData']
DataResults = copy.deepcopy(DataResultsRaw)
for dictionary in DataResultsRaw:
for key in dictionary.keys():
if (key.find('*') != -1):
target = key
continue
continue
DataResultsRaw.sort(key=lambda x: x[target], reverse=True)
DataResults.sort(key=lambda x: x[target], reverse=True)
for dictionary in DataResults:
del dictionary[target]
global AllTargets
global target_names
global target_namesLoc
AllTargets = [o[target] for o in DataResultsRaw]
AllTargetsFloatValues = []
global fileName
data = json.loads(fileName)
previous = None
Class = 0
for i, value in enumerate(AllTargets):
if (i == 0):
previous = value
if (data['fileName'] == 'IrisC' or data['fileName'] == 'BreastC'):
target_names.append(value)
else:
pass
if (value == previous):
AllTargetsFloatValues.append(Class)
else:
Class = Class + 1
if (data['fileName'] == 'IrisC' or data['fileName'] == 'BreastC'):
target_names.append(value)
else:
pass
AllTargetsFloatValues.append(Class)
previous = value
ArrayDataResults = pd.DataFrame.from_dict(DataResults)
global XData, yData, RANDOM_SEED
XData, yData = ArrayDataResults, AllTargetsFloatValues
global XDataStored, yDataStored
XDataStored = XData.copy()
yDataStored = yData.copy()
global XDataStoredOriginal
XDataStoredOriginal = XData.copy()
global finalResultsData
finalResultsData = XData.copy()
global XDataNoRemoval
XDataNoRemoval = XData.copy()
global XDataNoRemovalOrig
XDataNoRemovalOrig = XData.copy()
return 'Processed uploaded data set'
def dataSetSelection():
global XDataTest, yDataTest
XDataTest = pd.DataFrame()
global XDataExternal, yDataExternal
XDataExternal = pd.DataFrame()
global StanceTest
global AllTargets
global target_names
target_namesLoc = []
if (StanceTest):
DataResultsTest = copy.deepcopy(DataResultsRawTest)
for dictionary in DataResultsRawTest:
for key in dictionary.keys():
if (key.find('*') != -1):
target = key
continue
continue
DataResultsRawTest.sort(key=lambda x: x[target], reverse=True)
DataResultsTest.sort(key=lambda x: x[target], reverse=True)
for dictionary in DataResultsTest:
del dictionary['_id']
del dictionary['InstanceID']
del dictionary[target]
AllTargetsTest = [o[target] for o in DataResultsRawTest]
AllTargetsFloatValuesTest = []
previous = None
Class = 0
for i, value in enumerate(AllTargetsTest):
if (i == 0):
previous = value
target_namesLoc.append(value)
if (value == previous):
AllTargetsFloatValuesTest.append(Class)
else:
Class = Class + 1
target_namesLoc.append(value)
AllTargetsFloatValuesTest.append(Class)
previous = value
ArrayDataResultsTest = pd.DataFrame.from_dict(DataResultsTest)
XDataTest, yDataTest = ArrayDataResultsTest, AllTargetsFloatValuesTest
DataResultsExternal = copy.deepcopy(DataResultsRawExternal)
for dictionary in DataResultsRawExternal:
for key in dictionary.keys():
if (key.find('*') != -1):
target = key
continue
continue
DataResultsRawExternal.sort(key=lambda x: x[target], reverse=True)
DataResultsExternal.sort(key=lambda x: x[target], reverse=True)
for dictionary in DataResultsExternal:
del dictionary['_id']
del dictionary['InstanceID']
del dictionary[target]
AllTargetsExternal = [o[target] for o in DataResultsRawExternal]
AllTargetsFloatValuesExternal = []
previous = None
Class = 0
for i, value in enumerate(AllTargetsExternal):
if (i == 0):
previous = value
target_namesLoc.append(value)
if (value == previous):
AllTargetsFloatValuesExternal.append(Class)
else:
Class = Class + 1
target_namesLoc.append(value)
AllTargetsFloatValuesExternal.append(Class)
previous = value
ArrayDataResultsExternal = pd.DataFrame.from_dict(DataResultsExternal)
XDataExternal, yDataExternal = ArrayDataResultsExternal, AllTargetsFloatValuesExternal
DataResults = copy.deepcopy(DataResultsRaw)
for dictionary in DataResultsRaw:
for key in dictionary.keys():
if (key.find('*') != -1):
target = key
continue
continue
DataResultsRaw.sort(key=lambda x: x[target], reverse=True)
DataResults.sort(key=lambda x: x[target], reverse=True)
for dictionary in DataResults:
del dictionary['_id']
del dictionary['InstanceID']
del dictionary[target]
AllTargets = [o[target] for o in DataResultsRaw]
AllTargetsFloatValues = []
global fileName
data = json.loads(fileName)
previous = None
Class = 0
for i, value in enumerate(AllTargets):
if (i == 0):
previous = value
if (data['fileName'] == 'IrisC' or data['fileName'] == 'BreastC'):
target_names.append(value)
else:
pass
if (value == previous):
AllTargetsFloatValues.append(Class)
else:
Class = Class + 1
if (data['fileName'] == 'IrisC' or data['fileName'] == 'BreastC'):
target_names.append(value)
else:
pass
AllTargetsFloatValues.append(Class)
previous = value
dfRaw = pd.DataFrame.from_dict(DataResultsRaw)
# OneTimeTemp = copy.deepcopy(dfRaw)
# OneTimeTemp.drop(columns=['_id', 'InstanceID'])
# column_names = ['volAc', 'chlorides', 'density', 'fixAc' , 'totalSuDi' , 'citAc', 'resSu' , 'pH' , 'sulphates', 'freeSulDi' ,'alcohol', 'quality*']
# OneTimeTemp = OneTimeTemp.reindex(columns=column_names)
# OneTimeTemp.to_csv('dataExport.csv', index=False)
ArrayDataResults = pd.DataFrame.from_dict(DataResults)
global XData, yData, RANDOM_SEED
XData, yData = ArrayDataResults, AllTargetsFloatValues
global keepOriginalFeatures
global OrignList
if (data['fileName'] == 'biodegC'):
keepOriginalFeatures = XData.copy()
storeNewColumns = []
for col in keepOriginalFeatures.columns:
newCol = col.replace("-", "_")
storeNewColumns.append(newCol.replace("_",""))
keepOriginalFeatures.columns = [str(col) + ' F'+str(idx+1)+'' for idx, col in enumerate(storeNewColumns)]
columnsNewGen = keepOriginalFeatures.columns.values.tolist()
OrignList = keepOriginalFeatures.columns.values.tolist()
else:
keepOriginalFeatures = XData.copy()
keepOriginalFeatures.columns = [str(col) + ' F'+str(idx+1)+'' for idx, col in enumerate(keepOriginalFeatures.columns)]
columnsNewGen = keepOriginalFeatures.columns.values.tolist()
OrignList = keepOriginalFeatures.columns.values.tolist()
XData.columns = ['F'+str(idx+1) for idx, col in enumerate(XData.columns)]
XDataTest.columns = ['F'+str(idx+1) for idx, col in enumerate(XDataTest.columns)]
XDataExternal.columns = ['F'+str(idx+1) for idx, col in enumerate(XDataExternal.columns)]
global XDataStored, yDataStored
XDataStored = XData.copy()
yDataStored = yData.copy()
global XDataStoredOriginal
XDataStoredOriginal = XData.copy()
global finalResultsData
finalResultsData = XData.copy()
global XDataNoRemoval
XDataNoRemoval = XData.copy()
global XDataNoRemovalOrig
XDataNoRemovalOrig = XData.copy()
warnings.simplefilter('ignore')
executeModel([], 0, '')
return 'Everything is okay'
def create_global_function():
global estimator
location = './cachedir'
memory = Memory(location, verbose=0)
# calculating for all algorithms and models the performance and other results
@memory.cache
def estimator(n_estimators, eta, max_depth, subsample, colsample_bytree):
# initialize model
print('loopModels')
n_estimators = int(n_estimators)
max_depth = int(max_depth)
model = XGBClassifier(n_estimators=n_estimators, eta=eta, max_depth=max_depth, subsample=subsample, colsample_bytree=colsample_bytree, n_jobs=-1, random_state=RANDOM_SEED, silent=True, verbosity = 0, use_label_encoder=False)
# set in cross-validation
result = cross_validate(model, XData, yData, cv=crossValidation, scoring='accuracy')
# result is mean of test_score
return np.mean(result['test_score'])
# check this issue later because we are not getting the same results
def executeModel(exeCall, flagEx, nodeTransfName):
global XDataTest, yDataTest
global XDataExternal, yDataExternal
global keyFirstTime
global estimator
global yPredictProb
global scores
global featureImportanceData
global XData
global XDataStored
global previousState
global columnsNewGen
global columnsNames
global listofTransformations
global XDataStoredOriginal
global finalResultsData
global OrignList
global tracker
global XDataNoRemoval
global XDataNoRemovalOrig
columnsNames = []
scores = []
if (len(exeCall) == 0):
if (flagEx == 3):
XDataStored = XData.copy()
XDataNoRemovalOrig = XDataNoRemoval.copy()
OrignList = columnsNewGen
elif (flagEx == 2):
XData = XDataStored.copy()
XDataStoredOriginal = XDataStored.copy()
XDataNoRemoval = XDataNoRemovalOrig.copy()
columnsNewGen = OrignList
else:
XData = XDataStored.copy()
XDataNoRemoval = XDataNoRemovalOrig.copy()
XDataStoredOriginal = XDataStored.copy()
else:
if (flagEx == 4):
XDataStored = XData.copy()
XDataNoRemovalOrig = XDataNoRemoval.copy()
#XDataStoredOriginal = XDataStored.copy()
elif (flagEx == 2):
XData = XDataStored.copy()
XDataStoredOriginal = XDataStored.copy()
XDataNoRemoval = XDataNoRemovalOrig.copy()
columnsNewGen = OrignList
else:
XData = XDataStored.copy()
#XDataNoRemoval = XDataNoRemovalOrig.copy()
XDataStoredOriginal = XDataStored.copy()
# Bayesian Optimization CHANGE INIT_POINTS!
if (keyFirstTime):
create_global_function()
params = {"n_estimators": (5, 200), "eta": (0.05, 0.3), "max_depth": (6,12), "subsample": (0.8,1), "colsample_bytree": (0.8,1)}
bayesopt = BayesianOptimization(estimator, params, random_state=RANDOM_SEED)
bayesopt.maximize(init_points=20, n_iter=5, acq='ucb') # 20 and 5
bestParams = bayesopt.max['params']
estimator = XGBClassifier(n_estimators=int(bestParams.get('n_estimators')), eta=bestParams.get('eta'), max_depth=int(bestParams.get('max_depth')), subsample=bestParams.get('subsample'), colsample_bytree=bestParams.get('colsample_bytree'), probability=True, random_state=RANDOM_SEED, silent=True, verbosity = 0, use_label_encoder=False)
columnsNewGen = OrignList
if (len(exeCall) != 0):
if (flagEx == 1):
currentColumnsDeleted = []
for uniqueValue in exeCall:
currentColumnsDeleted.append(tracker[uniqueValue])
for column in XData.columns:
if (column in currentColumnsDeleted):
XData = XData.drop(column, axis=1)
XDataStoredOriginal = XDataStoredOriginal.drop(column, axis=1)
elif (flagEx == 2):
columnsKeepNew = []
columns = XDataGen.columns.values.tolist()
for indx, col in enumerate(columns):
if indx in exeCall:
columnsKeepNew.append(col)
columnsNewGen.append(col)
XDataTemp = XDataGen[columnsKeepNew]
XData[columnsKeepNew] = XDataTemp.values
XDataStoredOriginal[columnsKeepNew] = XDataTemp.values
XDataNoRemoval[columnsKeepNew] = XDataTemp.values
elif (flagEx == 4):
splittedCol = nodeTransfName.split('_')
for col in XDataNoRemoval.columns:
splitCol = col.split('_')
if ((splittedCol[0] in splitCol[0])):
newSplitted = re.sub("[^0-9]", "", splittedCol[0])
newCol = re.sub("[^0-9]", "", splitCol[0])
if (newSplitted == newCol):
storeRenamedColumn = col
XData.rename(columns={ storeRenamedColumn: nodeTransfName }, inplace = True)
XDataNoRemoval.rename(columns={ storeRenamedColumn: nodeTransfName }, inplace = True)
currentColumn = columnsNewGen[exeCall[0]]
subString = currentColumn[currentColumn.find("(")+1:currentColumn.find(")")]
replacement = currentColumn.replace(subString, nodeTransfName)
for ind, column in enumerate(columnsNewGen):
splitCol = column.split('_')
if ((splittedCol[0] in splitCol[0])):
newSplitted = re.sub("[^0-9]", "", splittedCol[0])
newCol = re.sub("[^0-9]", "", splitCol[0])
if (newSplitted == newCol):
columnsNewGen[ind] = columnsNewGen[ind].replace(storeRenamedColumn, nodeTransfName)
if (len(splittedCol) == 1):
XData[nodeTransfName] = XDataStoredOriginal[nodeTransfName]
XDataNoRemoval[nodeTransfName] = XDataStoredOriginal[nodeTransfName]
else:
if (splittedCol[1] == 'r'):
XData[nodeTransfName] = XData[nodeTransfName].round()
elif (splittedCol[1] == 'b'):
number_of_bins = np.histogram_bin_edges(XData[nodeTransfName], bins='auto')
emptyLabels = []
for index, number in enumerate(number_of_bins):
if (index == 0):
pass
else:
emptyLabels.append(index)
XData[nodeTransfName] = pd.cut(XData[nodeTransfName], bins=number_of_bins, labels=emptyLabels, include_lowest=True, right=True)
XData[nodeTransfName] = pd.to_numeric(XData[nodeTransfName], downcast='signed')
elif (splittedCol[1] == 'zs'):
XData[nodeTransfName] = (XData[nodeTransfName]-XData[nodeTransfName].mean())/XData[nodeTransfName].std()
elif (splittedCol[1] == 'mms'):
XData[nodeTransfName] = (XData[nodeTransfName]-XData[nodeTransfName].min())/(XData[nodeTransfName].max()-XData[nodeTransfName].min())
elif (splittedCol[1] == 'l2'):
dfTemp = []
dfTemp = np.log2(XData[nodeTransfName])
dfTemp = dfTemp.replace([np.inf, -np.inf], np.nan)
dfTemp = dfTemp.fillna(0)
XData[nodeTransfName] = dfTemp
elif (splittedCol[1] == 'l1p'):
dfTemp = []
dfTemp = np.log1p(XData[nodeTransfName])
dfTemp = dfTemp.replace([np.inf, -np.inf], np.nan)
dfTemp = dfTemp.fillna(0)
XData[nodeTransfName] = dfTemp
elif (splittedCol[1] == 'l10'):
dfTemp = []
dfTemp = np.log10(XData[nodeTransfName])
dfTemp = dfTemp.replace([np.inf, -np.inf], np.nan)
dfTemp = dfTemp.fillna(0)
XData[nodeTransfName] = dfTemp
elif (splittedCol[1] == 'e2'):
dfTemp = []
dfTemp = np.exp2(XData[nodeTransfName])
dfTemp = dfTemp.replace([np.inf, -np.inf], np.nan)
dfTemp = dfTemp.fillna(0)
XData[nodeTransfName] = dfTemp
elif (splittedCol[1] == 'em1'):
dfTemp = []
dfTemp = np.expm1(XData[nodeTransfName])
dfTemp = dfTemp.replace([np.inf, -np.inf], np.nan)
dfTemp = dfTemp.fillna(0)
XData[nodeTransfName] = dfTemp
elif (splittedCol[1] == 'p2'):
XData[nodeTransfName] = np.power(XData[nodeTransfName], 2)
elif (splittedCol[1] == 'p3'):
XData[nodeTransfName] = np.power(XData[nodeTransfName], 3)
else:
XData[nodeTransfName] = np.power(XData[nodeTransfName], 4)
XDataNoRemoval[nodeTransfName] = XData[nodeTransfName]
XDataStored = XData.copy()
XDataNoRemovalOrig = XDataNoRemoval.copy()
columnsNamesLoc = XData.columns.values.tolist()
for col in columnsNamesLoc:
splittedCol = col.split('_')
if (len(splittedCol) == 1):
for tran in listofTransformations:
columnsNames.append(splittedCol[0]+'_'+tran)
else:
for tran in listofTransformations:
if (splittedCol[1] == tran):
columnsNames.append(splittedCol[0])
else:
columnsNames.append(splittedCol[0]+'_'+tran)
featureImportanceData = estimatorFeatureSelection(XDataNoRemoval, estimator)
tracker = []
for value in columnsNewGen:
value = value.split(' ')
if (len(value) > 1):
tracker.append(value[1])
else:
tracker.append(value[0])
estimator.fit(XData, yData)
yPredict = estimator.predict(XData)
yPredictProb = cross_val_predict(estimator, XData, yData, cv=crossValidation, method='predict_proba')
num_cores = multiprocessing.cpu_count()
inputsSc = ['accuracy','precision_weighted','recall_weighted']
flat_results = Parallel(n_jobs=num_cores)(delayed(solve)(estimator,XData,yData,crossValidation,item,index) for index, item in enumerate(inputsSc))
scoresAct = [item for sublist in flat_results for item in sublist]
#print(scoresAct)
# if (StanceTest):
# y_pred = estimator.predict(XDataTest)
# print('Test data set')
# print(classification_report(yDataTest, y_pred))
# y_pred = estimator.predict(XDataExternal)
# print('External data set')
# print(classification_report(yDataExternal, y_pred))
howMany = 0
if (keyFirstTime):
previousState = scoresAct
keyFirstTime = False
howMany = 3
if (((scoresAct[0]-scoresAct[1]) + (scoresAct[2]-scoresAct[3]) + (scoresAct[4]-scoresAct[5])) >= ((previousState[0]-previousState[1]) + (previousState[2]-previousState[3]) + (previousState[4]-previousState[5]))):
finalResultsData = XData.copy()
if (keyFirstTime == False):
if (((scoresAct[0]-scoresAct[1]) + (scoresAct[2]-scoresAct[3]) + (scoresAct[4]-scoresAct[5])) >= ((previousState[0]-previousState[1]) + (previousState[2]-previousState[3]) + (previousState[4]-previousState[5]))):
previousState[0] = scoresAct[0]
previousState[1] = scoresAct[1]
howMany = 3
#elif ((scoresAct[2]-scoresAct[3]) > (previousState[2]-previousState[3])):
previousState[2] = scoresAct[2]
previousState[3] = scoresAct[3]
#howMany = howMany + 1
#elif ((scoresAct[4]-scoresAct[5]) > (previousState[4]-previousState[5])):
previousState[4] = scoresAct[4]
previousState[5] = scoresAct[5]
#howMany = howMany + 1
#else:
#pass
scores = scoresAct + previousState
if (howMany == 3):
scores.append(1)
else:
scores.append(0)
return 'Everything Okay'
@app.route('/data/RequestBestFeatures', methods=["GET", "POST"])
def BestFeat():
global finalResultsData
finalResultsDataJSON = finalResultsData.to_json()
response = {
'finalResultsData': finalResultsDataJSON
}
return jsonify(response)
def featFun (clfLocalPar,DataLocalPar,yDataLocalPar):
PerFeatureAccuracyLocalPar = []
scores = model_selection.cross_val_score(clfLocalPar, DataLocalPar, yDataLocalPar, cv=None, n_jobs=-1)
PerFeatureAccuracyLocalPar.append(scores.mean())
return PerFeatureAccuracyLocalPar
location = './cachedir'
memory = Memory(location, verbose=0)
# calculating for all algorithms and models the performance and other results
@memory.cache
def estimatorFeatureSelection(Data, clf):
resultsFS = []
permList = []
PerFeatureAccuracy = []
PerFeatureAccuracyAll = []
ImpurityFS = []
RankingFS = []
estim = clf.fit(Data, yData)
importances = clf.feature_importances_
# std = np.std([tree.feature_importances_ for tree in estim.feature_importances_],
# axis=0)
maxList = max(importances)
minList = min(importances)
for f in range(Data.shape[1]):
ImpurityFS.append((importances[f] - minList) / (maxList - minList))
estim = LogisticRegression(n_jobs = -1, random_state=RANDOM_SEED)
selector = RFECV(estimator=estim, n_jobs = -1, step=1, cv=crossValidation)
selector = selector.fit(Data, yData)
RFEImp = selector.ranking_
for f in range(Data.shape[1]):
if (RFEImp[f] == 1):
RankingFS.append(0.95)
elif (RFEImp[f] == 2):
RankingFS.append(0.85)
elif (RFEImp[f] == 3):
RankingFS.append(0.75)
elif (RFEImp[f] == 4):
RankingFS.append(0.65)
elif (RFEImp[f] == 5):
RankingFS.append(0.55)
elif (RFEImp[f] == 6):
RankingFS.append(0.45)
elif (RFEImp[f] == 7):
RankingFS.append(0.35)
elif (RFEImp[f] == 8):
RankingFS.append(0.25)
elif (RFEImp[f] == 9):
RankingFS.append(0.15)
else:
RankingFS.append(0.05)
perm = PermutationImportance(clf, cv=None, refit = True, n_iter = 25).fit(Data, yData)
permList.append(perm.feature_importances_)
n_feats = Data.shape[1]
num_cores = multiprocessing.cpu_count()
print("Parallelization Initilization")
flat_results = Parallel(n_jobs=num_cores)(delayed(featFun)(clf,Data.values[:, i].reshape(-1, 1),yData) for i in range(n_feats))
PerFeatureAccuracy = [item for sublist in flat_results for item in sublist]
# for i in range(n_feats):
# scoresHere = model_selection.cross_val_score(clf, Data.values[:, i].reshape(-1, 1), yData, cv=None, n_jobs=-1)
# PerFeatureAccuracy.append(scoresHere.mean())
PerFeatureAccuracyAll.append(PerFeatureAccuracy)
clf.fit(Data, yData)
yPredict = clf.predict(Data)
yPredict = np.nan_to_num(yPredict)
RankingFSDF = pd.DataFrame(RankingFS)
RankingFSDF = RankingFSDF.to_json()
ImpurityFSDF = pd.DataFrame(ImpurityFS)
ImpurityFSDF = ImpurityFSDF.to_json()
perm_imp_eli5PD = pd.DataFrame(permList)
if (perm_imp_eli5PD.empty):
for col in Data.columns:
perm_imp_eli5PD = perm_imp_eli5PD.append({0: 0}, ignore_index=True)
perm_imp_eli5PD = perm_imp_eli5PD.to_json()
PerFeatureAccuracyPandas = pd.DataFrame(PerFeatureAccuracyAll)
PerFeatureAccuracyPandas = PerFeatureAccuracyPandas.to_json()
bestfeatures = SelectKBest(score_func=f_classif, k='all')
fit = bestfeatures.fit(Data,yData)
dfscores = pd.DataFrame(fit.scores_)
dfcolumns = pd.DataFrame(Data.columns)
featureScores = pd.concat([dfcolumns,dfscores],axis=1)
featureScores.columns = ['Specs','Score'] #naming the dataframe columns
featureScores = featureScores.to_json()
resultsFS.append(featureScores)
resultsFS.append(ImpurityFSDF)
resultsFS.append(perm_imp_eli5PD)
resultsFS.append(PerFeatureAccuracyPandas)
resultsFS.append(RankingFSDF)
return resultsFS
@app.route('/data/sendFeatImp', methods=["GET", "POST"])
def sendFeatureImportance():
global featureImportanceData
response = {
'Importance': featureImportanceData
}
return jsonify(response)
@app.route('/data/sendFeatImpComp', methods=["GET", "POST"])
def sendFeatureImportanceComp():
global featureCompareData
global columnsKeep
response = {
'ImportanceCompare': featureCompareData,
'FeatureNames': columnsKeep
}
return jsonify(response)
def solve(sclf,XData,yData,crossValidation,scoringIn,loop):
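    # Cross-validate the given classifier with the requested scoring metric and
    # return [mean, std] of the fold scores.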
scoresLoc = []
temp = model_selection.cross_val_score(sclf, XData, yData, cv=crossValidation, scoring=scoringIn, n_jobs=-1)
scoresLoc.append(temp.mean())
scoresLoc.append(temp.std())
return scoresLoc
@app.route('/data/sendResults', methods=["GET", "POST"])
def sendFinalResults():
global scores
response = {
'ValidResults': scores
}
return jsonify(response)
def Transformation(quadrant1, quadrant2, quadrant3, quadrant4, quadrant5):
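    # For every numeric feature, rebuild the per-quadrant statistics for a fixed
    # menu of candidate transformations (round, binning, z-score, min-max,
    # log2/log1p/log10, exp2/expm1, powers 2-4) via NewComputationTransf. A
    # column name without an underscore suffix appears to mean "keep the raw
    # values" for that transformation slot.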
# XDataNumericColumn = XData.select_dtypes(include='number')
XDataNumeric = XDataStoredOriginal.select_dtypes(include='number')
columns = list(XDataNumeric)
global packCorrTransformed
packCorrTransformed = []
for count, i in enumerate(columns):
dicTransf = {}
splittedCol = columnsNames[(count)*len(listofTransformations)+0].split('_')
if(len(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf1"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
XDataNumericCopy[i] = XDataNumericCopy[i].round()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf1"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*len(listofTransformations)+1].split('_')
if(len(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf2"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
number_of_bins = np.histogram_bin_edges(XDataNumericCopy[i], bins='auto')
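            # np.histogram_bin_edges returns the bin *edges* (despite the variable
            # name); they are passed to pd.cut below with integer labels 1..n_bins.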
emptyLabels = []
for index, number in enumerate(number_of_bins):
if (index == 0):
pass
else:
emptyLabels.append(index)
XDataNumericCopy[i] = pd.cut(XDataNumericCopy[i], bins=number_of_bins, labels=emptyLabels, include_lowest=True, right=True)
XDataNumericCopy[i] = pd.to_numeric(XDataNumericCopy[i], downcast='signed')
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf2"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*len(listofTransformations)+2].split('_')
if(len(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf3"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
XDataNumericCopy[i] = (XDataNumericCopy[i]-XDataNumericCopy[i].mean())/XDataNumericCopy[i].std()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf3"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*len(listofTransformations)+3].split('_')
if(len(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf4"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
XDataNumericCopy[i] = (XDataNumericCopy[i]-XDataNumericCopy[i].min())/(XDataNumericCopy[i].max()-XDataNumericCopy[i].min())
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf4"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*len(listofTransformations)+4].split('_')
if(len(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf5"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
dfTemp = []
dfTemp = np.log2(XDataNumericCopy[i])
dfTemp = dfTemp.replace([np.inf, -np.inf], np.nan)
dfTemp = dfTemp.fillna(0)
XDataNumericCopy[i] = dfTemp
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf5"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*len(listofTransformations)+5].split('_')
if(len(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf6"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
dfTemp = []
dfTemp = np.log1p(XDataNumericCopy[i])
dfTemp = dfTemp.replace([np.inf, -np.inf], np.nan)
dfTemp = dfTemp.fillna(0)
XDataNumericCopy[i] = dfTemp
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf6"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*len(listofTransformations)+6].split('_')
if(len(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf7"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
dfTemp = []
dfTemp = np.log10(XDataNumericCopy[i])
dfTemp = dfTemp.replace([np.inf, -np.inf], np.nan)
dfTemp = dfTemp.fillna(0)
XDataNumericCopy[i] = dfTemp
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf7"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*len(listofTransformations)+7].split('_')
if(len(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf8"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
dfTemp = []
dfTemp = np.exp2(XDataNumericCopy[i])
dfTemp = dfTemp.replace([np.inf, -np.inf], np.nan)
dfTemp = dfTemp.fillna(0)
XDataNumericCopy[i] = dfTemp
if (np.isinf(dfTemp.var())):
flagInf = True
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf8"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*len(listofTransformations)+8].split('_')
if(len(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf9"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
dfTemp = []
dfTemp = np.expm1(XDataNumericCopy[i])
dfTemp = dfTemp.replace([np.inf, -np.inf], np.nan)
dfTemp = dfTemp.fillna(0)
XDataNumericCopy[i] = dfTemp
if (np.isinf(dfTemp.var())):
flagInf = True
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf9"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*len(listofTransformations)+9].split('_')
if(len(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf10"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
XDataNumericCopy[i] = np.power(XDataNumericCopy[i], 2)
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf10"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*len(listofTransformations)+10].split('_')
if(len(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf11"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
XDataNumericCopy[i] = np.power(XDataNumericCopy[i], 3)
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf11"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*len(listofTransformations)+11].split('_')
if(len(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf12"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
XDataNumericCopy[i] = np.power(XDataNumericCopy[i], 4)
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf12"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
packCorrTransformed.append(dicTransf)
return 'Everything Okay'
def NewComputationTransf(DataRows1, DataRows2, DataRows3, DataRows4, DataRows5, quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, feature, count, flagInf):
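    # For each of the five instance subsets, compute per-feature statistics:
    # absolute Pearson correlations against the other features, correlations
    # with the one-hot-encoded target, variance inflation factors (VIF), and
    # mutual information with the target. Everything is returned as a flat
    # list of JSON strings.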
corrMatrix1 = DataRows1.corr()
corrMatrix1 = corrMatrix1.abs()
corrMatrix2 = DataRows2.corr()
corrMatrix2 = corrMatrix2.abs()
corrMatrix3 = DataRows3.corr()
corrMatrix3 = corrMatrix3.abs()
corrMatrix4 = DataRows4.corr()
corrMatrix4 = corrMatrix4.abs()
corrMatrix5 = DataRows5.corr()
corrMatrix5 = corrMatrix5.abs()
corrMatrix1 = corrMatrix1.loc[[feature]]
corrMatrix2 = corrMatrix2.loc[[feature]]
corrMatrix3 = corrMatrix3.loc[[feature]]
corrMatrix4 = corrMatrix4.loc[[feature]]
corrMatrix5 = corrMatrix5.loc[[feature]]
DataRows1 = DataRows1.reset_index(drop=True)
DataRows2 = DataRows2.reset_index(drop=True)
DataRows3 = DataRows3.reset_index(drop=True)
DataRows4 = DataRows4.reset_index(drop=True)
DataRows5 = DataRows5.reset_index(drop=True)
targetRows1 = [yData[i] for i in quadrant1]
targetRows2 = [yData[i] for i in quadrant2]
targetRows3 = [yData[i] for i in quadrant3]
targetRows4 = [yData[i] for i in quadrant4]
targetRows5 = [yData[i] for i in quadrant5]
targetRows1Arr = np.array(targetRows1)
targetRows2Arr = np.array(targetRows2)
targetRows3Arr = np.array(targetRows3)
targetRows4Arr = np.array(targetRows4)
targetRows5Arr = np.array(targetRows5)
uniqueTarget1 = unique(targetRows1)
uniqueTarget2 = unique(targetRows2)
uniqueTarget3 = unique(targetRows3)
uniqueTarget4 = unique(targetRows4)
uniqueTarget5 = unique(targetRows5)
if (len(targetRows1Arr) > 0):
onehotEncoder1 = OneHotEncoder(sparse=False)
targetRows1Arr = targetRows1Arr.reshape(len(targetRows1Arr), 1)
onehotEncoder1 = onehotEncoder1.fit_transform(targetRows1Arr)
hotEncoderDF1 = pd.DataFrame(onehotEncoder1)
concatDF1 = pd.concat([DataRows1, hotEncoderDF1], axis=1)
corrMatrixComb1 = concatDF1.corr()
corrMatrixComb1 = corrMatrixComb1.abs()
corrMatrixComb1 = corrMatrixComb1.iloc[:,-len(uniqueTarget1):]
DataRows1 = DataRows1.replace([np.inf, -np.inf], np.nan)
DataRows1 = DataRows1.fillna(0)
X1 = add_constant(DataRows1)
X1 = X1.replace([np.inf, -np.inf], np.nan)
X1 = X1.fillna(0)
VIF1 = pd.Series([variance_inflation_factor(X1.values, i)
for i in range(X1.shape[1])],
index=X1.columns)
if (flagInf == False):
VIF1 = VIF1.replace([np.inf, -np.inf], np.nan)
VIF1 = VIF1.fillna(0)
VIF1 = VIF1.loc[[feature]]
else:
VIF1 = pd.Series()
if ((len(targetRows1Arr) > 2) and (flagInf == False)):
MI1 = mutual_info_classif(DataRows1, targetRows1Arr, n_neighbors=3, random_state=RANDOM_SEED)
MI1List = MI1.tolist()
MI1List = MI1List[count]
else:
MI1List = []
else:
corrMatrixComb1 = pd.DataFrame()
VIF1 = pd.Series()
MI1List = []
if (len(targetRows2Arr) > 0):
onehotEncoder2 = OneHotEncoder(sparse=False)
targetRows2Arr = targetRows2Arr.reshape(len(targetRows2Arr), 1)
onehotEncoder2 = onehotEncoder2.fit_transform(targetRows2Arr)
hotEncoderDF2 = pd.DataFrame(onehotEncoder2)
concatDF2 = pd.concat([DataRows2, hotEncoderDF2], axis=1)
corrMatrixComb2 = concatDF2.corr()
corrMatrixComb2 = corrMatrixComb2.abs()
corrMatrixComb2 = corrMatrixComb2.iloc[:,-len(uniqueTarget2):]
DataRows2 = DataRows2.replace([np.inf, -np.inf], np.nan)
DataRows2 = DataRows2.fillna(0)
X2 = add_constant(DataRows2)
X2 = X2.replace([np.inf, -np.inf], np.nan)
X2 = X2.fillna(0)
VIF2 = pd.Series([variance_inflation_factor(X2.values, i)
for i in range(X2.shape[1])],
index=X2.columns)
if (flagInf == False):
VIF2 = VIF2.replace([np.inf, -np.inf], np.nan)
VIF2 = VIF2.fillna(0)
VIF2 = VIF2.loc[[feature]]
else:
VIF2 = pd.Series()
if ((len(targetRows2Arr) > 2) and (flagInf == False)):
MI2 = mutual_info_classif(DataRows2, targetRows2Arr, n_neighbors=3, random_state=RANDOM_SEED)
MI2List = MI2.tolist()
MI2List = MI2List[count]
else:
MI2List = []
else:
corrMatrixComb2 = pd.DataFrame()
VIF2 = pd.Series()
MI2List = []
if (len(targetRows3Arr) > 0):
onehotEncoder3 = OneHotEncoder(sparse=False)
targetRows3Arr = targetRows3Arr.reshape(len(targetRows3Arr), 1)
onehotEncoder3 = onehotEncoder3.fit_transform(targetRows3Arr)
hotEncoderDF3 = pd.DataFrame(onehotEncoder3)
concatDF3 = pd.concat([DataRows3, hotEncoderDF3], axis=1)
corrMatrixComb3 = concatDF3.corr()
corrMatrixComb3 = corrMatrixComb3.abs()
corrMatrixComb3 = corrMatrixComb3.iloc[:,-len(uniqueTarget3):]
DataRows3 = DataRows3.replace([np.inf, -np.inf], np.nan)
DataRows3 = DataRows3.fillna(0)
X3 = add_constant(DataRows3)
X3 = X3.replace([np.inf, -np.inf], np.nan)
X3 = X3.fillna(0)
if (flagInf == False):
VIF3 = pd.Series([variance_inflation_factor(X3.values, i)
for i in range(X3.shape[1])],
index=X3.columns)
VIF3 = VIF3.replace([np.inf, -np.inf], np.nan)
VIF3 = VIF3.fillna(0)
VIF3 = VIF3.loc[[feature]]
else:
VIF3 = pd.Series()
if ((len(targetRows3Arr) > 2) and (flagInf == False)):
MI3 = mutual_info_classif(DataRows3, targetRows3Arr, n_neighbors=3, random_state=RANDOM_SEED)
MI3List = MI3.tolist()
MI3List = MI3List[count]
else:
MI3List = []
else:
corrMatrixComb3 = pd.DataFrame()
VIF3 = pd.Series()
MI3List = []
if (len(targetRows4Arr) > 0):
onehotEncoder4 = OneHotEncoder(sparse=False)
targetRows4Arr = targetRows4Arr.reshape(len(targetRows4Arr), 1)
onehotEncoder4 = onehotEncoder4.fit_transform(targetRows4Arr)
hotEncoderDF4 = pd.DataFrame(onehotEncoder4)
concatDF4 = pd.concat([DataRows4, hotEncoderDF4], axis=1)
corrMatrixComb4 = concatDF4.corr()
corrMatrixComb4 = corrMatrixComb4.abs()
corrMatrixComb4 = corrMatrixComb4.iloc[:,-len(uniqueTarget4):]
DataRows4 = DataRows4.replace([np.inf, -np.inf], np.nan)
DataRows4 = DataRows4.fillna(0)
X4 = add_constant(DataRows4)
X4 = X4.replace([np.inf, -np.inf], np.nan)
X4 = X4.fillna(0)
if (flagInf == False):
VIF4 = pd.Series([variance_inflation_factor(X4.values, i)
for i in range(X4.shape[1])],
index=X4.columns)
VIF4 = VIF4.replace([np.inf, -np.inf], np.nan)
VIF4 = VIF4.fillna(0)
VIF4 = VIF4.loc[[feature]]
else:
VIF4 = pd.Series()
if ((len(targetRows4Arr) > 2) and (flagInf == False)):
MI4 = mutual_info_classif(DataRows4, targetRows4Arr, n_neighbors=3, random_state=RANDOM_SEED)
MI4List = MI4.tolist()
MI4List = MI4List[count]
else:
MI4List = []
else:
corrMatrixComb4 = pd.DataFrame()
VIF4 = pd.Series()
MI4List = []
if (len(targetRows5Arr) > 0):
onehotEncoder5 = OneHotEncoder(sparse=False)
targetRows5Arr = targetRows5Arr.reshape(len(targetRows5Arr), 1)
onehotEncoder5 = onehotEncoder5.fit_transform(targetRows5Arr)
hotEncoderDF5 = pd.DataFrame(onehotEncoder5)
concatDF5 = pd.concat([DataRows5, hotEncoderDF5], axis=1)
corrMatrixComb5 = concatDF5.corr()
corrMatrixComb5 = corrMatrixComb5.abs()
corrMatrixComb5 = corrMatrixComb5.iloc[:,-len(uniqueTarget5):]
DataRows5 = DataRows5.replace([np.inf, -np.inf], np.nan)
DataRows5 = DataRows5.fillna(0)
X5 = add_constant(DataRows5)
X5 = X5.replace([np.inf, -np.inf], np.nan)
X5 = X5.fillna(0)
if (flagInf == False):
VIF5 = pd.Series([variance_inflation_factor(X5.values, i)
for i in range(X5.shape[1])],
index=X5.columns)
VIF5 = VIF5.replace([np.inf, -np.inf], np.nan)
VIF5 = VIF5.fillna(0)
VIF5 = VIF5.loc[[feature]]
else:
VIF5 = pd.Series()
if ((len(targetRows5Arr) > 2) and (flagInf == False)):
MI5 = mutual_info_classif(DataRows5, targetRows5Arr, n_neighbors=3, random_state=RANDOM_SEED)
MI5List = MI5.tolist()
MI5List = MI5List[count]
else:
MI5List = []
else:
corrMatrixComb5 = pd.DataFrame()
VIF5 = pd.Series()
MI5List = []
if(corrMatrixComb1.empty):
corrMatrixComb1 = pd.DataFrame()
else:
corrMatrixComb1 = corrMatrixComb1.loc[[feature]]
if(corrMatrixComb2.empty):
corrMatrixComb2 = pd.DataFrame()
else:
corrMatrixComb2 = corrMatrixComb2.loc[[feature]]
if(corrMatrixComb3.empty):
corrMatrixComb3 = pd.DataFrame()
else:
corrMatrixComb3 = corrMatrixComb3.loc[[feature]]
if(corrMatrixComb4.empty):
corrMatrixComb4 = pd.DataFrame()
else:
corrMatrixComb4 = corrMatrixComb4.loc[[feature]]
if(corrMatrixComb5.empty):
corrMatrixComb5 = pd.DataFrame()
else:
corrMatrixComb5 = corrMatrixComb5.loc[[feature]]
targetRows1ArrDF = pd.DataFrame(targetRows1Arr)
targetRows2ArrDF = pd.DataFrame(targetRows2Arr)
targetRows3ArrDF = pd.DataFrame(targetRows3Arr)
targetRows4ArrDF = pd.DataFrame(targetRows4Arr)
targetRows5ArrDF = pd.DataFrame(targetRows5Arr)
concatAllDF1 = pd.concat([DataRows1, targetRows1ArrDF], axis=1)
concatAllDF2 = pd.concat([DataRows2, targetRows2ArrDF], axis=1)
concatAllDF3 = pd.concat([DataRows3, targetRows3ArrDF], axis=1)
concatAllDF4 = pd.concat([DataRows4, targetRows4ArrDF], axis=1)
concatAllDF5 = pd.concat([DataRows5, targetRows5ArrDF], axis=1)
corrMatrixCombTotal1 = concatAllDF1.corr()
corrMatrixCombTotal1 = corrMatrixCombTotal1.abs()
corrMatrixCombTotal2 = concatAllDF2.corr()
corrMatrixCombTotal2 = corrMatrixCombTotal2.abs()
corrMatrixCombTotal3 = concatAllDF3.corr()
corrMatrixCombTotal3 = corrMatrixCombTotal3.abs()
corrMatrixCombTotal4 = concatAllDF4.corr()
corrMatrixCombTotal4 = corrMatrixCombTotal4.abs()
corrMatrixCombTotal5 = concatAllDF5.corr()
corrMatrixCombTotal5 = corrMatrixCombTotal5.abs()
corrMatrixCombTotal1 = corrMatrixCombTotal1.loc[[feature]]
corrMatrixCombTotal1 = corrMatrixCombTotal1.iloc[:,-1]
corrMatrixCombTotal2 = corrMatrixCombTotal2.loc[[feature]]
corrMatrixCombTotal2 = corrMatrixCombTotal2.iloc[:,-1]
corrMatrixCombTotal3 = corrMatrixCombTotal3.loc[[feature]]
corrMatrixCombTotal3 = corrMatrixCombTotal3.iloc[:,-1]
corrMatrixCombTotal4 = corrMatrixCombTotal4.loc[[feature]]
corrMatrixCombTotal4 = corrMatrixCombTotal4.iloc[:,-1]
corrMatrixCombTotal5 = corrMatrixCombTotal5.loc[[feature]]
corrMatrixCombTotal5 = corrMatrixCombTotal5.iloc[:,-1]
corrMatrixCombTotal1 = pd.concat([corrMatrixCombTotal1.tail(1)])
corrMatrixCombTotal2 = pd.concat([corrMatrixCombTotal2.tail(1)])
corrMatrixCombTotal3 = pd.concat([corrMatrixCombTotal3.tail(1)])
corrMatrixCombTotal4 = pd.concat([corrMatrixCombTotal4.tail(1)])
corrMatrixCombTotal5 = pd.concat([corrMatrixCombTotal5.tail(1)])
packCorrLoc = []
packCorrLoc.append(corrMatrix1.to_json())
packCorrLoc.append(corrMatrix2.to_json())
packCorrLoc.append(corrMatrix3.to_json())
packCorrLoc.append(corrMatrix4.to_json())
packCorrLoc.append(corrMatrix5.to_json())
packCorrLoc.append(corrMatrixComb1.to_json())
packCorrLoc.append(corrMatrixComb2.to_json())
packCorrLoc.append(corrMatrixComb3.to_json())
packCorrLoc.append(corrMatrixComb4.to_json())
packCorrLoc.append(corrMatrixComb5.to_json())
packCorrLoc.append(corrMatrixCombTotal1.to_json())
packCorrLoc.append(corrMatrixCombTotal2.to_json())
packCorrLoc.append(corrMatrixCombTotal3.to_json())
packCorrLoc.append(corrMatrixCombTotal4.to_json())
packCorrLoc.append(corrMatrixCombTotal5.to_json())
packCorrLoc.append(VIF1.to_json())
packCorrLoc.append(VIF2.to_json())
packCorrLoc.append(VIF3.to_json())
packCorrLoc.append(VIF4.to_json())
packCorrLoc.append(VIF5.to_json())
packCorrLoc.append(json.dumps(MI1List))
packCorrLoc.append(json.dumps(MI2List))
packCorrLoc.append(json.dumps(MI3List))
packCorrLoc.append(json.dumps(MI4List))
packCorrLoc.append(json.dumps(MI5List))
return packCorrLoc
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/thresholdDataSpace', methods=["GET", "POST"])
def Seperation():
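    # Split instances into groups ("quadrants") by the predicted probability of
    # their true class relative to the user-supplied positive/negative
    # thresholds; quadrant5 always holds every instance.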
thresholds = request.get_data().decode('utf8').replace("'", '"')
thresholds = json.loads(thresholds)
thresholdsPos = thresholds['PositiveValue']
thresholdsNeg = thresholds['NegativeValue']
getCorrectPrediction = []
for index, value in enumerate(yPredictProb):
getCorrectPrediction.append(value[yData[index]]*100)
quadrant1 = []
quadrant2 = []
quadrant3 = []
quadrant4 = []
quadrant5 = []
probabilityPredictions = []
for index, value in enumerate(getCorrectPrediction):
if (value > 50 and value > thresholdsPos):
quadrant1.append(index)
elif (value > 50 and value <= thresholdsPos):
quadrant2.append(index)
elif (value <= 50 and value > thresholdsNeg):
quadrant3.append(index)
else:
quadrant4.append(index)
quadrant5.append(index)
probabilityPredictions.append(value)
# Main Features
DataRows1 = XData.iloc[quadrant1, :]
DataRows2 = XData.iloc[quadrant2, :]
DataRows3 = XData.iloc[quadrant3, :]
DataRows4 = XData.iloc[quadrant4, :]
DataRows5 = XData.iloc[quadrant5, :]
Transformation(quadrant1, quadrant2, quadrant3, quadrant4, quadrant5)
corrMatrix1 = DataRows1.corr()
corrMatrix1 = corrMatrix1.abs()
corrMatrix2 = DataRows2.corr()
corrMatrix2 = corrMatrix2.abs()
corrMatrix3 = DataRows3.corr()
corrMatrix3 = corrMatrix3.abs()
corrMatrix4 = DataRows4.corr()
corrMatrix4 = corrMatrix4.abs()
corrMatrix5 = DataRows5.corr()
corrMatrix5 = corrMatrix5.abs()
DataRows1 = DataRows1.reset_index(drop=True)
DataRows2 = DataRows2.reset_index(drop=True)
DataRows3 = DataRows3.reset_index(drop=True)
DataRows4 = DataRows4.reset_index(drop=True)
DataRows5 = DataRows5.reset_index(drop=True)
targetRows1 = [yData[i] for i in quadrant1]
targetRows2 = [yData[i] for i in quadrant2]
targetRows3 = [yData[i] for i in quadrant3]
targetRows4 = [yData[i] for i in quadrant4]
targetRows5 = [yData[i] for i in quadrant5]
targetRows1Arr = np.array(targetRows1)
targetRows2Arr = np.array(targetRows2)
targetRows3Arr = np.array(targetRows3)
targetRows4Arr = np.array(targetRows4)
targetRows5Arr = np.array(targetRows5)
uniqueTarget1 = unique(targetRows1)
uniqueTarget2 = unique(targetRows2)
uniqueTarget3 = unique(targetRows3)
uniqueTarget4 = unique(targetRows4)
uniqueTarget5 = unique(targetRows5)
if (len(targetRows1Arr) > 0):
onehotEncoder1 = OneHotEncoder(sparse=False)
targetRows1Arr = targetRows1Arr.reshape(len(targetRows1Arr), 1)
onehotEncoder1 = onehotEncoder1.fit_transform(targetRows1Arr)
hotEncoderDF1 = pd.DataFrame(onehotEncoder1)
concatDF1 = pd.concat([DataRows1, hotEncoderDF1], axis=1)
corrMatrixComb1 = concatDF1.corr()
corrMatrixComb1 = corrMatrixComb1.abs()
corrMatrixComb1 = corrMatrixComb1.iloc[:,-len(uniqueTarget1):]
DataRows1 = DataRows1.replace([np.inf, -np.inf], np.nan)
DataRows1 = DataRows1.fillna(0)
X1 = add_constant(DataRows1)
X1 = X1.replace([np.inf, -np.inf], np.nan)
X1 = X1.fillna(0)
VIF1 = pd.Series([variance_inflation_factor(X1.values, i)
for i in range(X1.shape[1])],
index=X1.columns)
VIF1 = VIF1.replace([np.inf, -np.inf], np.nan)
VIF1 = VIF1.fillna(0)
if (len(targetRows1Arr) > 2):
MI1 = mutual_info_classif(DataRows1, targetRows1Arr, n_neighbors=3, random_state=RANDOM_SEED)
MI1List = MI1.tolist()
else:
MI1List = []
else:
corrMatrixComb1 = pd.DataFrame()
VIF1 = pd.Series()
MI1List = []
if (len(targetRows2Arr) > 0):
onehotEncoder2 = OneHotEncoder(sparse=False)
targetRows2Arr = targetRows2Arr.reshape(len(targetRows2Arr), 1)
onehotEncoder2 = onehotEncoder2.fit_transform(targetRows2Arr)
hotEncoderDF2 = pd.DataFrame(onehotEncoder2)
concatDF2 = pd.concat([DataRows2, hotEncoderDF2], axis=1)
corrMatrixComb2 = concatDF2.corr()
corrMatrixComb2 = corrMatrixComb2.abs()
corrMatrixComb2 = corrMatrixComb2.iloc[:,-len(uniqueTarget2):]
DataRows2 = DataRows2.replace([np.inf, -np.inf], np.nan)
DataRows2 = DataRows2.fillna(0)
X2 = add_constant(DataRows2)
X2 = X2.replace([np.inf, -np.inf], np.nan)
X2 = X2.fillna(0)
VIF2 = pd.Series([variance_inflation_factor(X2.values, i)
for i in range(X2.shape[1])],
index=X2.columns)
VIF2 = VIF2.replace([np.inf, -np.inf], np.nan)
VIF2 = VIF2.fillna(0)
if (len(targetRows2Arr) > 2):
MI2 = mutual_info_classif(DataRows2, targetRows2Arr, n_neighbors=3, random_state=RANDOM_SEED)
MI2List = MI2.tolist()
else:
MI2List = []
else:
corrMatrixComb2 = pd.DataFrame()
VIF2 = pd.Series()
MI2List = []
if (len(targetRows3Arr) > 0):
onehotEncoder3 = OneHotEncoder(sparse=False)
targetRows3Arr = targetRows3Arr.reshape(len(targetRows3Arr), 1)
onehotEncoder3 = onehotEncoder3.fit_transform(targetRows3Arr)
hotEncoderDF3 = pd.DataFrame(onehotEncoder3)
concatDF3 = pd.concat([DataRows3, hotEncoderDF3], axis=1)
corrMatrixComb3 = concatDF3.corr()
corrMatrixComb3 = corrMatrixComb3.abs()
corrMatrixComb3 = corrMatrixComb3.iloc[:,-len(uniqueTarget3):]
DataRows3 = DataRows3.replace([np.inf, -np.inf], np.nan)
DataRows3 = DataRows3.fillna(0)
X3 = add_constant(DataRows3)
X3 = X3.replace([np.inf, -np.inf], np.nan)
X3 = X3.fillna(0)
VIF3 = pd.Series([variance_inflation_factor(X3.values, i)
for i in range(X3.shape[1])],
index=X3.columns)
VIF3 = VIF3.replace([np.inf, -np.inf], np.nan)
VIF3 = VIF3.fillna(0)
if (len(targetRows3Arr) > 2):
MI3 = mutual_info_classif(DataRows3, targetRows3Arr, n_neighbors=3, random_state=RANDOM_SEED)
MI3List = MI3.tolist()
else:
MI3List = []
else:
corrMatrixComb3 = pd.DataFrame()
VIF3 = pd.Series()
MI3List = []
if (len(targetRows4Arr) > 0):
onehotEncoder4 = OneHotEncoder(sparse=False)
targetRows4Arr = targetRows4Arr.reshape(len(targetRows4Arr), 1)
onehotEncoder4 = onehotEncoder4.fit_transform(targetRows4Arr)
hotEncoderDF4 = pd.DataFrame(onehotEncoder4)
concatDF4 = pd.concat([DataRows4, hotEncoderDF4], axis=1)
corrMatrixComb4 = concatDF4.corr()
corrMatrixComb4 = corrMatrixComb4.abs()
corrMatrixComb4 = corrMatrixComb4.iloc[:,-len(uniqueTarget4):]
DataRows4 = DataRows4.replace([np.inf, -np.inf], np.nan)
DataRows4 = DataRows4.fillna(0)
X4 = add_constant(DataRows4)
X4 = X4.replace([np.inf, -np.inf], np.nan)
X4 = X4.fillna(0)
VIF4 = pd.Series([variance_inflation_factor(X4.values, i)
for i in range(X4.shape[1])],
index=X4.columns)
VIF4 = VIF4.replace([np.inf, -np.inf], np.nan)
VIF4 = VIF4.fillna(0)
if (len(targetRows4Arr) > 2):
MI4 = mutual_info_classif(DataRows4, targetRows4Arr, n_neighbors=3, random_state=RANDOM_SEED)
MI4List = MI4.tolist()
else:
MI4List = []
else:
corrMatrixComb4 = pd.DataFrame()
VIF4 = pd.Series()
MI4List = []
if (len(targetRows5Arr) > 0):
onehotEncoder5 = OneHotEncoder(sparse=False)
targetRows5Arr = targetRows5Arr.reshape(len(targetRows5Arr), 1)
onehotEncoder5 = onehotEncoder5.fit_transform(targetRows5Arr)
hotEncoderDF5 = pd.DataFrame(onehotEncoder5)
concatDF5 = pd.concat([DataRows5, hotEncoderDF5], axis=1)
corrMatrixComb5 = concatDF5.corr()
corrMatrixComb5 = corrMatrixComb5.abs()
corrMatrixComb5 = corrMatrixComb5.iloc[:,-len(uniqueTarget5):]
DataRows5 = DataRows5.replace([np.inf, -np.inf], np.nan)
DataRows5 = DataRows5.fillna(0)
X5 = add_constant(DataRows5)
X5 = X5.replace([np.inf, -np.inf], np.nan)
X5 = X5.fillna(0)
VIF5 = pd.Series([variance_inflation_factor(X5.values, i)
for i in range(X5.shape[1])],
index=X5.columns)
VIF5 = VIF5.replace([np.inf, -np.inf], np.nan)
VIF5 = VIF5.fillna(0)
if (len(targetRows5Arr) > 2):
MI5 = mutual_info_classif(DataRows5, targetRows5Arr, n_neighbors=3, random_state=RANDOM_SEED)
MI5List = MI5.tolist()
else:
MI5List = []
else:
corrMatrixComb5 = pd.DataFrame()
VIF5 = pd.Series()
MI5List = []
targetRows1ArrDF = pd.DataFrame(targetRows1Arr)
targetRows2ArrDF = pd.DataFrame(targetRows2Arr)
    targetRows3ArrDF = pd.DataFrame(targetRows3Arr)
"""
This module contains all US-specific data loading and data cleaning routines.
"""
import requests
import pandas as pd
import numpy as np
idx = pd.IndexSlice
def get_raw_covidtracking_data():
""" Gets the current daily CSV from COVIDTracking """
url = "https://covidtracking.com/api/v1/states/daily.csv"
data = pd.read_csv(url)
return data
def process_covidtracking_data(data: pd.DataFrame, run_date: pd.Timestamp):
""" Processes raw COVIDTracking data to be in a form for the GenerativeModel.
In many cases, we need to correct data errors or obvious outliers."""
data = data.rename(columns={"state": "region"})
data["date"] = pd.to_datetime(data["date"], format="%Y%m%d")
data = data.set_index(["region", "date"]).sort_index()
data = data[["positive", "total"]]
# Too little data or unreliable reporting in the data source.
data = data.drop(["MP", "GU", "AS", "PR", "VI"])
# On Jun 5 Covidtracking started counting probable cases too
# which increases the amount by 5014.
# https://covidtracking.com/screenshots/MI/MI-20200605-184320.png
data.loc[idx["MI", pd.Timestamp("2020-06-05") :], "positive"] -= 5014
# From CT: On June 19th, LDH removed 1666 duplicate and non resident cases
# after implementing a new de-duplicaton process.
data.loc[idx["LA", pd.Timestamp("2020-06-19") :], :] += 1666
# Now work with daily counts
data = data.diff().dropna().clip(0, None).sort_index()
# Michigan missed 6/18 totals and lumped them into 6/19 so we've
# divided the totals in two and equally distributed to both days.
data.loc[idx["MI", pd.Timestamp("2020-06-18")], "total"] = 14871
data.loc[idx["MI", pd.Timestamp("2020-06-19")], "total"] = 14871
# Note that when we set total to zero, the model ignores that date. See
# the likelihood function in GenerativeModel.build
# Huge outlier in NJ causing sampling issues.
data.loc[idx["NJ", pd.Timestamp("2020-05-11")], :] = 0
# Huge outlier in CA causing sampling issues.
data.loc[idx["CA", pd.Timestamp("2020-04-22")], :] = 0
# Huge outlier in CA causing sampling issues.
# TODO: generally should handle when # tests == # positives and that
# is not an indication of positive rate.
data.loc[idx["SC", pd.Timestamp("2020-06-26")], :] = 0
# Two days of no new data then lumped sum on third day with lack of new total tests
data.loc[idx["OR", pd.Timestamp("2020-06-26") : pd.Timestamp("2020-06-28")], 'positive'] = 174
data.loc[idx["OR", | pd.Timestamp("2020-06-26") | pandas.Timestamp |
# pylint: disable-msg=E1101,W0612
from __future__ import with_statement # for Python 2.5
from datetime import datetime, time, timedelta
import sys
import os
import unittest
import nose
import numpy as np
from pandas import (Index, Series, TimeSeries, DataFrame, isnull,
date_range, Timestamp)
from pandas import DatetimeIndex, Int64Index, to_datetime
from pandas.core.daterange import DateRange
import pandas.core.datetools as datetools
import pandas.tseries.offsets as offsets
from pandas.tseries.index import bdate_range, date_range
import pandas.tseries.tools as tools
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
import pandas.lib as lib
import cPickle as pickle
import pandas.core.datetools as dt
from numpy.random import rand
from pandas.util.testing import assert_frame_equal
import pandas.util.py3compat as py3compat
from pandas.core.datetools import BDay
import pandas.core.common as com
def _skip_if_no_pytz():
try:
import pytz
except ImportError:
raise nose.SkipTest
try:
import pytz
except ImportError:
pass
class TestTimeZoneSupport(unittest.TestCase):
def setUp(self):
_skip_if_no_pytz()
def test_utc_to_local_no_modify(self):
rng = date_range('3/11/2012', '3/12/2012', freq='H', tz='utc')
rng_eastern = rng.tz_convert('US/Eastern')
# Values are unmodified
self.assert_(np.array_equal(rng.asi8, rng_eastern.asi8))
self.assert_(rng_eastern.tz == pytz.timezone('US/Eastern'))
def test_localize_utc_conversion(self):
# Localizing to time zone should:
# 1) check for DST ambiguities
# 2) convert to UTC
rng = date_range('3/10/2012', '3/11/2012', freq='30T')
converted = rng.tz_localize('US/Eastern')
expected_naive = rng + offsets.Hour(5)
self.assert_(np.array_equal(converted.asi8, expected_naive.asi8))
# DST ambiguity, this should fail
rng = date_range('3/11/2012', '3/12/2012', freq='30T')
self.assertRaises(Exception, rng.tz_localize, 'US/Eastern')
def test_tz_localize_dti(self):
from pandas.tseries.offsets import Hour
dti = DatetimeIndex(start='1/1/2005', end='1/1/2005 0:00:30.256',
freq='L')
dti2 = dti.tz_localize('US/Eastern')
dti_utc = DatetimeIndex(start='1/1/2005 05:00',
end='1/1/2005 5:00:30.256', freq='L',
tz='utc')
self.assert_(np.array_equal(dti2.values, dti_utc.values))
dti3 = dti2.tz_convert('US/Pacific')
self.assert_(np.array_equal(dti3.values, dti_utc.values))
dti = DatetimeIndex(start='11/6/2011 1:59',
end='11/6/2011 2:00', freq='L')
self.assertRaises(pytz.AmbiguousTimeError, dti.tz_localize,
'US/Eastern')
dti = DatetimeIndex(start='3/13/2011 1:59', end='3/13/2011 2:00',
freq='L')
self.assertRaises(pytz.AmbiguousTimeError, dti.tz_localize,
'US/Eastern')
def test_utc_box_timestamp_and_localize(self):
rng = date_range('3/11/2012', '3/12/2012', freq='H', tz='utc')
rng_eastern = rng.tz_convert('US/Eastern')
tz = pytz.timezone('US/Eastern')
expected = tz.normalize(rng[-1])
stamp = rng_eastern[-1]
self.assertEquals(stamp, expected)
self.assertEquals(stamp.tzinfo, expected.tzinfo)
# right tzinfo
rng = date_range('3/13/2012', '3/14/2012', freq='H', tz='utc')
rng_eastern = rng.tz_convert('US/Eastern')
self.assert_('EDT' in repr(rng_eastern[0].tzinfo))
def test_timestamp_tz_convert(self):
strdates = ['1/1/2012', '3/1/2012', '4/1/2012']
idx = DatetimeIndex(strdates, tz='US/Eastern')
conv = idx[0].tz_convert('US/Pacific')
expected = idx.tz_convert('US/Pacific')[0]
self.assertEquals(conv, expected)
def test_pass_dates_convert_to_utc(self):
strdates = ['1/1/2012', '3/1/2012', '4/1/2012']
idx = DatetimeIndex(strdates)
conv = idx.tz_convert('US/Eastern')
fromdates = DatetimeIndex(strdates, tz='US/Eastern')
self.assert_(conv.tz == fromdates.tz)
self.assert_(np.array_equal(conv.values, fromdates.values))
def test_field_access_localize(self):
strdates = ['1/1/2012', '3/1/2012', '4/1/2012']
rng = DatetimeIndex(strdates, tz='US/Eastern')
self.assert_((rng.hour == 0).all())
def test_with_tz(self):
tz = pytz.timezone('US/Central')
# just want it to work
start = datetime(2011, 3, 12, tzinfo=pytz.utc)
dr = bdate_range(start, periods=50, freq=datetools.Hour())
self.assert_(dr.tz is pytz.utc)
# DateRange with naive datetimes
dr = bdate_range('1/1/2005', '1/1/2009', tz=pytz.utc)
dr = bdate_range('1/1/2005', '1/1/2009', tz=tz)
# normalized
central = dr.tz_convert(tz)
self.assert_(central.tz is tz)
self.assert_(central[0].tz is tz)
# datetimes with tzinfo set
dr = bdate_range(datetime(2005, 1, 1, tzinfo=pytz.utc),
'1/1/2009', tz=pytz.utc)
self.assertRaises(Exception, bdate_range,
datetime(2005, 1, 1, tzinfo=pytz.utc),
'1/1/2009', tz=tz)
def test_tz_localize(self):
dr = bdate_range('1/1/2009', '1/1/2010')
dr_utc = bdate_range('1/1/2009', '1/1/2010', tz=pytz.utc)
localized = dr.tz_localize(pytz.utc)
self.assert_(np.array_equal(dr_utc, localized))
def test_with_tz_ambiguous_times(self):
tz = pytz.timezone('US/Eastern')
rng = bdate_range(datetime(2009, 1, 1), datetime(2010, 1, 1))
# regular no problem
self.assert_(rng.tz_validate())
# March 13, 2011, spring forward, skip from 2 AM to 3 AM
dr = date_range(datetime(2011, 3, 13, 1, 30), periods=3,
freq=datetools.Hour())
self.assertRaises(pytz.AmbiguousTimeError, dr.tz_localize, tz)
# after dst transition, it works
dr = date_range(datetime(2011, 3, 13, 3, 30), periods=3,
freq=datetools.Hour(), tz=tz)
# November 6, 2011, fall back, repeat 2 AM hour
dr = date_range(datetime(2011, 11, 6, 1, 30), periods=3,
freq=datetools.Hour())
self.assertRaises(pytz.AmbiguousTimeError, dr.tz_localize, tz)
# UTC is OK
dr = date_range(datetime(2011, 3, 13), periods=48,
freq=datetools.Minute(30), tz=pytz.utc)
# test utility methods
def test_infer_tz(self):
eastern = pytz.timezone('US/Eastern')
utc = pytz.utc
_start = datetime(2001, 1, 1)
_end = datetime(2009, 1, 1)
start = eastern.localize(_start)
end = eastern.localize(_end)
assert(tools._infer_tzinfo(start, end) is eastern)
assert(tools._infer_tzinfo(start, None) is eastern)
assert(tools._infer_tzinfo(None, end) is eastern)
start = utc.localize(_start)
end = utc.localize(_end)
assert(tools._infer_tzinfo(start, end) is utc)
end = eastern.localize(_end)
self.assertRaises(Exception, tools._infer_tzinfo, start, end)
self.assertRaises(Exception, tools._infer_tzinfo, end, start)
def test_asobject_tz_box(self):
tz = pytz.timezone('US/Eastern')
index = DatetimeIndex(start='1/1/2005', periods=10, tz=tz,
freq='B')
result = index.asobject
self.assert_(result[0].tz is tz)
def test_tz_string(self):
result = date_range('1/1/2000', periods=10, tz='US/Eastern')
expected = date_range('1/1/2000', periods=10,
tz=pytz.timezone('US/Eastern'))
self.assert_(result.equals(expected))
def test_take_dont_lose_meta(self):
_skip_if_no_pytz()
rng = date_range('1/1/2000', periods=20, tz='US/Eastern')
result = rng.take(range(5))
self.assert_(result.tz == rng.tz)
self.assert_(result.freq == rng.freq)
def test_index_with_timezone_repr(self):
rng = date_range('4/13/2010', '5/6/2010')
rng_eastern = rng.tz_convert('US/Eastern')
rng_repr = repr(rng)
self.assert_('2010-04-13 00:00:00' in rng_repr)
class TestTimeZones(unittest.TestCase):
def setUp(self):
_skip_if_no_pytz()
def test_index_equals_with_tz(self):
left = date_range('1/1/2011', periods=100, freq='H', tz='utc')
right = date_range('1/1/2011', periods=100, freq='H',
tz='US/Eastern')
self.assert_(not left.equals(right))
def test_tz_convert_naive(self):
rng = date_range('1/1/2011', periods=100, freq='H')
conv = rng.tz_convert('US/Pacific')
exp = rng.tz_localize('US/Pacific')
self.assert_(conv.equals(exp))
def test_tz_convert(self):
        rng = date_range('1/1/2011', periods=100, freq='H')
# Copyright (c) 2018-2021, NVIDIA CORPORATION.
import array as arr
import datetime
import io
import operator
import random
import re
import string
import textwrap
from copy import copy
import cupy
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from numba import cuda
import cudf
from cudf.core._compat import PANDAS_GE_110, PANDAS_GE_120
from cudf.core.column import column
from cudf.tests import utils
from cudf.tests.utils import (
ALL_TYPES,
DATETIME_TYPES,
NUMERIC_TYPES,
assert_eq,
assert_exceptions_equal,
does_not_raise,
gen_rand,
)
def test_init_via_list_of_tuples():
data = [
(5, "cats", "jump", np.nan),
(2, "dogs", "dig", 7.5),
(3, "cows", "moo", -2.1, "occasionally"),
]
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(pdf, gdf)
def _dataframe_na_data():
return [
pd.DataFrame(
{
"a": [0, 1, 2, np.nan, 4, None, 6],
"b": [np.nan, None, "u", "h", "d", "a", "m"],
},
index=["q", "w", "e", "r", "t", "y", "u"],
),
pd.DataFrame({"a": [0, 1, 2, 3, 4], "b": ["a", "b", "u", "h", "d"]}),
pd.DataFrame(
{
"a": [None, None, np.nan, None],
"b": [np.nan, None, np.nan, None],
}
),
pd.DataFrame({"a": []}),
pd.DataFrame({"a": [np.nan], "b": [None]}),
pd.DataFrame({"a": ["a", "b", "c", None, "e"]}),
pd.DataFrame({"a": ["a", "b", "c", "d", "e"]}),
]
@pytest.mark.parametrize("rows", [0, 1, 2, 100])
def test_init_via_list_of_empty_tuples(rows):
data = [()] * rows
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(
pdf,
gdf,
check_like=True,
check_column_type=False,
check_index_type=False,
)
@pytest.mark.parametrize(
"dict_of_series",
[
{"a": pd.Series([1.0, 2.0, 3.0])},
{"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 4.0], index=[1, 2, 3]),
},
{"a": [1, 2, 3], "b": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=["a", "b", "c"]),
"b": pd.Series([1.0, 2.0, 4.0], index=["c", "d", "e"]),
},
{
"a": pd.Series(
["a", "b", "c"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
"b": pd.Series(
["a", " b", "d"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
},
],
)
def test_init_from_series_align(dict_of_series):
pdf = pd.DataFrame(dict_of_series)
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
for key in dict_of_series:
if isinstance(dict_of_series[key], pd.Series):
dict_of_series[key] = cudf.Series(dict_of_series[key])
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
("dict_of_series", "expectation"),
[
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 5, 6]),
},
pytest.raises(
ValueError, match="Cannot align indices with non-unique values"
),
),
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
},
does_not_raise(),
),
],
)
def test_init_from_series_align_nonunique(dict_of_series, expectation):
with expectation:
gdf = cudf.DataFrame(dict_of_series)
if expectation == does_not_raise():
pdf = pd.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
def test_init_unaligned_with_index():
pdf = pd.DataFrame(
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
gdf = cudf.DataFrame(
{
"a": cudf.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": cudf.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
assert_eq(pdf, gdf, check_dtype=False)
def test_series_basic():
# Make series from buffer
a1 = np.arange(10, dtype=np.float64)
series = cudf.Series(a1)
assert len(series) == 10
np.testing.assert_equal(series.to_array(), np.hstack([a1]))
def test_series_from_cupy_scalars():
data = [0.1, 0.2, 0.3]
data_np = np.array(data)
data_cp = cupy.array(data)
s_np = cudf.Series([data_np[0], data_np[2]])
s_cp = cudf.Series([data_cp[0], data_cp[2]])
assert_eq(s_np, s_cp)
@pytest.mark.parametrize("a", [[1, 2, 3], [1, 10, 30]])
@pytest.mark.parametrize("b", [[4, 5, 6], [-11, -100, 30]])
def test_append_index(a, b):
df = pd.DataFrame()
df["a"] = a
df["b"] = b
gdf = cudf.DataFrame()
gdf["a"] = a
gdf["b"] = b
# Check the default index after appending two columns(Series)
expected = df.a.append(df.b)
actual = gdf.a.append(gdf.b)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
expected = df.a.append(df.b, ignore_index=True)
actual = gdf.a.append(gdf.b, ignore_index=True)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
def test_series_init_none():
# test for creating empty series
# 1: without initializing
sr1 = cudf.Series()
got = sr1.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
# 2: Using `None` as an initializer
sr2 = cudf.Series(None)
got = sr2.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_basic():
np.random.seed(0)
df = cudf.DataFrame()
# Populate with cuda memory
df["keys"] = np.arange(10, dtype=np.float64)
np.testing.assert_equal(df["keys"].to_array(), np.arange(10))
assert len(df) == 10
# Populate with numpy array
rnd_vals = np.random.random(10)
df["vals"] = rnd_vals
np.testing.assert_equal(df["vals"].to_array(), rnd_vals)
assert len(df) == 10
assert tuple(df.columns) == ("keys", "vals")
# Make another dataframe
df2 = cudf.DataFrame()
df2["keys"] = np.array([123], dtype=np.float64)
df2["vals"] = np.array([321], dtype=np.float64)
# Concat
df = cudf.concat([df, df2])
assert len(df) == 11
hkeys = np.asarray(np.arange(10, dtype=np.float64).tolist() + [123])
hvals = np.asarray(rnd_vals.tolist() + [321])
np.testing.assert_equal(df["keys"].to_array(), hkeys)
np.testing.assert_equal(df["vals"].to_array(), hvals)
# As matrix
mat = df.as_matrix()
expect = np.vstack([hkeys, hvals]).T
np.testing.assert_equal(mat, expect)
# test dataframe with tuple name
df_tup = cudf.DataFrame()
data = np.arange(10)
df_tup[(1, "foobar")] = data
np.testing.assert_equal(data, df_tup[(1, "foobar")].to_array())
df = cudf.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
pdf = pd.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
assert_eq(df, pdf)
gdf = cudf.DataFrame({"id": [0, 1], "val": [None, None]})
gdf["val"] = gdf["val"].astype("int")
assert gdf["val"].isnull().all()
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"columns", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_columns(pdf, columns, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(columns=columns, inplace=inplace)
actual = gdf.drop(columns=columns, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_0(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=0, inplace=inplace)
actual = gdf.drop(labels=labels, axis=0, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"index",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_index(pdf, index, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace)
actual = gdf.drop(index=index, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5},
index=pd.MultiIndex(
levels=[
["lama", "cow", "falcon"],
["speed", "weight", "length"],
],
codes=[
[0, 0, 0, 1, 1, 1, 2, 2, 2, 1],
[0, 1, 2, 0, 1, 2, 0, 1, 2, 1],
],
),
)
],
)
@pytest.mark.parametrize(
"index,level",
[
("cow", 0),
("lama", 0),
("falcon", 0),
("speed", 1),
("weight", 1),
("length", 1),
pytest.param(
"cow",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"lama",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"falcon",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_multiindex(pdf, index, level, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace, level=level)
actual = gdf.drop(index=index, inplace=inplace, level=level)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_1(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=1, inplace=inplace)
actual = gdf.drop(labels=labels, axis=1, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
def test_dataframe_drop_error():
df = cudf.DataFrame({"a": [1], "b": [2], "c": [3]})
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "d"}),
rfunc_args_and_kwargs=([], {"columns": "d"}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
rfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
rfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
expected_error_message="Cannot specify both",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"axis": 1}),
rfunc_args_and_kwargs=([], {"axis": 1}),
expected_error_message="Need to specify at least",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([[2, 0]],),
rfunc_args_and_kwargs=([[2, 0]],),
expected_error_message="One or more values not found in axis",
)
def test_dataframe_drop_raises():
df = cudf.DataFrame(
{"a": [1, 2, 3], "c": [10, 20, 30]}, index=["x", "y", "z"]
)
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["p"],),
rfunc_args_and_kwargs=(["p"],),
expected_error_message="One or more values not found in axis",
)
# label dtype mismatch
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([3],),
rfunc_args_and_kwargs=([3],),
expected_error_message="One or more values not found in axis",
)
expect = pdf.drop("p", errors="ignore")
actual = df.drop("p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "p"}),
rfunc_args_and_kwargs=([], {"columns": "p"}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(columns="p", errors="ignore")
actual = df.drop(columns="p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
rfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(labels="p", axis=1, errors="ignore")
actual = df.drop(labels="p", axis=1, errors="ignore")
assert_eq(actual, expect)
def test_dataframe_column_add_drop_via_setitem():
df = cudf.DataFrame()
data = np.asarray(range(10))
df["a"] = data
df["b"] = data
assert tuple(df.columns) == ("a", "b")
del df["a"]
assert tuple(df.columns) == ("b",)
df["c"] = data
assert tuple(df.columns) == ("b", "c")
df["a"] = data
assert tuple(df.columns) == ("b", "c", "a")
def test_dataframe_column_set_via_attr():
data_0 = np.asarray([0, 2, 4, 5])
data_1 = np.asarray([1, 4, 2, 3])
data_2 = np.asarray([2, 0, 3, 0])
df = cudf.DataFrame({"a": data_0, "b": data_1, "c": data_2})
for i in range(10):
df.c = df.a
assert assert_eq(df.c, df.a, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
df.c = df.b
assert assert_eq(df.c, df.b, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
def test_dataframe_column_drop_via_attr():
df = cudf.DataFrame({"a": []})
with pytest.raises(AttributeError):
del df.a
assert tuple(df.columns) == tuple("a")
@pytest.mark.parametrize("axis", [0, "index"])
def test_dataframe_index_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper={1: 5, 2: 6}, axis=axis)
got = gdf.rename(mapper={1: 5, 2: 6}, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(index={1: 5, 2: 6})
got = gdf.rename(index={1: 5, 2: 6})
assert_eq(expect, got)
expect = pdf.rename({1: 5, 2: 6})
got = gdf.rename({1: 5, 2: 6})
assert_eq(expect, got)
    # pandas supports renaming to an index with mixed-type values; cudf
    # raises a `NotImplementedError` instead.
with pytest.raises(NotImplementedError):
gdf.rename(mapper={1: "x", 2: "y"}, axis=axis)
def test_dataframe_MI_rename():
gdf = cudf.DataFrame(
{"a": np.arange(10), "b": np.arange(10), "c": np.arange(10)}
)
gdg = gdf.groupby(["a", "b"]).count()
pdg = gdg.to_pandas()
expect = pdg.rename(mapper={1: 5, 2: 6}, axis=0)
got = gdg.rename(mapper={1: 5, 2: 6}, axis=0)
assert_eq(expect, got)
@pytest.mark.parametrize("axis", [1, "columns"])
def test_dataframe_column_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper=lambda name: 2 * name, axis=axis)
got = gdf.rename(mapper=lambda name: 2 * name, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(columns=lambda name: 2 * name)
got = gdf.rename(columns=lambda name: 2 * name)
assert_eq(expect, got)
rename_mapper = {"a": "z", "b": "y", "c": "x"}
expect = pdf.rename(columns=rename_mapper)
got = gdf.rename(columns=rename_mapper)
assert_eq(expect, got)
def test_dataframe_pop():
pdf = pd.DataFrame(
{"a": [1, 2, 3], "b": ["x", "y", "z"], "c": [7.0, 8.0, 9.0]}
)
gdf = cudf.DataFrame.from_pandas(pdf)
# Test non-existing column error
with pytest.raises(KeyError) as raises:
gdf.pop("fake_colname")
raises.match("fake_colname")
# check pop numeric column
pdf_pop = pdf.pop("a")
gdf_pop = gdf.pop("a")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check string column
pdf_pop = pdf.pop("b")
gdf_pop = gdf.pop("b")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check float column and empty dataframe
pdf_pop = pdf.pop("c")
gdf_pop = gdf.pop("c")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check empty dataframe edge case
empty_pdf = pd.DataFrame(columns=["a", "b"])
empty_gdf = cudf.DataFrame(columns=["a", "b"])
pb = empty_pdf.pop("b")
gb = empty_gdf.pop("b")
assert len(pb) == len(gb)
assert empty_pdf.empty and empty_gdf.empty
@pytest.mark.parametrize("nelem", [0, 3, 100, 1000])
def test_dataframe_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df["a"].dtype is np.dtype(np.int32)
df["b"] = df["a"].astype(np.float32)
assert df["b"].dtype is np.dtype(np.float32)
np.testing.assert_equal(df["a"].to_array(), df["b"].to_array())
@pytest.mark.parametrize("nelem", [0, 100])
def test_index_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df.index.dtype is np.dtype(np.int64)
df.index = df.index.astype(np.float32)
assert df.index.dtype is np.dtype(np.float32)
df["a"] = df["a"].astype(np.float32)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
df["b"] = df["a"]
df = df.set_index("b")
df["a"] = df["a"].astype(np.int16)
df.index = df.index.astype(np.int16)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
def test_dataframe_to_string():
pd.options.display.max_rows = 5
pd.options.display.max_columns = 8
# Test basic
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
string = str(df)
assert string.splitlines()[-1] == "[6 rows x 2 columns]"
# Test skipped columns
df = cudf.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6],
"b": [11, 12, 13, 14, 15, 16],
"c": [11, 12, 13, 14, 15, 16],
"d": [11, 12, 13, 14, 15, 16],
}
)
string = df.to_string()
assert string.splitlines()[-1] == "[6 rows x 4 columns]"
# Test masked
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
data = np.arange(6)
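    # 0b00101101 sets bits 0, 2, 3 and 5, so rows 1 and 4 of "c" become null.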
mask = np.zeros(1, dtype=cudf.utils.utils.mask_dtype)
mask[0] = 0b00101101
masked = cudf.Series.from_masked_array(data, mask)
assert masked.null_count == 2
df["c"] = masked
# check data
values = masked.copy()
validids = [0, 2, 3, 5]
densearray = masked.to_array()
np.testing.assert_equal(data[validids], densearray)
    # valid position is correct
for i in validids:
assert data[i] == values[i]
# null position is correct
for i in range(len(values)):
if i not in validids:
assert values[i] is cudf.NA
pd.options.display.max_rows = 10
got = df.to_string()
expect = """
a b c
0 1 11 0
1 2 12 <NA>
2 3 13 2
3 4 14 3
4 5 15 <NA>
5 6 16 5
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_to_string_wide(monkeypatch):
monkeypatch.setenv("COLUMNS", "79")
# Test basic
df = cudf.DataFrame()
for i in range(100):
df["a{}".format(i)] = list(range(3))
pd.options.display.max_columns = 0
got = df.to_string()
expect = """
a0 a1 a2 a3 a4 a5 a6 a7 ... a92 a93 a94 a95 a96 a97 a98 a99
0 0 0 0 0 0 0 0 0 ... 0 0 0 0 0 0 0 0
1 1 1 1 1 1 1 1 1 ... 1 1 1 1 1 1 1 1
2 2 2 2 2 2 2 2 2 ... 2 2 2 2 2 2 2 2
[3 rows x 100 columns]
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_empty_to_string():
# Test for printing empty dataframe
df = cudf.DataFrame()
got = df.to_string()
expect = "Empty DataFrame\nColumns: []\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_emptycolumns_to_string():
# Test for printing dataframe having empty columns
df = cudf.DataFrame()
df["a"] = []
df["b"] = []
got = df.to_string()
expect = "Empty DataFrame\nColumns: [a, b]\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy():
# Test for copying the dataframe using python copy pkg
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = copy(df)
df2["b"] = [4, 5, 6]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy_shallow():
# Test for copy dataframe using class method
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = df.copy()
df2["b"] = [4, 2, 3]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_dtypes():
dtypes = pd.Series(
[np.int32, np.float32, np.float64], index=["c", "a", "b"]
)
df = cudf.DataFrame(
{k: np.ones(10, dtype=v) for k, v in dtypes.iteritems()}
)
assert df.dtypes.equals(dtypes)
def test_dataframe_add_col_to_object_dataframe():
# Test for adding column to an empty object dataframe
cols = ["a", "b", "c"]
df = pd.DataFrame(columns=cols, dtype="str")
data = {k: v for (k, v) in zip(cols, [["a"] for _ in cols])}
gdf = cudf.DataFrame(data)
gdf = gdf[:0]
assert gdf.dtypes.equals(df.dtypes)
gdf["a"] = [1]
df["a"] = [10]
assert gdf.dtypes.equals(df.dtypes)
gdf["b"] = [1.0]
df["b"] = [10.0]
assert gdf.dtypes.equals(df.dtypes)
def test_dataframe_dir_and_getattr():
df = cudf.DataFrame(
{
"a": np.ones(10),
"b": np.ones(10),
"not an id": np.ones(10),
"oop$": np.ones(10),
}
)
o = dir(df)
assert {"a", "b"}.issubset(o)
assert "not an id" not in o
assert "oop$" not in o
# Getattr works
assert df.a.equals(df["a"])
assert df.b.equals(df["b"])
with pytest.raises(AttributeError):
df.not_a_column
@pytest.mark.parametrize("order", ["C", "F"])
def test_empty_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
# Check fully empty dataframe.
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 0)
df = cudf.DataFrame()
nelem = 123
for k in "abc":
df[k] = np.random.random(nelem)
# Check all columns in empty dataframe.
mat = df.head(0).as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 3)
@pytest.mark.parametrize("order", ["C", "F"])
def test_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
nelem = 123
for k in "abcd":
df[k] = np.random.random(nelem)
# Check all columns
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (nelem, 4)
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
# Check column subset
mat = df.as_gpu_matrix(order=order, columns=["a", "c"]).copy_to_host()
assert mat.shape == (nelem, 2)
for i, k in enumerate("ac"):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
def test_dataframe_as_gpu_matrix_null_values():
df = cudf.DataFrame()
nelem = 123
na = -10000
refvalues = {}
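    # Build columns with random null masks; refvalues keeps a host-side copy
    # in which null positions hold the sentinel `na` used by fillna below.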
for k in "abcd":
df[k] = data = np.random.random(nelem)
bitmask = utils.random_bitmask(nelem)
df[k] = df[k].set_mask(bitmask)
boolmask = np.asarray(
utils.expand_bits_to_bytes(bitmask)[:nelem], dtype=np.bool_
)
data[~boolmask] = na
refvalues[k] = data
# Check null value causes error
with pytest.raises(ValueError) as raises:
df.as_gpu_matrix()
raises.match("column 'a' has null values")
for k in df.columns:
df[k] = df[k].fillna(na)
mat = df.as_gpu_matrix().copy_to_host()
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(refvalues[k], mat[:, i])
def test_dataframe_append_empty():
pdf = pd.DataFrame(
{
"key": [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4],
"value": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
gdf["newcol"] = 100
pdf["newcol"] = 100
assert len(gdf["newcol"]) == len(pdf)
assert len(pdf["newcol"]) == len(pdf)
assert_eq(gdf, pdf)
def test_dataframe_setitem_from_masked_object():
ary = np.random.randn(100)
mask = np.zeros(100, dtype=bool)
mask[:20] = True
np.random.shuffle(mask)
ary[mask] = np.nan
test1_null = cudf.Series(ary, nan_as_null=True)
assert test1_null.nullable
assert test1_null.null_count == 20
test1_nan = cudf.Series(ary, nan_as_null=False)
assert test1_nan.null_count == 0
test2_null = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=True
)
assert test2_null["a"].nullable
assert test2_null["a"].null_count == 20
test2_nan = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=False
)
assert test2_nan["a"].null_count == 0
gpu_ary = cupy.asarray(ary)
test3_null = cudf.Series(gpu_ary, nan_as_null=True)
assert test3_null.nullable
assert test3_null.null_count == 20
test3_nan = cudf.Series(gpu_ary, nan_as_null=False)
assert test3_nan.null_count == 0
test4 = cudf.DataFrame()
lst = [1, 2, None, 4, 5, 6, None, 8, 9]
test4["lst"] = lst
assert test4["lst"].nullable
assert test4["lst"].null_count == 2
def test_dataframe_append_to_empty():
pdf = pd.DataFrame()
pdf["a"] = []
pdf["b"] = [1, 2, 3]
gdf = cudf.DataFrame()
gdf["a"] = []
gdf["b"] = [1, 2, 3]
assert_eq(gdf, pdf)
def test_dataframe_setitem_index_len1():
gdf = cudf.DataFrame()
gdf["a"] = [1]
gdf["b"] = gdf.index._values
np.testing.assert_equal(gdf.b.to_array(), [0])
def test_empty_dataframe_setitem_df():
gdf1 = cudf.DataFrame()
gdf2 = cudf.DataFrame({"a": [1, 2, 3, 4, 5]})
gdf1["a"] = gdf2["a"]
assert_eq(gdf1, gdf2)
def test_assign():
gdf = cudf.DataFrame({"x": [1, 2, 3]})
gdf2 = gdf.assign(y=gdf.x + 1)
assert list(gdf.columns) == ["x"]
assert list(gdf2.columns) == ["x", "y"]
np.testing.assert_equal(gdf2.y.to_array(), [2, 3, 4])
@pytest.mark.parametrize("nrows", [1, 8, 100, 1000])
def test_dataframe_hash_columns(nrows):
gdf = cudf.DataFrame()
data = np.asarray(range(nrows))
data[0] = data[-1] # make first and last the same
gdf["a"] = data
gdf["b"] = gdf.a + 100
out = gdf.hash_columns(["a", "b"])
assert isinstance(out, cupy.ndarray)
assert len(out) == nrows
assert out.dtype == np.int32
# Check default
out_all = gdf.hash_columns()
np.testing.assert_array_equal(cupy.asnumpy(out), cupy.asnumpy(out_all))
# Check single column
out_one = cupy.asnumpy(gdf.hash_columns(["a"]))
# First matches last
assert out_one[0] == out_one[-1]
# Equivalent to the cudf.Series.hash_values()
np.testing.assert_array_equal(cupy.asnumpy(gdf.a.hash_values()), out_one)
@pytest.mark.parametrize("nrows", [3, 10, 100, 1000])
@pytest.mark.parametrize("nparts", [1, 2, 8, 13])
@pytest.mark.parametrize("nkeys", [1, 2])
def test_dataframe_hash_partition(nrows, nparts, nkeys):
np.random.seed(123)
gdf = cudf.DataFrame()
keycols = []
for i in range(nkeys):
keyname = "key{}".format(i)
gdf[keyname] = np.random.randint(0, 7 - i, nrows)
keycols.append(keyname)
gdf["val1"] = np.random.randint(0, nrows * 2, nrows)
got = gdf.partition_by_hash(keycols, nparts=nparts)
# Must return a list
assert isinstance(got, list)
# Must have correct number of partitions
assert len(got) == nparts
# All partitions must be DataFrame type
assert all(isinstance(p, cudf.DataFrame) for p in got)
# Check that all partitions have unique keys
part_unique_keys = set()
for p in got:
if len(p):
# Take rows of the keycolumns and build a set of the key-values
unique_keys = set(map(tuple, p.as_matrix(columns=keycols)))
# Ensure that none of the key-values have occurred in other groups
assert not (unique_keys & part_unique_keys)
part_unique_keys |= unique_keys
assert len(part_unique_keys)
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_value(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["val"] = gdf["val"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.key])
expected_value = row.key + 100 if valid else np.nan
got_value = row.val
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_keys(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["key"] = gdf["key"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3, keep_index=False)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.val - 100])
# val is key + 100
expected_value = row.val - 100 if valid else np.nan
got_value = row.key
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("keep_index", [True, False])
def test_dataframe_hash_partition_keep_index(keep_index):
gdf = cudf.DataFrame(
{"val": [1, 2, 3, 4], "key": [3, 2, 1, 4]}, index=[4, 3, 2, 1]
)
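    # With keep_index=False the original index is dropped and rows are
    # renumbered consecutively across the partitions.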
expected_df1 = cudf.DataFrame(
{"val": [1], "key": [3]}, index=[4] if keep_index else None
)
expected_df2 = cudf.DataFrame(
{"val": [2, 3, 4], "key": [2, 1, 4]},
index=[3, 2, 1] if keep_index else range(1, 4),
)
expected = [expected_df1, expected_df2]
parts = gdf.partition_by_hash(["key"], nparts=2, keep_index=keep_index)
for exp, got in zip(expected, parts):
assert_eq(exp, got)
def test_dataframe_hash_partition_empty():
gdf = cudf.DataFrame({"val": [1, 2], "key": [3, 2]}, index=["a", "b"])
parts = gdf.iloc[:0].partition_by_hash(["key"], nparts=3)
assert len(parts) == 3
for part in parts:
assert_eq(gdf.iloc[:0], part)
@pytest.mark.parametrize("dtype1", utils.supported_numpy_dtypes)
@pytest.mark.parametrize("dtype2", utils.supported_numpy_dtypes)
def test_dataframe_concat_different_numerical_columns(dtype1, dtype2):
df1 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype1)))
df2 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype2)))
    if (dtype1 != dtype2 and "datetime" in dtype1) or "datetime" in dtype2:
with pytest.raises(TypeError):
cudf.concat([df1, df2])
else:
pres = pd.concat([df1, df2])
gres = cudf.concat([cudf.from_pandas(df1), cudf.from_pandas(df2)])
assert_eq(cudf.from_pandas(pres), gres)
def test_dataframe_concat_different_column_types():
df1 = cudf.Series([42], dtype=np.float64)
df2 = cudf.Series(["a"], dtype="category")
with pytest.raises(ValueError):
cudf.concat([df1, df2])
df2 = cudf.Series(["a string"])
with pytest.raises(TypeError):
cudf.concat([df1, df2])
@pytest.mark.parametrize(
"df_1", [cudf.DataFrame({"a": [1, 2], "b": [1, 3]}), cudf.DataFrame({})]
)
@pytest.mark.parametrize(
"df_2", [cudf.DataFrame({"a": [], "b": []}), cudf.DataFrame({})]
)
def test_concat_empty_dataframe(df_1, df_2):
got = cudf.concat([df_1, df_2])
expect = pd.concat([df_1.to_pandas(), df_2.to_pandas()], sort=False)
# ignoring dtypes as pandas upcasts int to float
# on concatenation with empty dataframes
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"df1_d",
[
{"a": [1, 2], "b": [1, 2], "c": ["s1", "s2"], "d": [1.0, 2.0]},
{"b": [1.9, 10.9], "c": ["s1", "s2"]},
{"c": ["s1"], "b": [None], "a": [False]},
],
)
@pytest.mark.parametrize(
"df2_d",
[
{"a": [1, 2, 3]},
{"a": [1, None, 3], "b": [True, True, False], "c": ["s3", None, "s4"]},
{"a": [], "b": []},
{},
],
)
def test_concat_different_column_dataframe(df1_d, df2_d):
got = cudf.concat(
[cudf.DataFrame(df1_d), cudf.DataFrame(df2_d), cudf.DataFrame(df1_d)],
sort=False,
)
expect = pd.concat(
[pd.DataFrame(df1_d), pd.DataFrame(df2_d), pd.DataFrame(df1_d)],
sort=False,
)
    # cudf.DataFrame.to_pandas() upcasts numerical columns to float and casts
    # nan to 0 in non-float numerical columns, so normalize the result to
    # float64 with real NaNs before comparing
numeric_cols = got.dtypes[got.dtypes != "object"].index
for col in numeric_cols:
got[col] = got[col].astype(np.float64).fillna(np.nan)
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"ser_1", [pd.Series([1, 2, 3]), pd.Series([], dtype="float64")]
)
@pytest.mark.parametrize("ser_2", [pd.Series([], dtype="float64")])
def test_concat_empty_series(ser_1, ser_2):
got = cudf.concat([cudf.Series(ser_1), cudf.Series(ser_2)])
expect = pd.concat([ser_1, ser_2])
assert_eq(got, expect)
def test_concat_with_axis():
df1 = pd.DataFrame(dict(x=np.arange(5), y=np.arange(5)))
df2 = pd.DataFrame(dict(a=np.arange(5), b=np.arange(5)))
concat_df = pd.concat([df1, df2], axis=1)
cdf1 = cudf.from_pandas(df1)
cdf2 = cudf.from_pandas(df2)
# concat only dataframes
concat_cdf = cudf.concat([cdf1, cdf2], axis=1)
assert_eq(concat_cdf, concat_df)
# concat only series
concat_s = pd.concat([df1.x, df1.y], axis=1)
cs1 = cudf.Series.from_pandas(df1.x)
cs2 = cudf.Series.from_pandas(df1.y)
concat_cdf_s = cudf.concat([cs1, cs2], axis=1)
assert_eq(concat_cdf_s, concat_s)
# concat series and dataframes
s3 = pd.Series(np.random.random(5))
cs3 = cudf.Series.from_pandas(s3)
concat_cdf_all = cudf.concat([cdf1, cs3, cdf2], axis=1)
concat_df_all = pd.concat([df1, s3, df2], axis=1)
assert_eq(concat_cdf_all, concat_df_all)
# concat manual multi index
midf1 = cudf.from_pandas(df1)
midf1.index = cudf.MultiIndex(
levels=[[0, 1, 2, 3], [0, 1]], codes=[[0, 1, 2, 3, 2], [0, 1, 0, 1, 0]]
)
midf2 = midf1[2:]
midf2.index = cudf.MultiIndex(
levels=[[3, 4, 5], [2, 0]], codes=[[0, 1, 2], [1, 0, 1]]
)
mipdf1 = midf1.to_pandas()
mipdf2 = midf2.to_pandas()
assert_eq(cudf.concat([midf1, midf2]), pd.concat([mipdf1, mipdf2]))
assert_eq(cudf.concat([midf2, midf1]), pd.concat([mipdf2, mipdf1]))
assert_eq(
cudf.concat([midf1, midf2, midf1]), pd.concat([mipdf1, mipdf2, mipdf1])
)
# concat groupby multi index
gdf1 = cudf.DataFrame(
{
"x": np.random.randint(0, 10, 10),
"y": np.random.randint(0, 10, 10),
"z": np.random.randint(0, 10, 10),
"v": np.random.randint(0, 10, 10),
}
)
gdf2 = gdf1[5:]
gdg1 = gdf1.groupby(["x", "y"]).min()
gdg2 = gdf2.groupby(["x", "y"]).min()
pdg1 = gdg1.to_pandas()
pdg2 = gdg2.to_pandas()
assert_eq(cudf.concat([gdg1, gdg2]), pd.concat([pdg1, pdg2]))
assert_eq(cudf.concat([gdg2, gdg1]), pd.concat([pdg2, pdg1]))
# series multi index concat
gdgz1 = gdg1.z
gdgz2 = gdg2.z
pdgz1 = gdgz1.to_pandas()
pdgz2 = gdgz2.to_pandas()
assert_eq(cudf.concat([gdgz1, gdgz2]), pd.concat([pdgz1, pdgz2]))
assert_eq(cudf.concat([gdgz2, gdgz1]), pd.concat([pdgz2, pdgz1]))
@pytest.mark.parametrize("nrows", [0, 3, 10, 100, 1000])
def test_nonmatching_index_setitem(nrows):
np.random.seed(0)
gdf = cudf.DataFrame()
gdf["a"] = np.random.randint(2147483647, size=nrows)
gdf["b"] = np.random.randint(2147483647, size=nrows)
gdf = gdf.set_index("b")
test_values = np.random.randint(2147483647, size=nrows)
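    # A bare array assigned as a new column is taken positionally and adopts
    # the DataFrame's existing (non-default) index.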
gdf["c"] = test_values
assert len(test_values) == len(gdf["c"])
assert (
gdf["c"]
.to_pandas()
.equals(cudf.Series(test_values).set_index(gdf._index).to_pandas())
)
def test_from_pandas():
df = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
gdf = cudf.DataFrame.from_pandas(df)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
s = df.x
gs = cudf.Series.from_pandas(s)
assert isinstance(gs, cudf.Series)
assert_eq(s, gs)
@pytest.mark.parametrize("dtypes", [int, float])
def test_from_records(dtypes):
h_ary = np.ndarray(shape=(10, 4), dtype=dtypes)
rec_ary = h_ary.view(np.recarray)
gdf = cudf.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
df = pd.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame.from_records(rec_ary)
df = pd.DataFrame.from_records(rec_ary)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
@pytest.mark.parametrize("columns", [None, ["first", "second", "third"]])
@pytest.mark.parametrize(
"index",
[
None,
["first", "second"],
"name",
"age",
"weight",
[10, 11],
["abc", "xyz"],
],
)
def test_from_records_index(columns, index):
rec_ary = np.array(
[("Rex", 9, 81.0), ("Fido", 3, 27.0)],
dtype=[("name", "U10"), ("age", "i4"), ("weight", "f4")],
)
gdf = cudf.DataFrame.from_records(rec_ary, columns=columns, index=index)
df = pd.DataFrame.from_records(rec_ary, columns=columns, index=index)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
def test_dataframe_construction_from_cupy_arrays():
h_ary = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
d_ary = cupy.asarray(h_ary)
gdf = cudf.DataFrame(d_ary, columns=["a", "b", "c"])
df = pd.DataFrame(h_ary, columns=["a", "b", "c"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
df = pd.DataFrame(h_ary)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary, index=["a", "b"])
df = pd.DataFrame(h_ary, index=["a", "b"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
gdf = gdf.set_index(keys=0, drop=False)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=0, drop=False)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
gdf = gdf.set_index(keys=1, drop=False)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=1, drop=False)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
def test_dataframe_cupy_wrong_dimensions():
d_ary = cupy.empty((2, 3, 4), dtype=np.int32)
with pytest.raises(
ValueError, match="records dimension expected 1 or 2 but found: 3"
):
cudf.DataFrame(d_ary)
def test_dataframe_cupy_array_wrong_index():
d_ary = cupy.empty((2, 3), dtype=np.int32)
with pytest.raises(
ValueError,
match="Length mismatch: Expected axis has 2 elements, "
"new values have 1 elements",
):
cudf.DataFrame(d_ary, index=["a"])
with pytest.raises(
ValueError,
match="Length mismatch: Expected axis has 2 elements, "
"new values have 1 elements",
):
cudf.DataFrame(d_ary, index="a")
def test_index_in_dataframe_constructor():
a = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
b = cudf.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
assert_eq(a, b)
assert_eq(a.loc[4:], b.loc[4:])
dtypes = NUMERIC_TYPES + DATETIME_TYPES + ["bool"]
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_from_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
padf = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
gdf = cudf.DataFrame.from_arrow(padf)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
s = pa.Array.from_pandas(df.a)
gs = cudf.Series.from_arrow(s)
assert isinstance(gs, cudf.Series)
    # pa.Array.to_pandas() yields a numpy-backed result whose dtype compares
    # cleanly against the cudf array, so compare through it
np.testing.assert_array_equal(s.to_pandas(), gs.to_array())
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_to_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
gdf = cudf.DataFrame.from_pandas(df)
pa_df = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)
assert isinstance(pa_gdf, pa.Table)
assert pa.Table.equals(pa_df, pa_gdf)
pa_s = pa.Array.from_pandas(df.a)
pa_gs = gdf["a"].to_arrow()
assert isinstance(pa_gs, pa.Array)
assert pa.Array.equals(pa_s, pa_gs)
pa_i = pa.Array.from_pandas(df.index)
pa_gi = gdf.index.to_arrow()
assert isinstance(pa_gi, pa.Array)
assert pa.Array.equals(pa_i, pa_gi)
@pytest.mark.parametrize("data_type", dtypes)
def test_to_from_arrow_nulls(data_type):
if data_type == "longlong":
data_type = "int64"
if data_type == "bool":
s1 = pa.array([True, None, False, None, True], type=data_type)
else:
dtype = np.dtype(data_type)
if dtype.type == np.datetime64:
time_unit, _ = np.datetime_data(dtype)
data_type = pa.timestamp(unit=time_unit)
s1 = pa.array([1, None, 3, None, 5], type=data_type)
gs1 = cudf.Series.from_arrow(s1)
assert isinstance(gs1, cudf.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s1.buffers()[0]).view("u1")[0],
gs1._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s1, gs1.to_arrow())
s2 = pa.array([None, None, None, None, None], type=data_type)
gs2 = cudf.Series.from_arrow(s2)
assert isinstance(gs2, cudf.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s2.buffers()[0]).view("u1")[0],
gs2._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s2, gs2.to_arrow())
def test_to_arrow_categorical():
df = pd.DataFrame()
df["a"] = | pd.Series(["a", "b", "c"], dtype="category") | pandas.Series |
from __future__ import print_function
import sys
from collections import defaultdict
try:
import mkl
mkl.set_num_threads(1)
except ImportError:
pass
import pandas as pd
import numpy as np
import pyranges as pr
import pkg_resources
from pyranges.pyranges import PyRanges
from pyranges import data
from pyranges.methods.concat import concat
from pyrle import PyRles, Rle
from pyranges.version import __version__
from natsort import natsorted
get_example_path = data.get_example_path
from pyranges.multioverlap import count_overlaps
def from_dict(d, int64=False):
"""Create a PyRanges from dict.
Parameters
----------
d : dict of array-like
Dict with data.
int64 : bool, default False.
Whether to use 64-bit integers for starts and ends.
Warning
-------
On versions of Python prior to 3.6, this function returns a PyRanges with
the columns in arbitrary order.
See Also
--------
pyranges.from_string : create a PyRanges from a multiline string.
Examples
--------
>>> d = {"Chromosome": [1, 1, 2], "Start": [1, 2, 3], "End": [4, 9, 12], "Strand": ["+", "+", "-"], "ArbitraryValue": ["a", "b", "c"]}
>>> pr.from_dict(d)
+--------------+-----------+-----------+--------------+------------------+
| Chromosome | Start | End | Strand | ArbitraryValue |
| (category) | (int32) | (int32) | (category) | (object) |
|--------------+-----------+-----------+--------------+------------------|
| 1 | 1 | 4 | + | a |
| 1 | 2 | 9 | + | b |
| 2 | 3 | 12 | - | c |
+--------------+-----------+-----------+--------------+------------------+
Stranded PyRanges object has 3 rows and 5 columns from 2 chromosomes.
For printing, the PyRanges was sorted on Chromosome and Strand.
"""
    return PyRanges(pd.DataFrame(d), int64=int64)
from collections import abc, deque
from decimal import Decimal
from io import StringIO
from warnings import catch_warnings
import numpy as np
from numpy.random import randn
import pytest
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
concat,
date_range,
read_csv,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.tests.extension.decimal import to_decimal
@pytest.fixture(params=[True, False])
def sort(request):
"""Boolean sort keyword for concat and DataFrame.append."""
return request.param
class TestConcatenate:
def test_concat_copy(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randint(0, 10, size=4).reshape(4, 1))
df3 = DataFrame({5: "foo"}, index=range(4))
# These are actual copies.
result = concat([df, df2, df3], axis=1, copy=True)
for b in result._mgr.blocks:
assert b.values.base is None
# These are the same.
result = concat([df, df2, df3], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is df._mgr.blocks[0].values.base
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
# Float block was consolidated.
df4 = DataFrame(np.random.randn(4, 1))
result = concat([df, df2, df3, df4], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is None
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
def test_concat_with_group_keys(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
# axis=0
df = DataFrame(np.random.randn(3, 4))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1])
exp_index = MultiIndex.from_arrays(
[[0, 0, 0, 1, 1, 1, 1], [0, 1, 2, 0, 1, 2, 3]]
)
expected = DataFrame(np.r_[df.values, df2.values], index=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1])
exp_index2 = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
expected = DataFrame(np.r_[df.values, df.values], index=exp_index2)
tm.assert_frame_equal(result, expected)
# axis=1
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df2.values], columns=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df.values], columns=exp_index2)
tm.assert_frame_equal(result, expected)
def test_concat_keys_specific_levels(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df.iloc[:, [0, 1]], df.iloc[:, [2]], df.iloc[:, [3]]]
level = ["three", "two", "one", "zero"]
result = concat(
pieces,
axis=1,
keys=["one", "two", "three"],
levels=[level],
names=["group_key"],
)
tm.assert_index_equal(result.columns.levels[0], Index(level, name="group_key"))
tm.assert_index_equal(result.columns.levels[1], Index([0, 1, 2, 3]))
assert result.columns.names == ["group_key", None]
def test_concat_dataframe_keys_bug(self, sort):
t1 = DataFrame(
{"value": Series([1, 2, 3], index=Index(["a", "b", "c"], name="id"))}
)
t2 = DataFrame({"value": Series([7, 8], index=Index(["a", "b"], name="id"))})
# it works
result = concat([t1, t2], axis=1, keys=["t1", "t2"], sort=sort)
assert list(result.columns) == [("t1", "value"), ("t2", "value")]
def test_concat_series_partial_columns_names(self):
# GH10698
foo = Series([1, 2], name="foo")
bar = Series([1, 2])
baz = Series([4, 5])
result = concat([foo, bar, baz], axis=1)
expected = DataFrame(
{"foo": [1, 2], 0: [1, 2], 1: [4, 5]}, columns=["foo", 0, 1]
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, keys=["red", "blue", "yellow"])
expected = DataFrame(
{"red": [1, 2], "blue": [1, 2], "yellow": [4, 5]},
columns=["red", "blue", "yellow"],
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, ignore_index=True)
expected = DataFrame({0: [1, 2], 1: [1, 2], 2: [4, 5]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("mapping", ["mapping", "dict"])
def test_concat_mapping(self, mapping, non_dict_mapping_subclass):
constructor = dict if mapping == "dict" else non_dict_mapping_subclass
frames = constructor(
{
"foo": DataFrame(np.random.randn(4, 3)),
"bar": DataFrame(np.random.randn(4, 3)),
"baz": DataFrame(np.random.randn(4, 3)),
"qux": DataFrame(np.random.randn(4, 3)),
}
)
sorted_keys = list(frames.keys())
result = concat(frames)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys)
tm.assert_frame_equal(result, expected)
result = concat(frames, axis=1)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys, axis=1)
tm.assert_frame_equal(result, expected)
keys = ["baz", "foo", "bar"]
result = concat(frames, keys=keys)
expected = concat([frames[k] for k in keys], keys=keys)
tm.assert_frame_equal(result, expected)
def test_concat_ignore_index(self, sort):
frame1 = DataFrame(
{"test1": ["a", "b", "c"], "test2": [1, 2, 3], "test3": [4.5, 3.2, 1.2]}
)
frame2 = DataFrame({"test3": [5.2, 2.2, 4.3]})
frame1.index = Index(["x", "y", "z"])
frame2.index = Index(["x", "y", "q"])
v1 = concat([frame1, frame2], axis=1, ignore_index=True, sort=sort)
nan = np.nan
expected = DataFrame(
[
[nan, nan, nan, 4.3],
["a", 1, 4.5, 5.2],
["b", 2, 3.2, 2.2],
["c", 3, 1.2, nan],
],
index=Index(["q", "x", "y", "z"]),
)
if not sort:
expected = expected.loc[["x", "y", "z", "q"]]
tm.assert_frame_equal(v1, expected)
@pytest.mark.parametrize(
"name_in1,name_in2,name_in3,name_out",
[
("idx", "idx", "idx", "idx"),
("idx", "idx", None, None),
("idx", None, None, None),
("idx1", "idx2", None, None),
("idx1", "idx1", "idx2", None),
("idx1", "idx2", "idx3", None),
(None, None, None, None),
],
)
def test_concat_same_index_names(self, name_in1, name_in2, name_in3, name_out):
# GH13475
indices = [
Index(["a", "b", "c"], name=name_in1),
Index(["b", "c", "d"], name=name_in2),
Index(["c", "d", "e"], name=name_in3),
]
frames = [
DataFrame({c: [0, 1, 2]}, index=i) for i, c in zip(indices, ["x", "y", "z"])
]
result = pd.concat(frames, axis=1)
exp_ind = Index(["a", "b", "c", "d", "e"], name=name_out)
expected = DataFrame(
{
"x": [0, 1, 2, np.nan, np.nan],
"y": [np.nan, 0, 1, 2, np.nan],
"z": [np.nan, np.nan, 0, 1, 2],
},
index=exp_ind,
)
tm.assert_frame_equal(result, expected)
def test_concat_multiindex_with_keys(self):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["first", "second"],
)
frame = DataFrame(
np.random.randn(10, 3),
index=index,
columns=Index(["A", "B", "C"], name="exp"),
)
result = concat([frame, frame], keys=[0, 1], names=["iteration"])
assert result.index.names == ("iteration",) + index.names
tm.assert_frame_equal(result.loc[0], frame)
tm.assert_frame_equal(result.loc[1], frame)
assert result.index.nlevels == 3
def test_concat_multiindex_with_none_in_index_names(self):
# GH 15787
index = pd.MultiIndex.from_product([[1], range(5)], names=["level1", None])
df = DataFrame({"col": range(5)}, index=index, dtype=np.int32)
result = concat([df, df], keys=[1, 2], names=["level2"])
index = pd.MultiIndex.from_product(
[[1, 2], [1], range(5)], names=["level2", "level1", None]
)
expected = DataFrame({"col": list(range(5)) * 2}, index=index, dtype=np.int32)
tm.assert_frame_equal(result, expected)
result = concat([df, df[:2]], keys=[1, 2], names=["level2"])
level2 = [1] * 5 + [2] * 2
level1 = [1] * 7
no_name = list(range(5)) + list(range(2))
tuples = list(zip(level2, level1, no_name))
index = pd.MultiIndex.from_tuples(tuples, names=["level2", "level1", None])
expected = DataFrame({"col": no_name}, index=index, dtype=np.int32)
tm.assert_frame_equal(result, expected)
def test_concat_keys_and_levels(self):
df = DataFrame(np.random.randn(1, 3))
df2 = DataFrame(np.random.randn(1, 4))
levels = [["foo", "baz"], ["one", "two"]]
names = ["first", "second"]
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
names=names,
)
expected = concat([df, df2, df, df2])
exp_index = MultiIndex(
levels=levels + [[0]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1], [0, 0, 0, 0]],
names=names + [None],
)
expected.index = exp_index
tm.assert_frame_equal(result, expected)
# no names
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
)
assert result.index.names == (None,) * 3
# no levels
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
names=["first", "second"],
)
assert result.index.names == ("first", "second", None)
tm.assert_index_equal(
result.index.levels[0], Index(["baz", "foo"], name="first")
)
def test_concat_keys_levels_no_overlap(self):
# GH #1406
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
msg = "Values not found in passed level"
with pytest.raises(ValueError, match=msg):
concat([df, df], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
msg = "Key one not in level"
with pytest.raises(ValueError, match=msg):
concat([df, df2], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
def test_concat_rename_index(self):
a = DataFrame(
np.random.rand(3, 3),
columns=list("ABC"),
index=Index(list("abc"), name="index_a"),
)
b = DataFrame(
np.random.rand(3, 3),
columns=list("ABC"),
index=Index(list("abc"), name="index_b"),
)
result = concat([a, b], keys=["key0", "key1"], names=["lvl0", "lvl1"])
exp = concat([a, b], keys=["key0", "key1"], names=["lvl0"])
names = list(exp.index.names)
names[1] = "lvl1"
exp.index.set_names(names, inplace=True)
tm.assert_frame_equal(result, exp)
assert result.index.names == exp.index.names
def test_crossed_dtypes_weird_corner(self):
columns = ["A", "B", "C", "D"]
df1 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="f8"),
"B": np.array([1, 2, 3, 4], dtype="i8"),
"C": np.array([1, 2, 3, 4], dtype="f8"),
"D": np.array([1, 2, 3, 4], dtype="i8"),
},
columns=columns,
)
df2 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="i8"),
"B": np.array([1, 2, 3, 4], dtype="f8"),
"C": np.array([1, 2, 3, 4], dtype="i8"),
"D": np.array([1, 2, 3, 4], dtype="f8"),
},
columns=columns,
)
appended = df1.append(df2, ignore_index=True)
expected = DataFrame(
np.concatenate([df1.values, df2.values], axis=0), columns=columns
)
tm.assert_frame_equal(appended, expected)
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
result = concat([df, df2], keys=["one", "two"], names=["first", "second"])
assert result.index.names == ("first", "second")
def test_dups_index(self):
# GH 4771
# single dtypes
df = DataFrame(
np.random.randint(0, 10, size=40).reshape(10, 4),
columns=["A", "A", "C", "C"],
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result.iloc[:, :4], df)
tm.assert_frame_equal(result.iloc[:, 4:], df)
result = concat([df, df], axis=0)
tm.assert_frame_equal(result.iloc[:10], df)
tm.assert_frame_equal(result.iloc[10:], df)
# multi dtypes
df = concat(
[
DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
),
],
axis=1,
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result.iloc[:, :6], df)
tm.assert_frame_equal(result.iloc[:, 6:], df)
result = concat([df, df], axis=0)
tm.assert_frame_equal(result.iloc[:10], df)
tm.assert_frame_equal(result.iloc[10:], df)
# append
result = df.iloc[0:8, :].append(df.iloc[8:])
tm.assert_frame_equal(result, df)
result = df.iloc[0:8, :].append(df.iloc[8:9]).append(df.iloc[9:10])
tm.assert_frame_equal(result, df)
expected = concat([df, df], axis=0)
result = df.append(df)
tm.assert_frame_equal(result, expected)
def test_with_mixed_tuples(self, sort):
# 10697
# columns have mixed tuples, so handle properly
df1 = DataFrame({"A": "foo", ("B", 1): "bar"}, index=range(2))
df2 = DataFrame({"B": "foo", ("B", 1): "bar"}, index=range(2))
# it works
concat([df1, df2], sort=sort)
def test_handle_empty_objects(self, sort):
df = DataFrame(np.random.randn(10, 4), columns=list("abcd"))
baz = df[:5].copy()
baz["foo"] = "bar"
empty = df[5:5]
frames = [baz, empty, empty, df[5:]]
concatted = concat(frames, axis=0, sort=sort)
expected = df.reindex(columns=["a", "b", "c", "d", "foo"])
expected["foo"] = expected["foo"].astype("O")
expected.loc[0:4, "foo"] = "bar"
tm.assert_frame_equal(concatted, expected)
# empty as first element with time series
# GH3259
df = DataFrame(
dict(A=range(10000)), index=date_range("20130101", periods=10000, freq="s")
)
empty = DataFrame()
result = concat([df, empty], axis=1)
tm.assert_frame_equal(result, df)
result = concat([empty, df], axis=1)
tm.assert_frame_equal(result, df)
result = concat([df, empty])
tm.assert_frame_equal(result, df)
result = concat([empty, df])
tm.assert_frame_equal(result, df)
def test_concat_mixed_objs(self):
# concat mixed series/frames
# G2385
# axis 1
index = date_range("01-Jan-2013", periods=10, freq="H")
arr = np.arange(10, dtype="int64")
s1 = Series(arr, index=index)
s2 = Series(arr, index=index)
df = DataFrame(arr.reshape(-1, 1), index=index)
expected = DataFrame(
np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 0]
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 1]
)
result = concat([s1, s2], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2]
)
result = concat([s1, s2, s1], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 5).reshape(-1, 5), index=index, columns=[0, 0, 1, 2, 3]
)
result = concat([s1, df, s2, s2, s1], axis=1)
tm.assert_frame_equal(result, expected)
# with names
s1.name = "foo"
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, 0]
)
result = concat([s1, df, s2], axis=1)
tm.assert_frame_equal(result, expected)
s2.name = "bar"
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, "bar"]
)
result = concat([s1, df, s2], axis=1)
tm.assert_frame_equal(result, expected)
# ignore index
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2]
)
result = concat([s1, df, s2], axis=1, ignore_index=True)
tm.assert_frame_equal(result, expected)
# axis 0
expected = DataFrame(
np.tile(arr, 3).reshape(-1, 1), index=index.tolist() * 3, columns=[0]
)
result = concat([s1, df, s2])
tm.assert_frame_equal(result, expected)
expected = DataFrame(np.tile(arr, 3).reshape(-1, 1), columns=[0])
result = concat([s1, df, s2], ignore_index=True)
tm.assert_frame_equal(result, expected)
def test_empty_dtype_coerce(self):
# xref to #12411
# xref to #12045
# xref to #11594
# see below
# 10571
df1 = DataFrame(data=[[1, None], [2, None]], columns=["a", "b"])
df2 = DataFrame(data=[[3, None], [4, None]], columns=["a", "b"])
result = concat([df1, df2])
expected = df1.dtypes
tm.assert_series_equal(result.dtypes, expected)
    def test_dtype_coercion(self):
# 12411
df = DataFrame({"date": [pd.Timestamp("20130101").tz_localize("UTC"), pd.NaT]})
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
# 12045
import datetime
df = DataFrame(
{"date": [datetime.datetime(2012, 1, 1), datetime.datetime(1012, 1, 2)]}
)
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
# 11594
df = DataFrame({"text": ["some words"] + [None] * 9})
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
def test_concat_series(self):
ts = tm.makeTimeSeries()
ts.name = "foo"
pieces = [ts[:5], ts[5:15], ts[15:]]
result = concat(pieces)
tm.assert_series_equal(result, ts)
assert result.name == ts.name
result = concat(pieces, keys=[0, 1, 2])
expected = ts.copy()
ts.index = DatetimeIndex(np.array(ts.index.values, dtype="M8[ns]"))
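        # keys form the outer MultiIndex level; its codes repeat each key over
        # the length of the corresponding piece.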
exp_codes = [np.repeat([0, 1, 2], [len(x) for x in pieces]), np.arange(len(ts))]
exp_index = MultiIndex(levels=[[0, 1, 2], ts.index], codes=exp_codes)
expected.index = exp_index
tm.assert_series_equal(result, expected)
def test_concat_series_axis1(self, sort=sort):
ts = tm.makeTimeSeries()
pieces = [ts[:-2], ts[2:], ts[2:-2]]
result = concat(pieces, axis=1)
expected = DataFrame(pieces).T
tm.assert_frame_equal(result, expected)
result = concat(pieces, keys=["A", "B", "C"], axis=1)
expected = DataFrame(pieces, index=["A", "B", "C"]).T
tm.assert_frame_equal(result, expected)
# preserve series names, #2489
s = Series(randn(5), name="A")
s2 = Series(randn(5), name="B")
result = concat([s, s2], axis=1)
expected = DataFrame({"A": s, "B": s2})
tm.assert_frame_equal(result, expected)
s2.name = None
result = concat([s, s2], axis=1)
tm.assert_index_equal(result.columns, Index(["A", 0], dtype="object"))
# must reindex, #2603
s = Series(randn(3), index=["c", "a", "b"], name="A")
s2 = Series(randn(4), index=["d", "a", "b", "c"], name="B")
result = concat([s, s2], axis=1, sort=sort)
expected = DataFrame({"A": s, "B": s2})
tm.assert_frame_equal(result, expected)
def test_concat_series_axis1_names_applied(self):
# ensure names argument is not ignored on axis=1, #23490
s = Series([1, 2, 3])
s2 = Series([4, 5, 6])
result = concat([s, s2], axis=1, keys=["a", "b"], names=["A"])
expected = DataFrame(
[[1, 4], [2, 5], [3, 6]], columns=Index(["a", "b"], name="A")
)
tm.assert_frame_equal(result, expected)
result = concat([s, s2], axis=1, keys=[("a", 1), ("b", 2)], names=["A", "B"])
expected = DataFrame(
[[1, 4], [2, 5], [3, 6]],
columns=MultiIndex.from_tuples([("a", 1), ("b", 2)], names=["A", "B"]),
)
tm.assert_frame_equal(result, expected)
def test_concat_single_with_key(self):
df = DataFrame(np.random.randn(10, 4))
result = concat([df], keys=["foo"])
expected = concat([df, df], keys=["foo", "bar"])
tm.assert_frame_equal(result, expected[:10])
def test_concat_exclude_none(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df[:5], None, None, df[5:]]
result = concat(pieces)
tm.assert_frame_equal(result, df)
with pytest.raises(ValueError, match="All objects passed were None"):
concat([None, None])
def test_concat_timedelta64_block(self):
from pandas import to_timedelta
rng = to_timedelta(np.arange(10), unit="s")
df = DataFrame({"time": rng})
result = concat([df, df])
assert (result.iloc[:10]["time"] == rng).all()
assert (result.iloc[10:]["time"] == rng).all()
def test_concat_keys_with_none(self):
# #1649
df0 = DataFrame([[10, 20, 30], [10, 20, 30], [10, 20, 30]])
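        # None entries (and their keys) are dropped before concatenation.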
result = concat(dict(a=None, b=df0, c=df0[:2], d=df0[:1], e=df0))
expected = concat(dict(b=df0, c=df0[:2], d=df0[:1], e=df0))
tm.assert_frame_equal(result, expected)
result = concat(
[None, df0, df0[:2], df0[:1], df0], keys=["a", "b", "c", "d", "e"]
)
expected = concat([df0, df0[:2], df0[:1], df0], keys=["b", "c", "d", "e"])
tm.assert_frame_equal(result, expected)
def test_concat_bug_1719(self):
ts1 = tm.makeTimeSeries()
ts2 = tm.makeTimeSeries()[::2]
# to join with union
# these two are of different length!
left = concat([ts1, ts2], join="outer", axis=1)
right = concat([ts2, ts1], join="outer", axis=1)
assert len(left) == len(right)
def test_concat_bug_2972(self):
ts0 = Series(np.zeros(5))
ts1 = Series(np.ones(5))
ts0.name = ts1.name = "same name"
result = concat([ts0, ts1], axis=1)
expected = DataFrame({0: ts0, 1: ts1})
expected.columns = ["same name", "same name"]
tm.assert_frame_equal(result, expected)
def test_concat_bug_3602(self):
# GH 3602, duplicate columns
df1 = DataFrame(
{
"firmNo": [0, 0, 0, 0],
"prc": [6, 6, 6, 6],
"stringvar": ["rrr", "rrr", "rrr", "rrr"],
}
)
df2 = DataFrame(
{"C": [9, 10, 11, 12], "misc": [1, 2, 3, 4], "prc": [6, 6, 6, 6]}
)
expected = DataFrame(
[
[0, 6, "rrr", 9, 1, 6],
[0, 6, "rrr", 10, 2, 6],
[0, 6, "rrr", 11, 3, 6],
[0, 6, "rrr", 12, 4, 6],
]
)
expected.columns = ["firmNo", "prc", "stringvar", "C", "misc", "prc"]
result = concat([df1, df2], axis=1)
tm.assert_frame_equal(result, expected)
def test_concat_inner_join_empty(self):
# GH 15328
df_empty = DataFrame()
df_a = DataFrame({"a": [1, 2]}, index=[0, 1], dtype="int64")
df_expected = DataFrame({"a": []}, index=[], dtype="int64")
for how, expected in [("inner", df_expected), ("outer", df_a)]:
result = pd.concat([df_a, df_empty], axis=1, join=how)
tm.assert_frame_equal(result, expected)
def test_concat_series_axis1_same_names_ignore_index(self):
dates = date_range("01-Jan-2013", "01-Jan-2014", freq="MS")[0:-1]
s1 = Series(randn(len(dates)), index=dates, name="value")
s2 = Series(randn(len(dates)), index=dates, name="value")
result = concat([s1, s2], axis=1, ignore_index=True)
expected = Index([0, 1])
tm.assert_index_equal(result.columns, expected)
def test_concat_iterables(self):
# GH8645 check concat works with tuples, list, generators, and weird
# stuff like deque and custom iterables
df1 = DataFrame([1, 2, 3])
df2 = DataFrame([4, 5, 6])
expected = DataFrame([1, 2, 3, 4, 5, 6])
tm.assert_frame_equal(concat((df1, df2), ignore_index=True), expected)
tm.assert_frame_equal(concat([df1, df2], ignore_index=True), expected)
tm.assert_frame_equal(
concat((df for df in (df1, df2)), ignore_index=True), expected
)
tm.assert_frame_equal(concat(deque((df1, df2)), ignore_index=True), expected)
class CustomIterator1:
def __len__(self) -> int:
return 2
def __getitem__(self, index):
try:
return {0: df1, 1: df2}[index]
except KeyError as err:
raise IndexError from err
tm.assert_frame_equal(pd.concat(CustomIterator1(), ignore_index=True), expected)
class CustomIterator2(abc.Iterable):
def __iter__(self):
yield df1
yield df2
tm.assert_frame_equal(pd.concat(CustomIterator2(), ignore_index=True), expected)
def test_concat_invalid(self):
# trying to concat a ndframe with a non-ndframe
df1 = tm.makeCustomDataframe(10, 2)
for obj in [1, dict(), [1, 2], (1, 2)]:
msg = (
f"cannot concatenate object of type '{type(obj)}'; "
"only Series and DataFrame objs are valid"
)
with pytest.raises(TypeError, match=msg):
concat([df1, obj])
def test_concat_invalid_first_argument(self):
df1 = tm.makeCustomDataframe(10, 2)
df2 = tm.makeCustomDataframe(10, 2)
msg = (
"first argument must be an iterable of pandas "
'objects, you passed an object of type "DataFrame"'
)
with pytest.raises(TypeError, match=msg):
concat(df1, df2)
# generator ok though
concat(DataFrame(np.random.rand(5, 5)) for _ in range(3))
# text reader ok
# GH6583
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
reader = read_csv(StringIO(data), chunksize=1)
result = concat(reader, ignore_index=True)
expected = read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
def test_concat_empty_series(self):
# GH 11082
s1 = Series([1, 2, 3], name="x")
s2 = Series(name="y", dtype="float64")
res = pd.concat([s1, s2], axis=1)
exp = DataFrame(
{"x": [1, 2, 3], "y": [np.nan, np.nan, np.nan]},
index=Index([0, 1, 2], dtype="O"),
)
tm.assert_frame_equal(res, exp)
s1 = Series([1, 2, 3], name="x")
s2 = Series(name="y", dtype="float64")
res = pd.concat([s1, s2], axis=0)
# name will be reset
exp = Series([1, 2, 3])
tm.assert_series_equal(res, exp)
# empty Series with no name
s1 = Series([1, 2, 3], name="x")
s2 = Series(name=None, dtype="float64")
res = pd.concat([s1, s2], axis=1)
exp = DataFrame(
{"x": [1, 2, 3], 0: [np.nan, np.nan, np.nan]},
columns=["x", 0],
index=Index([0, 1, 2], dtype="O"),
)
tm.assert_frame_equal(res, exp)
@pytest.mark.parametrize("tz", [None, "UTC"])
@pytest.mark.parametrize("values", [[], [1, 2, 3]])
def test_concat_empty_series_timelike(self, tz, values):
# GH 18447
first = Series([], dtype="M8[ns]").dt.tz_localize(tz)
dtype = None if values else np.float64
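        # Give the empty Series an explicit float64 dtype; non-empty data
        # infers its own dtype.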
second = Series(values, dtype=dtype)
expected = DataFrame(
{
0: Series([pd.NaT] * len(values), dtype="M8[ns]").dt.tz_localize(tz),
1: values,
}
)
result = concat([first, second], axis=1)
tm.assert_frame_equal(result, expected)
def test_default_index(self):
# is_series and ignore_index
s1 = Series([1, 2, 3], name="x")
s2 = Series([4, 5, 6], name="y")
res = pd.concat([s1, s2], axis=1, ignore_index=True)
assert isinstance(res.columns, pd.RangeIndex)
exp = DataFrame([[1, 4], [2, 5], [3, 6]])
# use check_index_type=True to check the result have
# RangeIndex (default index)
tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True)
# is_series and all inputs have no names
s1 = Series([1, 2, 3])
s2 = Series([4, 5, 6])
res = pd.concat([s1, s2], axis=1, ignore_index=False)
assert isinstance(res.columns, pd.RangeIndex)
exp = DataFrame([[1, 4], [2, 5], [3, 6]])
exp.columns = pd.RangeIndex(2)
tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True)
# is_dataframe and ignore_index
df1 = DataFrame({"A": [1, 2], "B": [5, 6]})
df2 = DataFrame({"A": [3, 4], "B": [7, 8]})
res = pd.concat([df1, df2], axis=0, ignore_index=True)
exp = DataFrame([[1, 5], [2, 6], [3, 7], [4, 8]], columns=["A", "B"])
tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True)
res = pd.concat([df1, df2], axis=1, ignore_index=True)
exp = DataFrame([[1, 5, 3, 7], [2, 6, 4, 8]])
tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True)
def test_concat_multiindex_rangeindex(self):
# GH13542
# when multi-index levels are RangeIndex objects
# there is a bug in concat with objects of len 1
df = DataFrame(np.random.randn(9, 2))
df.index = MultiIndex(
levels=[pd.RangeIndex(3), pd.RangeIndex(3)],
codes=[np.repeat(np.arange(3), 3), np.tile(np.arange(3), 3)],
)
res = concat([df.iloc[[2, 3, 4], :], df.iloc[[5], :]])
exp = df.iloc[[2, 3, 4, 5], :]
tm.assert_frame_equal(res, exp)
def test_concat_multiindex_dfs_with_deepcopy(self):
# GH 9967
from copy import deepcopy
example_multiindex1 = pd.MultiIndex.from_product([["a"], ["b"]])
example_dataframe1 = DataFrame([0], index=example_multiindex1)
example_multiindex2 = pd.MultiIndex.from_product([["a"], ["c"]])
example_dataframe2 = DataFrame([1], index=example_multiindex2)
example_dict = {"s1": example_dataframe1, "s2": example_dataframe2}
expected_index = pd.MultiIndex(
levels=[["s1", "s2"], ["a"], ["b", "c"]],
codes=[[0, 1], [0, 0], [0, 1]],
names=["testname", None, None],
)
expected = DataFrame([[0], [1]], index=expected_index)
result_copy = pd.concat(deepcopy(example_dict), names=["testname"])
tm.assert_frame_equal(result_copy, expected)
result_no_copy = pd.concat(example_dict, names=["testname"])
tm.assert_frame_equal(result_no_copy, expected)
def test_categorical_concat_append(self):
cat = Categorical(["a", "b"], categories=["a", "b"])
vals = [1, 2]
df = DataFrame({"cats": cat, "vals": vals})
cat2 = Categorical(["a", "b", "a", "b"], categories=["a", "b"])
vals2 = [1, 2, 1, 2]
exp = DataFrame({"cats": cat2, "vals": vals2}, index=Index([0, 1, 0, 1]))
tm.assert_frame_equal(pd.concat([df, df]), exp)
tm.assert_frame_equal(df.append(df), exp)
# GH 13524 can concat different categories
cat3 = Categorical(["a", "b"], categories=["a", "b", "c"])
vals3 = [1, 2]
df_different_categories = DataFrame({"cats": cat3, "vals": vals3})
res = pd.concat([df, df_different_categories], ignore_index=True)
exp = DataFrame({"cats": list("abab"), "vals": [1, 2, 1, 2]})
tm.assert_frame_equal(res, exp)
res = df.append(df_different_categories, ignore_index=True)
tm.assert_frame_equal(res, exp)
def test_categorical_concat_dtypes(self):
# GH8143
index = ["cat", "obj", "num"]
cat = Categorical(["a", "b", "c"])
obj = Series(["a", "b", "c"])
num = Series([1, 2, 3])
df = pd.concat([Series(cat), obj, num], axis=1, keys=index)
result = df.dtypes == "object"
expected = Series([False, True, False], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == "int64"
expected = Series([False, False, True], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == "category"
expected = Series([True, False, False], index=index)
tm.assert_series_equal(result, expected)
def test_categorical_concat(self, sort):
# See GH 10177
df1 = DataFrame(
np.arange(18, dtype="int64").reshape(6, 3), columns=["a", "b", "c"]
)
df2 = DataFrame(np.arange(14, dtype="int64").reshape(7, 2), columns=["a", "c"])
cat_values = ["one", "one", "two", "one", "two", "two", "one"]
df2["h"] = Series(Categorical(cat_values))
res = pd.concat((df1, df2), axis=0, ignore_index=True, sort=sort)
exp = DataFrame(
{
"a": [0, 3, 6, 9, 12, 15, 0, 2, 4, 6, 8, 10, 12],
"b": [
1,
4,
7,
10,
13,
16,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
],
"c": [2, 5, 8, 11, 14, 17, 1, 3, 5, 7, 9, 11, 13],
"h": [None] * 6 + cat_values,
}
)
tm.assert_frame_equal(res, exp)
def test_categorical_concat_gh7864(self):
# GH 7864
# make sure ordering is preserved
df = DataFrame({"id": [1, 2, 3, 4, 5, 6], "raw_grade": list("abbaae")})
df["grade"] = Categorical(df["raw_grade"])
df["grade"].cat.set_categories(["e", "a", "b"])
df1 = df[0:3]
df2 = df[3:]
tm.assert_index_equal(df["grade"].cat.categories, df1["grade"].cat.categories)
tm.assert_index_equal(df["grade"].cat.categories, df2["grade"].cat.categories)
dfx = pd.concat([df1, df2])
tm.assert_index_equal(df["grade"].cat.categories, dfx["grade"].cat.categories)
dfa = df1.append(df2)
tm.assert_index_equal(df["grade"].cat.categories, dfa["grade"].cat.categories)
def test_categorical_concat_preserve(self):
# GH 8641 series concat not preserving category dtype
# GH 13524 can concat different categories
s = Series(list("abc"), dtype="category")
s2 = Series(list("abd"), dtype="category")
exp = Series(list("abcabd"))
res = pd.concat([s, s2], ignore_index=True)
tm.assert_series_equal(res, exp)
exp = Series(list("abcabc"), dtype="category")
res = pd.concat([s, s], ignore_index=True)
tm.assert_series_equal(res, exp)
exp = Series(list("abcabc"), index=[0, 1, 2, 0, 1, 2], dtype="category")
res = pd.concat([s, s])
tm.assert_series_equal(res, exp)
a = Series(np.arange(6, dtype="int64"))
b = Series(list("aabbca"))
df2 = DataFrame({"A": a, "B": b.astype(CategoricalDtype(list("cab")))})
res = pd.concat([df2, df2])
exp = DataFrame(
{
"A": pd.concat([a, a]),
"B": pd.concat([b, b]).astype(CategoricalDtype(list("cab"))),
}
)
tm.assert_frame_equal(res, exp)
def test_categorical_index_preserver(self):
a = Series(np.arange(6, dtype="int64"))
b = Series(list("aabbca"))
df2 = DataFrame(
{"A": a, "B": b.astype(CategoricalDtype(list("cab")))}
).set_index("B")
result = pd.concat([df2, df2])
expected = DataFrame(
{
"A": pd.concat([a, a]),
"B": pd.concat([b, b]).astype(CategoricalDtype(list("cab"))),
}
).set_index("B")
tm.assert_frame_equal(result, expected)
# wrong categories
df3 = DataFrame(
{"A": a, "B": Categorical(b, categories=list("abe"))}
).set_index("B")
msg = "categories must match existing categories when appending"
with pytest.raises(TypeError, match=msg):
pd.concat([df2, df3])
def test_concat_categoricalindex(self):
# GH 16111, categories that aren't lexsorted
categories = [9, 0, 1, 2, 3]
a = Series(1, index=pd.CategoricalIndex([9, 0], categories=categories))
b = Series(2, index=pd.CategoricalIndex([0, 1], categories=categories))
c = Series(3, index=pd.CategoricalIndex([1, 2], categories=categories))
result = pd.concat([a, b, c], axis=1)
exp_idx = pd.CategoricalIndex([9, 0, 1, 2], categories=categories)
exp = DataFrame(
{
0: [1, 1, np.nan, np.nan],
1: [np.nan, 2, 2, np.nan],
2: [np.nan, np.nan, 3, 3],
},
columns=[0, 1, 2],
index=exp_idx,
)
tm.assert_frame_equal(result, exp)
def test_concat_order(self):
# GH 17344
dfs = [DataFrame(index=range(3), columns=["a", 1, None])]
dfs += [DataFrame(index=range(3), columns=[None, 1, "a"]) for i in range(100)]
result = pd.concat(dfs, sort=True).columns
expected = dfs[0].columns
tm.assert_index_equal(result, expected)
def test_concat_different_extension_dtypes_upcasts(self):
a = Series(pd.core.arrays.integer_array([1, 2]))
b = Series(to_decimal([1, 2]))
result = pd.concat([a, b], ignore_index=True)
expected = Series([1, 2, Decimal(1), Decimal(2)], dtype=object)
tm.assert_series_equal(result, expected)
def test_concat_ordered_dict(self):
# GH 21510
expected = pd.concat(
[Series(range(3)), Series(range(4))], keys=["First", "Another"]
)
result = pd.concat(
dict([("First", Series(range(3))), ("Another", Series(range(4)))])
)
tm.assert_series_equal(result, expected)
def test_concat_empty_dataframe_dtypes(self):
df = DataFrame(columns=list("abc"))
df["a"] = df["a"].astype(np.bool_)
df["b"] = df["b"].astype(np.int32)
df["c"] = df["c"].astype(np.float64)
result = pd.concat([df, df])
assert result["a"].dtype == np.bool_
assert result["b"].dtype == np.int32
assert result["c"].dtype == np.float64
result = pd.concat([df, df.astype(np.float64)])
assert result["a"].dtype == np.object_
assert result["b"].dtype == np.float64
assert result["c"].dtype == np.float64
@pytest.mark.parametrize("pdt", [Series, pd.DataFrame])
@pytest.mark.parametrize("dt", np.sctypes["float"])
def test_concat_no_unnecessary_upcast(dt, pdt):
# GH 13247
dims = pdt(dtype=object).ndim
dfs = [
pdt(np.array([1], dtype=dt, ndmin=dims)),
pdt(np.array([np.nan], dtype=dt, ndmin=dims)),
pdt(np.array([5], dtype=dt, ndmin=dims)),
]
x = pd.concat(dfs)
assert x.values.dtype == dt
@pytest.mark.parametrize("pdt", [create_series_with_explicit_dtype, pd.DataFrame])
@pytest.mark.parametrize("dt", np.sctypes["int"])
def test_concat_will_upcast(dt, pdt):
with catch_warnings(record=True):
dims = pdt().ndim
dfs = [
pdt(np.array([1], dtype=dt, ndmin=dims)),
pdt(np.array([np.nan], ndmin=dims)),
pdt(np.array([5], dtype=dt, ndmin=dims)),
]
x = pd.concat(dfs)
assert x.values.dtype == "float64"
def test_concat_empty_and_non_empty_frame_regression():
# GH 18178 regression test
df1 = DataFrame({"foo": [1]})
df2 = DataFrame({"foo": []})
expected = DataFrame({"foo": [1.0]})
result = pd.concat([df1, df2])
tm.assert_frame_equal(result, expected)
def test_concat_empty_and_non_empty_series_regression():
# GH 18187 regression test
s1 = Series([1])
s2 = Series([], dtype=object)
expected = s1
result = pd.concat([s1, s2])
tm.assert_series_equal(result, expected)
def test_concat_sorts_columns(sort):
# GH-4588
df1 = DataFrame({"a": [1, 2], "b": [1, 2]}, columns=["b", "a"])
df2 = DataFrame({"a": [3, 4], "c": [5, 6]})
# for sort=True/None
expected = DataFrame(
{"a": [1, 2, 3, 4], "b": [1, 2, None, None], "c": [None, None, 5, 6]},
columns=["a", "b", "c"],
)
if sort is False:
expected = expected[["b", "a", "c"]]
# default
with tm.assert_produces_warning(None):
result = pd.concat([df1, df2], ignore_index=True, sort=sort)
tm.assert_frame_equal(result, expected)
def test_concat_sorts_index(sort):
df1 = DataFrame({"a": [1, 2, 3]}, index=["c", "a", "b"])
df2 = DataFrame({"b": [1, 2]}, index=["a", "b"])
# For True/None
expected = DataFrame(
{"a": [2, 3, 1], "b": [1, 2, None]}, index=["a", "b", "c"], columns=["a", "b"]
)
if sort is False:
expected = expected.loc[["c", "a", "b"]]
# Warn and sort by default
with tm.assert_produces_warning(None):
result = pd.concat([df1, df2], axis=1, sort=sort)
tm.assert_frame_equal(result, expected)
def test_concat_inner_sort(sort):
# https://github.com/pandas-dev/pandas/pull/20613
df1 = DataFrame({"a": [1, 2], "b": [1, 2], "c": [1, 2]}, columns=["b", "a", "c"])
df2 = DataFrame({"a": [1, 2], "b": [3, 4]}, index=[3, 4])
with tm.assert_produces_warning(None):
# unset sort should *not* warn for inner join
# since that never sorted
result = pd.concat([df1, df2], sort=sort, join="inner", ignore_index=True)
expected = DataFrame({"b": [1, 2, 3, 4], "a": [1, 2, 1, 2]}, columns=["b", "a"])
if sort is True:
expected = expected[["a", "b"]]
tm.assert_frame_equal(result, expected)
def test_concat_aligned_sort():
# GH-4588
df = DataFrame({"c": [1, 2], "b": [3, 4], "a": [5, 6]}, columns=["c", "b", "a"])
result = pd.concat([df, df], sort=True, ignore_index=True)
expected = DataFrame(
{"a": [5, 6, 5, 6], "b": [3, 4, 3, 4], "c": [1, 2, 1, 2]},
columns=["a", "b", "c"],
)
tm.assert_frame_equal(result, expected)
result = pd.concat([df, df[["c", "b"]]], join="inner", sort=True, ignore_index=True)
expected = expected[["b", "c"]]
tm.assert_frame_equal(result, expected)
def test_concat_aligned_sort_does_not_raise():
# GH-4588
# We catch TypeErrors from sorting internally and do not re-raise.
df = DataFrame({1: [1, 2], "a": [3, 4]}, columns=[1, "a"])
expected = DataFrame({1: [1, 2, 1, 2], "a": [3, 4, 3, 4]}, columns=[1, "a"])
result = pd.concat([df, df], ignore_index=True, sort=True)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("s1name,s2name", [(np.int64(190), (43, 0)), (190, (43, 0))])
def test_concat_series_name_npscalar_tuple(s1name, s2name):
# GH21015
s1 = Series({"a": 1, "b": 2}, name=s1name)
s2 = Series({"c": 5, "d": 6}, name=s2name)
result = pd.concat([s1, s2])
expected = Series({"a": 1, "b": 2, "c": 5, "d": 6})
tm.assert_series_equal(result, expected)
def test_concat_categorical_tz():
# GH-23816
a = Series(pd.date_range("2017-01-01", periods=2, tz="US/Pacific"))
b = Series(["a", "b"], dtype="category")
result = pd.concat([a, b], ignore_index=True)
expected = Series(
[
pd.Timestamp("2017-01-01", tz="US/Pacific"),
pd.Timestamp("2017-01-02", tz="US/Pacific"),
"a",
"b",
]
)
tm.assert_series_equal(result, expected)
def test_concat_categorical_unchanged():
# GH-12007
# test fix for when concat on categorical and float
# coerces dtype categorical -> float
df = DataFrame(Series(["a", "b", "c"], dtype="category", name="A"))
ser = Series([0, 1, 2], index=[0, 1, 3], name="B")
result = pd.concat([df, ser], axis=1)
expected = DataFrame(
{
"A": Series(["a", "b", "c", np.nan], dtype="category"),
"B": Series([0, 1, np.nan, 2], dtype="float"),
}
)
tm.assert_equal(result, expected)
def test_concat_empty_df_object_dtype():
# GH 9149
df_1 = DataFrame({"Row": [0, 1, 1], "EmptyCol": np.nan, "NumberCol": [1, 2, 3]})
df_2 = DataFrame(columns=df_1.columns)
result = pd.concat([df_1, df_2], axis=0)
expected = df_1.astype(object)
tm.assert_frame_equal(result, expected)
def test_concat_sparse():
# GH 23557
a = Series(SparseArray([0, 1, 2]))
expected = DataFrame(data=[[0, 0], [1, 1], [2, 2]]).astype(
pd.SparseDtype(np.int64, 0)
)
result = pd.concat([a, a], axis=1)
tm.assert_frame_equal(result, expected)
def test_concat_dense_sparse():
# GH 30668
a = Series(pd.arrays.SparseArray([1, None]), dtype=float)
b = Series([1], dtype=float)
expected = Series(data=[1, None, 1], index=[0, 1, 0]).astype(
pd.SparseDtype(np.float64, None)
)
result = pd.concat([a, b], axis=0)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("test_series", [True, False])
def test_concat_copy_index(test_series, axis):
# GH 29879
if test_series:
ser = Series([1, 2])
comb = concat([ser, ser], axis=axis, copy=True)
assert comb.index is not ser.index
else:
df = DataFrame([[1, 2], [3, 4]], columns=["a", "b"])
comb = concat([df, df], axis=axis, copy=True)
assert comb.index is not df.index
assert comb.columns is not df.columns
@pytest.mark.parametrize("keys", [["e", "f", "f"], ["f", "e", "f"]])
def test_duplicate_keys(keys):
# GH 33654
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
s1 = Series([7, 8, 9], name="c")
s2 = Series([10, 11, 12], name="d")
result = concat([df, s1, s2], axis=1, keys=keys)
expected_values = [[1, 4, 7, 10], [2, 5, 8, 11], [3, 6, 9, 12]]
expected_columns = pd.MultiIndex.from_tuples(
[(keys[0], "a"), (keys[0], "b"), (keys[1], "c"), (keys[2], "d")]
)
expected = DataFrame(expected_values, columns=expected_columns)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"obj",
[
tm.SubclassedDataFrame({"A": np.arange(0, 10)}),
tm.SubclassedSeries(np.arange(0, 10), name="A"),
],
)
def test_concat_preserves_subclass(obj):
# GH28330 -- preserve subclass
result = concat([obj, obj])
assert isinstance(result, type(obj))
def test_concat_frame_axis0_extension_dtypes():
# preserve extension dtype (through common_dtype mechanism)
df1 = DataFrame({"a": pd.array([1, 2, 3], dtype="Int64")})
df2 = DataFrame({"a": np.array([4, 5, 6])})
result = pd.concat([df1, df2], ignore_index=True)
expected = DataFrame({"a": [1, 2, 3, 4, 5, 6]}, dtype="Int64")
tm.assert_frame_equal(result, expected)
# author: <NAME>
# date: 2020-01-23
#
# This script preprocesses both the training and test data and builds several models to predict students' grades in the Portuguese subject.
# It outputs the best hyperparameters found during each model's cross-validation and scores the predictions of the different models.
# Models used in this script are Linear Regression with Lasso, Linear Regression with Ridge, Random Forest Regressor, XGBoost Regressor and LightGBM Regressor.
#
# Outputs (relative path from project repo):
# Cross-validation results (csv_output_dir_path + "cv_results.csv")
# lmlasso_hyperparam (csv_output_dir_path + "lmlasso_hyperparam.csv")
# lmridge_hyperparam (csv_output_dir_path + "lmridge_hyperparam.csv")
# rf_hyperparam (csv_output_dir_path + "rf_hyperparam.csv")
# xgb_hyperparam (csv_output_dir_path + "xgb_hyperparam.csv")
# lgbm_hyperparam (csv_output_dir_path + "lgbm_hyperparam.csv")
# test_rmse (csv_output_dir_path + "final_results.csv")
# feat_importances (csv_output_dir_path + "feat_importance.csv")
# Plot of top 5 feat (image_output_dir_path + "ranked_features.png")
###################################################################
'''This script preprocesses both the training and test data and builds several models to predict students' grades in the Portuguese subject.
It outputs the best hyperparameters found during each model's cross-validation and scores the predictions of the different models.
Models used in this script are Linear Regression with Lasso, Linear Regression with Ridge, Random Forest Regressor, XGBoost Regressor and LightGBM Regressor.
Usage: modelling.py --train_data_file_path=<train_data_file_path> --test_data_file_path=<test_data_file_path> --csv_output_dir_path=<csv_output_dir_path> --image_output_dir_path=<image_output_dir_path>
Options:
--train_data_file_path=<train_data_file_path> Path (including filename) to the training data csv file.
--test_data_file_path=<test_data_file_path> Path (including filename) to the test data csv file.
--csv_output_dir_path=<csv_output_dir_path> Path (excluding any filenames) to the output csv directory. Must end with "/".
--image_output_dir_path=<image_output_dir_path> Path (excluding any filenames) to the output image directory. Must end with "/".
'''
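# Illustrative invocation (a sketch only; the paths below are the example paths from the
# docstrings above and are not guaranteed to exist in a given checkout):
# python modelling.py --train_data_file_path=./data/processed/train.csv \
#     --test_data_file_path=./data/processed/test.csv \
#     --csv_output_dir_path=./data/output/ --image_output_dir_path=./img/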
# Typical packages
import pytest
from docopt import docopt
import numpy as np
import pandas as pd
# Preprocessing
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.model_selection import train_test_split
from sklearn.compose import ColumnTransformer
# Cross validation
from sklearn.model_selection import cross_validate
# Bayes opt
from bayes_opt import BayesianOptimization
# Linear Regression with Lasso
from sklearn.linear_model import Lasso
# Linear Regression with L2
from sklearn.linear_model import Ridge
# Random Forest
from sklearn.ensemble import RandomForestRegressor
# XGBoost
import xgboost as xgb
# LightGBM
import lightgbm as lgbm
# Scoring
from sklearn.metrics import mean_squared_error
# Plotting
import altair as alt
################################################################
opt = docopt(__doc__)
def main(train_data_file_path, test_data_file_path, csv_output_dir_path, image_output_dir_path):
"""
This function performs the preprocessing and predictive modelling, and outputs csv files of cross-validation scores and
model hyperparameters, as well as a plot of the top 5 predictive features based on the best model.
Parameters
----------
train_data_file_path: str
A string that provides a FILE path (including filename) in which the training data is stored.
Cannot be null, otherwise an error will be thrown.
test_data_file_path: str
A str that provides a FILE path (including filename) in which the test data is stored.
Cannot be null, otherwise an error will be thrown.
csv_output_dir_path: str
A string that provides the DIRECTORY path (including "/" character at the end) to store csv outputs.
image_output_dir_path: str
A string that provides the DIRECTORY path (including "/" character at the end) to store image outputs.
Returns
---------
None
Examples
---------
main(
train_data_file_path="./data/processed/train.csv",
test_data_file_path="./data/processed/test.csv",
csv_output_dir_path="./data/output/",
image_output_dir_path="./img/"
)
"""
if not train_data_file_path:
raise Exception("Please provide a valid file path for training data.")
if not test_data_file_path:
raise Exception("Please provide a valid file path for test data.")
if csv_output_dir_path[-1] != "/":
raise Exception("Please include the '/' character at the end of the csv_output_dir_path")
if image_output_dir_path[-1] != "/":
raise Exception("Please include the '/' character at the end of the image_output_dir_path")
# Training Data
train_data = pd.read_csv(train_data_file_path)
X_train = train_data.drop(["G3", "G2", "G1"], axis = 1)
y_train = train_data["G3"]
# Identify numerical vs categorical features
categorical_features = X_train.loc[:,("school","sex", "address", "famsize", "Pstatus", "Mjob", "Fjob", "reason",
"guardian","schoolsup", "famsup", "paid","activities","nursery", "higher",
"internet","romantic")].columns
numeric_features = X_train.loc[:,("age", "Medu", "Fedu", "traveltime", "studytime", "failures", "famrel",
"freetime", "goout", "Dalc", "Walc", "health", "absences")].columns
# Create preprocessor
preprocessor = ColumnTransformer(
transformers=[
('num', StandardScaler(), numeric_features),
('ohe', OneHotEncoder(drop = "first"), categorical_features)
])
# Convert to dataframe
X_train_trans = pd.DataFrame(preprocessor.fit_transform(X_train),
index = X_train.index,
columns = (list(numeric_features) +
list(preprocessor.named_transformers_['ohe'].get_feature_names(categorical_features))))
######## Modelling Begins ########
# Linear Model Lasso with BayesOpt
def cv_mse_lmlasso(alpha):
"""
Performs cross validation for linear regression with Lasso (L1) regularisation. Used as the objective for the Bayesian optimiser's maximize function.
Parameters
----------
alpha : float
L1 regularisation constant
Returns
-------
float
Mean cross-validation score based on negative root mean squared error.
"""
estimator = Lasso(alpha)
# Scoring is neg_root_mean_squared_error, so a higher (less negative) score indicates a better model
return cross_validate(estimator, X_train_trans, y_train, cv = 10, scoring = "neg_root_mean_squared_error")["test_score"].mean()
lmlasso_params = {'alpha':(0.001,100)}
optimizer_lmlasso = BayesianOptimization(cv_mse_lmlasso, lmlasso_params, random_state = 1)
print("HyperParameter Tuning: Linear Regression with Ridge")
optimizer_lmlasso.maximize(n_iter = 10)
# Linear Model Ridge with BayesOpt
def cv_mse_lmridge(alpha):
"""
Performs cross validation for linear regression with Ridge (L2) regularisation. Used as the objective for the Bayesian optimiser's maximize function.
Parameters
----------
alpha : float
L2 regularisation constant
Returns
-------
float
Mean cross-validation score based on negative root mean squared error.
"""
estimator = Ridge(alpha)
# Scoring is neg_root_mean_squared_error, so a higher (less negative) score indicates a better model
return cross_validate(estimator, X_train_trans, y_train, cv = 10, scoring = "neg_root_mean_squared_error")["test_score"].mean()
lmridge_params = {'alpha':(0.001,100)}
optimizer_lmridge = BayesianOptimization(cv_mse_lmridge, lmridge_params, random_state= 1)
print("HyperParameter Tuning: Linear Regression with Ridge")
optimizer_lmridge.maximize(n_iter = 10)
# SKLearn Random Forest with BayesOpt
def cv_mse_rf(n_estimators,max_depth, max_features):
"""
Performs cross validation for the Random Forest Regressor. Used as the objective for the Bayesian optimiser's maximize function.
Parameters
----------
n_estimators : float
Number of estimators for random forest
max_depth : float
Max depth of trees in random forest
max_features : float
Max number of features in random forest
Returns
-------
float
Mean cross-validation score based on negative root mean squared error.
"""
# Convert chosen hyperparams to discrete integer
max_depth = int(max_depth)
max_features = int(max_features)
n_estimators = int(n_estimators)
estimator = RandomForestRegressor(n_estimators = n_estimators, max_depth = max_depth, max_features = max_features)
# Scoring is neg_root_mean_squared_error, so a higher (less negative) score indicates a better model
return cross_validate(estimator, X_train_trans, y_train, cv = 10, scoring = "neg_root_mean_squared_error")["test_score"].mean()
rf_params = {'n_estimators':(10,150), 'max_depth':(10,200), 'max_features':(2, 30)}
optimizer_rf = BayesianOptimization(cv_mse_rf, rf_params, random_state= 1)
print("HyperParameter Tuning: Random Forest Regressor")
optimizer_rf.maximize(n_iter = 20)
# XGBoost Regressor with BayesOpt
def cv_mse_xgb(n_estimators, max_depth, learning_rate, subsample, gamma, reg_alpha, reg_lambda):
"""
Performs cross validation for the XGBoost Regressor. Used as the objective for the Bayesian optimiser's maximize function.
Parameters
----------
n_estimators : float
Number of estimators
max_depth : float
Max depth of trees
learning_rate : float
Learning rate
subsample : float
Subsample ratio of training instances
gamma : float
Min loss reduction to make further partition on leaf node
reg_alpha : float
L1 regularisation
reg_lambda : float
L2 regularisation
Returns
-------
float
Mean cross-validation score based on negative root mean squared error.
"""
# Convert chosen hyperparams to discrete integer
max_depth = int(max_depth)
n_estimators = int(n_estimators)
estimator = xgb.XGBRegressor(objective='reg:squarederror',
n_estimators = n_estimators,
max_depth = max_depth,
learning_rate = learning_rate,
subsample = subsample,
gamma = gamma,
reg_alpha = reg_alpha,
reg_lambda = reg_lambda)
# Scoring is neg_root_mean_squared_error, so a higher (less negative) score indicates a better model
return cross_validate(estimator, X_train_trans, y_train, cv = 10, scoring = "neg_root_mean_squared_error")["test_score"].mean()
xgb_params = {'n_estimators':(10, 150), 'max_depth':(10, 200), 'learning_rate':(0, 1),
'subsample':(0, 1), 'gamma':(0, 50), 'reg_alpha':(0, 100), 'reg_lambda':(0, 100)}
# Warnings due to some current issue with xgboost incompatibility with pandas deprecation
# Fix will be for upcoming xgboost version 1.0.0, but latest version is only 0.90
# See https://github.com/dmlc/xgboost/issues/4300
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
optimizer_xgb = BayesianOptimization(cv_mse_xgb, xgb_params, random_state = 1)
print("HyperParameter Tuning: XGBoost Regressor")
optimizer_xgb.maximize(n_iter = 20)
# LightGBM with BayesOpt
def cv_mse_lgbm(n_estimators, max_depth, learning_rate, reg_alpha, reg_lambda):
"""
Performs cross validation for the LightGBM Regressor. Used as the objective for the Bayesian optimiser's maximize function.
Parameters
----------
n_estimators : float
Number of estimators
max_depth : float
Max depth of trees
learning_rate : float
Learning rate
reg_alpha : float
L1 regularisation
reg_lambda : float
L2 regularisation
Returns
-------
float
Mean cross-validation score based on negative root mean squared error.
"""
# Convert chosen hyperparams to discrete integer
max_depth = int(max_depth)
n_estimators = int(n_estimators)
estimator = lgbm.LGBMRegressor(n_estimators = n_estimators,
max_depth = max_depth,
learning_rate = learning_rate,
reg_alpha = reg_alpha,
reg_lambda = reg_lambda)
# Scoring is neg_root_mean_squared_error, so a higher (less negative) score indicates a better model
return cross_validate(estimator, X_train_trans, y_train, cv = 10, scoring = "neg_root_mean_squared_error")["test_score"].mean()
lgbm_params = {'n_estimators':(10, 150), 'max_depth':(10, 200), 'learning_rate':(0.001, 1),
'reg_alpha':(0, 100), 'reg_lambda':(0, 100)}
optimizer_lgbm = BayesianOptimization(cv_mse_lgbm, lgbm_params, random_state = 1)
print("HyperParameter Tuning: LGBM Regressor")
optimizer_lgbm.maximize(n_iter = 20)
# Compare CV Scores across the best models
cv_rmse = [-optimizer_lmlasso.max['target'],
-optimizer_lmridge.max['target'],
-optimizer_rf.max['target'],
-optimizer_xgb.max['target'],
-optimizer_lgbm.max['target']]
models = ["lm_lasso", "lm_ridge", "randomforest", "xgb", "lgbm"]
cv_df = pd.DataFrame(cv_rmse, index = models, columns = ["cv_score"])
# Output CV_df to csv
cv_df.to_csv(csv_output_dir_path + "cv_results.csv")
# Adjusting discrete hyperparam for certain models
lmlasso_hyperparam = optimizer_lmlasso.max['params']
lmridge_hyperparam = optimizer_lmridge.max['params']
rf_hyperparam = optimizer_rf.max['params']
rf_hyperparam['max_depth'] = int(rf_hyperparam['max_depth'])
rf_hyperparam['max_features'] = int(rf_hyperparam['max_features'])
rf_hyperparam['n_estimators'] = int(rf_hyperparam['n_estimators'])
xgb_hyperparam = optimizer_xgb.max['params']
xgb_hyperparam['max_depth'] = int(xgb_hyperparam['max_depth'])
xgb_hyperparam['n_estimators'] = int(xgb_hyperparam['n_estimators'])
lgbm_hyperparam = optimizer_lgbm.max['params']
lgbm_hyperparam['max_depth'] = int(lgbm_hyperparam['max_depth'])
lgbm_hyperparam['n_estimators'] = int(lgbm_hyperparam['n_estimators'])
# Store as Series for writing to csv.
lmlasso_hyperparam_series = pd.Series(optimizer_lmlasso.max['params'])
lmridge_hyperparam_series = pd.Series(optimizer_lmridge.max['params'])
from datetime import datetime
import numpy as np
import pandas as pd
import pytest
from numba import njit
import vectorbt as vbt
from tests.utils import record_arrays_close
from vectorbt.generic.enums import range_dt, drawdown_dt
from vectorbt.portfolio.enums import order_dt, trade_dt, log_dt
day_dt = np.timedelta64(86400000000000)
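# Shared fixtures for the tests below: a structured record array with three records for each of
# the columns 'a', 'b' and 'c' (column 'd' stays empty), an array wrapper over index ['x', 'y', 'z'],
# a grouped variant mapping 'a'/'b' -> 'g1' and 'c'/'d' -> 'g2', and an unsorted copy of the records.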
example_dt = np.dtype([
('id', np.int64),
('col', np.int64),
('idx', np.int64),
('some_field1', np.float64),
('some_field2', np.float64)
], align=True)
records_arr = np.asarray([
(0, 0, 0, 10, 21),
(1, 0, 1, 11, 20),
(2, 0, 2, 12, 19),
(3, 1, 0, 13, 18),
(4, 1, 1, 14, 17),
(5, 1, 2, 13, 18),
(6, 2, 0, 12, 19),
(7, 2, 1, 11, 20),
(8, 2, 2, 10, 21)
], dtype=example_dt)
records_nosort_arr = np.concatenate((
records_arr[0::3],
records_arr[1::3],
records_arr[2::3]
))
group_by = pd.Index(['g1', 'g1', 'g2', 'g2'])
wrapper = vbt.ArrayWrapper(
index=['x', 'y', 'z'],
columns=['a', 'b', 'c', 'd'],
ndim=2,
freq='1 days'
)
wrapper_grouped = wrapper.replace(group_by=group_by)
records = vbt.records.Records(wrapper, records_arr)
records_grouped = vbt.records.Records(wrapper_grouped, records_arr)
records_nosort = records.replace(records_arr=records_nosort_arr)
records_nosort_grouped = vbt.records.Records(wrapper_grouped, records_nosort_arr)
# ############# Global ############# #
def setup_module():
vbt.settings.numba['check_func_suffix'] = True
vbt.settings.caching.enabled = False
vbt.settings.caching.whitelist = []
vbt.settings.caching.blacklist = []
def teardown_module():
vbt.settings.reset()
# ############# col_mapper.py ############# #
class TestColumnMapper:
def test_col_arr(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_arr,
np.array([0, 0, 0])
)
np.testing.assert_array_equal(
records.col_mapper.col_arr,
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
)
def test_get_col_arr(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_arr(),
records.col_mapper.col_arr
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_arr(),
np.array([0, 0, 0, 0, 0, 0])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_arr(),
np.array([0, 0, 0, 0, 0, 0, 1, 1, 1])
)
def test_col_range(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_range,
np.array([
[0, 3]
])
)
np.testing.assert_array_equal(
records.col_mapper.col_range,
np.array([
[0, 3],
[3, 6],
[6, 9],
[-1, -1]
])
)
def test_get_col_range(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_range(),
np.array([
[0, 3],
[3, 6],
[6, 9],
[-1, -1]
])
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_range(),
np.array([[0, 6]])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_range(),
np.array([[0, 6], [6, 9]])
)
def test_col_map(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_map[0],
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
records['a'].col_mapper.col_map[1],
np.array([3])
)
np.testing.assert_array_equal(
records.col_mapper.col_map[0],
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
np.testing.assert_array_equal(
records.col_mapper.col_map[1],
np.array([3, 3, 3, 0])
)
def test_get_col_map(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_map()[0],
records.col_mapper.col_map[0]
)
np.testing.assert_array_equal(
records.col_mapper.get_col_map()[1],
records.col_mapper.col_map[1]
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_map()[0],
np.array([0, 1, 2, 3, 4, 5])
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_map()[1],
np.array([6])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_map()[0],
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_map()[1],
np.array([6, 3])
)
def test_is_sorted(self):
assert records.col_mapper.is_sorted()
assert not records_nosort.col_mapper.is_sorted()
# ############# mapped_array.py ############# #
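# Mapped-array fixtures built from the records above: views over the 'some_field1' field
# (sorted, grouped and unsorted variants) plus a value -> 'test_<value>' label mapping used
# by the value_counts and stats tests below.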
mapped_array = records.map_field('some_field1')
mapped_array_grouped = records_grouped.map_field('some_field1')
mapped_array_nosort = records_nosort.map_field('some_field1')
mapped_array_nosort_grouped = records_nosort_grouped.map_field('some_field1')
mapping = {x: 'test_' + str(x) for x in pd.unique(mapped_array.values)}
mp_mapped_array = mapped_array.replace(mapping=mapping)
mp_mapped_array_grouped = mapped_array_grouped.replace(mapping=mapping)
class TestMappedArray:
def test_config(self, tmp_path):
assert vbt.MappedArray.loads(mapped_array.dumps()) == mapped_array
mapped_array.save(tmp_path / 'mapped_array')
assert vbt.MappedArray.load(tmp_path / 'mapped_array') == mapped_array
def test_mapped_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].values,
np.array([10., 11., 12.])
)
np.testing.assert_array_equal(
mapped_array.values,
np.array([10., 11., 12., 13., 14., 13., 12., 11., 10.])
)
def test_id_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].id_arr,
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
mapped_array.id_arr,
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
def test_col_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].col_arr,
np.array([0, 0, 0])
)
np.testing.assert_array_equal(
mapped_array.col_arr,
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
)
def test_idx_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].idx_arr,
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
mapped_array.idx_arr,
np.array([0, 1, 2, 0, 1, 2, 0, 1, 2])
)
def test_is_sorted(self):
assert mapped_array.is_sorted()
assert mapped_array.is_sorted(incl_id=True)
assert not mapped_array_nosort.is_sorted()
assert not mapped_array_nosort.is_sorted(incl_id=True)
def test_sort(self):
assert mapped_array.sort().is_sorted()
assert mapped_array.sort().is_sorted(incl_id=True)
assert mapped_array.sort(incl_id=True).is_sorted(incl_id=True)
assert mapped_array_nosort.sort().is_sorted()
assert mapped_array_nosort.sort().is_sorted(incl_id=True)
assert mapped_array_nosort.sort(incl_id=True).is_sorted(incl_id=True)
def test_apply_mask(self):
mask_a = mapped_array['a'].values >= mapped_array['a'].values.mean()
np.testing.assert_array_equal(
mapped_array['a'].apply_mask(mask_a).id_arr,
np.array([1, 2])
)
mask = mapped_array.values >= mapped_array.values.mean()
filtered = mapped_array.apply_mask(mask)
np.testing.assert_array_equal(
filtered.id_arr,
np.array([2, 3, 4, 5, 6])
)
np.testing.assert_array_equal(filtered.col_arr, mapped_array.col_arr[mask])
np.testing.assert_array_equal(filtered.idx_arr, mapped_array.idx_arr[mask])
assert mapped_array_grouped.apply_mask(mask).wrapper == mapped_array_grouped.wrapper
assert mapped_array_grouped.apply_mask(mask, group_by=False).wrapper.grouper.group_by is None
def test_map_to_mask(self):
@njit
def every_2_nb(inout, idxs, col, mapped_arr):
inout[idxs[::2]] = True
np.testing.assert_array_equal(
mapped_array.map_to_mask(every_2_nb),
np.array([True, False, True, True, False, True, True, False, True])
)
def test_top_n_mask(self):
np.testing.assert_array_equal(
mapped_array.top_n_mask(1),
np.array([False, False, True, False, True, False, True, False, False])
)
def test_bottom_n_mask(self):
np.testing.assert_array_equal(
mapped_array.bottom_n_mask(1),
np.array([True, False, False, True, False, False, False, False, True])
)
def test_top_n(self):
np.testing.assert_array_equal(
mapped_array.top_n(1).id_arr,
np.array([2, 4, 6])
)
def test_bottom_n(self):
np.testing.assert_array_equal(
mapped_array.bottom_n(1).id_arr,
np.array([0, 3, 8])
)
def test_to_pd(self):
target = pd.DataFrame(
np.array([
[10., 13., 12., np.nan],
[11., 14., 11., np.nan],
[12., 13., 10., np.nan]
]),
index=wrapper.index,
columns=wrapper.columns
)
pd.testing.assert_series_equal(
mapped_array['a'].to_pd(),
target['a']
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(),
target
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(fill_value=0.),
target.fillna(0.)
)
mapped_array2 = vbt.MappedArray(
wrapper,
records_arr['some_field1'].tolist() + [1],
records_arr['col'].tolist() + [2],
idx_arr=records_arr['idx'].tolist() + [2]
)
with pytest.raises(Exception):
_ = mapped_array2.to_pd()
pd.testing.assert_series_equal(
mapped_array['a'].to_pd(ignore_index=True),
pd.Series(np.array([10., 11., 12.]), name='a')
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(ignore_index=True),
pd.DataFrame(
np.array([
[10., 13., 12., np.nan],
[11., 14., 11., np.nan],
[12., 13., 10., np.nan]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(fill_value=0, ignore_index=True),
pd.DataFrame(
np.array([
[10., 13., 12., 0.],
[11., 14., 11., 0.],
[12., 13., 10., 0.]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array_grouped.to_pd(ignore_index=True),
pd.DataFrame(
np.array([
[10., 12.],
[11., 11.],
[12., 10.],
[13., np.nan],
[14., np.nan],
[13., np.nan],
]),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
def test_apply(self):
@njit
def cumsum_apply_nb(idxs, col, a):
return np.cumsum(a)
np.testing.assert_array_equal(
mapped_array['a'].apply(cumsum_apply_nb).values,
np.array([10., 21., 33.])
)
np.testing.assert_array_equal(
mapped_array.apply(cumsum_apply_nb).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
mapped_array_grouped.apply(cumsum_apply_nb, apply_per_group=False).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
mapped_array_grouped.apply(cumsum_apply_nb, apply_per_group=True).values,
np.array([10., 21., 33., 46., 60., 73., 12., 23., 33.])
)
assert mapped_array_grouped.apply(cumsum_apply_nb).wrapper == \
mapped_array.apply(cumsum_apply_nb, group_by=group_by).wrapper
assert mapped_array.apply(cumsum_apply_nb, group_by=False).wrapper.grouper.group_by is None
def test_reduce(self):
@njit
def mean_reduce_nb(col, a):
return np.mean(a)
assert mapped_array['a'].reduce(mean_reduce_nb) == 11.
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb),
pd.Series(np.array([11., 13.333333333333334, 11., np.nan]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, fill_value=0.),
pd.Series(np.array([11., 13.333333333333334, 11., 0.]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, fill_value=0., wrap_kwargs=dict(dtype=np.int_)),
pd.Series(np.array([11., 13.333333333333334, 11., 0.]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, wrap_kwargs=dict(to_timedelta=True)),
pd.Series(np.array([11., 13.333333333333334, 11., np.nan]), index=wrapper.columns).rename('reduce') * day_dt
)
pd.testing.assert_series_equal(
mapped_array_grouped.reduce(mean_reduce_nb),
pd.Series([12.166666666666666, 11.0], index=pd.Index(['g1', 'g2'], dtype='object')).rename('reduce')
)
assert mapped_array_grouped['g1'].reduce(mean_reduce_nb) == 12.166666666666666
pd.testing.assert_series_equal(
mapped_array_grouped[['g1']].reduce(mean_reduce_nb),
pd.Series([12.166666666666666], index=pd.Index(['g1'], dtype='object')).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb),
mapped_array_grouped.reduce(mean_reduce_nb, group_by=False)
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, group_by=group_by),
mapped_array_grouped.reduce(mean_reduce_nb)
)
def test_reduce_to_idx(self):
@njit
def argmin_reduce_nb(col, a):
return np.argmin(a)
assert mapped_array['a'].reduce(argmin_reduce_nb, returns_idx=True) == 'x'
pd.testing.assert_series_equal(
mapped_array.reduce(argmin_reduce_nb, returns_idx=True),
pd.Series(np.array(['x', 'x', 'z', np.nan], dtype=object), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(argmin_reduce_nb, returns_idx=True, to_index=False),
pd.Series(np.array([0, 0, 2, -1], dtype=int), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array_grouped.reduce(argmin_reduce_nb, returns_idx=True, to_index=False),
pd.Series(np.array([0, 2], dtype=int), index=pd.Index(['g1', 'g2'], dtype='object')).rename('reduce')
)
def test_reduce_to_array(self):
@njit
def min_max_reduce_nb(col, a):
return np.array([np.min(a), np.max(a)])
pd.testing.assert_series_equal(
mapped_array['a'].reduce(min_max_reduce_nb, returns_array=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])),
pd.Series([10., 12.], index=pd.Index(['min', 'max'], dtype='object'), name='a')
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, wrap_kwargs=dict(name_or_index=['min', 'max'])),
pd.DataFrame(
np.array([
[10., 13., 10., np.nan],
[12., 14., 12., np.nan]
]),
index=pd.Index(['min', 'max'], dtype='object'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, fill_value=0.),
pd.DataFrame(
np.array([
[10., 13., 10., 0.],
[12., 14., 12., 0.]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, wrap_kwargs=dict(to_timedelta=True)),
pd.DataFrame(
np.array([
[10., 13., 10., np.nan],
[12., 14., 12., np.nan]
]),
columns=wrapper.columns
) * day_dt
)
pd.testing.assert_frame_equal(
mapped_array_grouped.reduce(min_max_reduce_nb, returns_array=True),
pd.DataFrame(
np.array([
[10., 10.],
[14., 12.]
]),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True),
mapped_array_grouped.reduce(min_max_reduce_nb, returns_array=True, group_by=False)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, group_by=group_by),
mapped_array_grouped.reduce(min_max_reduce_nb, returns_array=True)
)
pd.testing.assert_series_equal(
mapped_array_grouped['g1'].reduce(min_max_reduce_nb, returns_array=True),
pd.Series([10., 14.], name='g1')
)
pd.testing.assert_frame_equal(
mapped_array_grouped[['g1']].reduce(min_max_reduce_nb, returns_array=True),
pd.DataFrame([[10.], [14.]], columns=pd.Index(['g1'], dtype='object'))
)
def test_reduce_to_idx_array(self):
@njit
def idxmin_idxmax_reduce_nb(col, a):
return np.array([np.argmin(a), np.argmax(a)])
pd.testing.assert_series_equal(
mapped_array['a'].reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])
),
pd.Series(
np.array(['x', 'z'], dtype=object),
index=pd.Index(['min', 'max'], dtype='object'),
name='a'
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])
),
pd.DataFrame(
{
'a': ['x', 'z'],
'b': ['x', 'y'],
'c': ['z', 'x'],
'd': [np.nan, np.nan]
},
index=pd.Index(['min', 'max'], dtype='object')
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
to_index=False
),
pd.DataFrame(
np.array([
[0, 0, 2, -1],
[2, 1, 0, -1]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array_grouped.reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
to_index=False
),
pd.DataFrame(
np.array([
[0, 2],
[1, 0]
]),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
def test_nth(self):
assert mapped_array['a'].nth(0) == 10.
pd.testing.assert_series_equal(
mapped_array.nth(0),
pd.Series(np.array([10., 13., 12., np.nan]), index=wrapper.columns).rename('nth')
)
assert mapped_array['a'].nth(-1) == 12.
pd.testing.assert_series_equal(
mapped_array.nth(-1),
pd.Series(np.array([12., 13., 10., np.nan]), index=wrapper.columns).rename('nth')
)
with pytest.raises(Exception):
_ = mapped_array.nth(10)
pd.testing.assert_series_equal(
mapped_array_grouped.nth(0),
pd.Series(np.array([10., 12.]), index=pd.Index(['g1', 'g2'], dtype='object')).rename('nth')
)
def test_nth_index(self):
assert mapped_array['a'].nth(0) == 10.
pd.testing.assert_series_equal(
mapped_array.nth_index(0),
pd.Series(
np.array(['x', 'x', 'x', np.nan], dtype='object'),
index=wrapper.columns
).rename('nth_index')
)
assert mapped_array['a'].nth(-1) == 12.
pd.testing.assert_series_equal(
mapped_array.nth_index(-1),
pd.Series(
np.array(['z', 'z', 'z', np.nan], dtype='object'),
index=wrapper.columns
).rename('nth_index')
)
with pytest.raises(Exception):
_ = mapped_array.nth_index(10)
pd.testing.assert_series_equal(
mapped_array_grouped.nth_index(0),
pd.Series(
np.array(['x', 'x'], dtype='object'),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('nth_index')
)
def test_min(self):
assert mapped_array['a'].min() == mapped_array['a'].to_pd().min()
pd.testing.assert_series_equal(
mapped_array.min(),
mapped_array.to_pd().min().rename('min')
)
pd.testing.assert_series_equal(
mapped_array_grouped.min(),
pd.Series([10., 10.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('min')
)
def test_max(self):
assert mapped_array['a'].max() == mapped_array['a'].to_pd().max()
pd.testing.assert_series_equal(
mapped_array.max(),
mapped_array.to_pd().max().rename('max')
)
pd.testing.assert_series_equal(
mapped_array_grouped.max(),
pd.Series([14., 12.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('max')
)
def test_mean(self):
assert mapped_array['a'].mean() == mapped_array['a'].to_pd().mean()
pd.testing.assert_series_equal(
mapped_array.mean(),
mapped_array.to_pd().mean().rename('mean')
)
pd.testing.assert_series_equal(
mapped_array_grouped.mean(),
pd.Series([12.166667, 11.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('mean')
)
def test_median(self):
assert mapped_array['a'].median() == mapped_array['a'].to_pd().median()
pd.testing.assert_series_equal(
mapped_array.median(),
mapped_array.to_pd().median().rename('median')
)
pd.testing.assert_series_equal(
mapped_array_grouped.median(),
pd.Series([12.5, 11.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('median')
)
def test_std(self):
assert mapped_array['a'].std() == mapped_array['a'].to_pd().std()
pd.testing.assert_series_equal(
mapped_array.std(),
mapped_array.to_pd().std().rename('std')
)
pd.testing.assert_series_equal(
mapped_array.std(ddof=0),
mapped_array.to_pd().std(ddof=0).rename('std')
)
pd.testing.assert_series_equal(
mapped_array_grouped.std(),
pd.Series([1.4719601443879746, 1.0], index=pd.Index(['g1', 'g2'], dtype='object')).rename('std')
)
def test_sum(self):
assert mapped_array['a'].sum() == mapped_array['a'].to_pd().sum()
pd.testing.assert_series_equal(
mapped_array.sum(),
mapped_array.to_pd().sum().rename('sum')
)
pd.testing.assert_series_equal(
mapped_array_grouped.sum(),
pd.Series([73.0, 33.0], index=pd.Index(['g1', 'g2'], dtype='object')).rename('sum')
)
def test_count(self):
assert mapped_array['a'].count() == mapped_array['a'].to_pd().count()
pd.testing.assert_series_equal(
mapped_array.count(),
mapped_array.to_pd().count().rename('count')
)
pd.testing.assert_series_equal(
mapped_array_grouped.count(),
pd.Series([6, 3], index=pd.Index(['g1', 'g2'], dtype='object')).rename('count')
)
def test_idxmin(self):
assert mapped_array['a'].idxmin() == mapped_array['a'].to_pd().idxmin()
pd.testing.assert_series_equal(
mapped_array.idxmin(),
mapped_array.to_pd().idxmin().rename('idxmin')
)
pd.testing.assert_series_equal(
mapped_array_grouped.idxmin(),
pd.Series(
np.array(['x', 'z'], dtype=object),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('idxmin')
)
def test_idxmax(self):
assert mapped_array['a'].idxmax() == mapped_array['a'].to_pd().idxmax()
pd.testing.assert_series_equal(
mapped_array.idxmax(),
mapped_array.to_pd().idxmax().rename('idxmax')
)
pd.testing.assert_series_equal(
mapped_array_grouped.idxmax(),
pd.Series(
np.array(['y', 'x'], dtype=object),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('idxmax')
)
def test_describe(self):
pd.testing.assert_series_equal(
mapped_array['a'].describe(),
mapped_array['a'].to_pd().describe()
)
pd.testing.assert_frame_equal(
mapped_array.describe(percentiles=None),
mapped_array.to_pd().describe(percentiles=None)
)
pd.testing.assert_frame_equal(
mapped_array.describe(percentiles=[]),
mapped_array.to_pd().describe(percentiles=[])
)
pd.testing.assert_frame_equal(
mapped_array.describe(percentiles=np.arange(0, 1, 0.1)),
mapped_array.to_pd().describe(percentiles=np.arange(0, 1, 0.1))
)
pd.testing.assert_frame_equal(
mapped_array_grouped.describe(),
pd.DataFrame(
np.array([
[6., 3.],
[12.16666667, 11.],
[1.47196014, 1.],
[10., 10.],
[11.25, 10.5],
[12.5, 11.],
[13., 11.5],
[14., 12.]
]),
columns=pd.Index(['g1', 'g2'], dtype='object'),
index=mapped_array.describe().index
)
)
def test_value_counts(self):
pd.testing.assert_series_equal(
mapped_array['a'].value_counts(),
pd.Series(
np.array([1, 1, 1]),
index=pd.Float64Index([10.0, 11.0, 12.0], dtype='float64'),
name='a'
)
)
pd.testing.assert_series_equal(
mapped_array['a'].value_counts(mapping=mapping),
pd.Series(
np.array([1, 1, 1]),
index=pd.Index(['test_10.0', 'test_11.0', 'test_12.0'], dtype='object'),
name='a'
)
)
pd.testing.assert_frame_equal(
mapped_array.value_counts(),
pd.DataFrame(
np.array([
[1, 0, 1, 0],
[1, 0, 1, 0],
[1, 0, 1, 0],
[0, 2, 0, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([10.0, 11.0, 12.0, 13.0, 14.0], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array_grouped.value_counts(),
pd.DataFrame(
np.array([
[1, 1],
[1, 1],
[1, 1],
[2, 0],
[1, 0]
]),
index=pd.Float64Index([10.0, 11.0, 12.0, 13.0, 14.0], dtype='float64'),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
mapped_array2 = mapped_array.replace(mapped_arr=[4, 4, 3, 2, np.nan, 4, 3, 2, 1])
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort_uniques=False),
pd.DataFrame(
np.array([
[2, 1, 0, 0],
[1, 0, 1, 0],
[0, 1, 1, 0],
[0, 0, 1, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([4.0, 3.0, 2.0, 1.0, None], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort_uniques=True),
pd.DataFrame(
np.array([
[0, 0, 1, 0],
[0, 1, 1, 0],
[1, 0, 1, 0],
[2, 1, 0, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([1.0, 2.0, 3.0, 4.0, None], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True),
pd.DataFrame(
np.array([
[2, 1, 0, 0],
[0, 1, 1, 0],
[1, 0, 1, 0],
[0, 0, 1, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([4.0, 2.0, 3.0, 1.0, np.nan], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True, ascending=True),
pd.DataFrame(
np.array([
[0, 0, 1, 0],
[0, 1, 0, 0],
[0, 1, 1, 0],
[1, 0, 1, 0],
[2, 1, 0, 0]
]),
index=pd.Float64Index([1.0, np.nan, 2.0, 3.0, 4.0], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True, normalize=True),
pd.DataFrame(
np.array([
[0.2222222222222222, 0.1111111111111111, 0.0, 0.0],
[0.0, 0.1111111111111111, 0.1111111111111111, 0.0],
[0.1111111111111111, 0.0, 0.1111111111111111, 0.0],
[0.0, 0.0, 0.1111111111111111, 0.0],
[0.0, 0.1111111111111111, 0.0, 0.0]
]),
index=pd.Float64Index([4.0, 2.0, 3.0, 1.0, np.nan], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True, normalize=True, dropna=True),
pd.DataFrame(
np.array([
[0.25, 0.125, 0.0, 0.0],
[0.0, 0.125, 0.125, 0.0],
[0.125, 0.0, 0.125, 0.0],
[0.0, 0.0, 0.125, 0.0]
]),
index=pd.Float64Index([4.0, 2.0, 3.0, 1.0], dtype='float64'),
columns=wrapper.columns
)
)
@pytest.mark.parametrize(
"test_nosort",
[False, True],
)
def test_indexing(self, test_nosort):
if test_nosort:
ma = mapped_array_nosort
ma_grouped = mapped_array_nosort_grouped
else:
ma = mapped_array
ma_grouped = mapped_array_grouped
np.testing.assert_array_equal(
ma['a'].id_arr,
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
ma['a'].col_arr,
np.array([0, 0, 0])
)
pd.testing.assert_index_equal(
ma['a'].wrapper.columns,
pd.Index(['a'], dtype='object')
)
np.testing.assert_array_equal(
ma['b'].id_arr,
np.array([3, 4, 5])
)
np.testing.assert_array_equal(
ma['b'].col_arr,
np.array([0, 0, 0])
)
pd.testing.assert_index_equal(
ma['b'].wrapper.columns,
pd.Index(['b'], dtype='object')
)
np.testing.assert_array_equal(
ma[['a', 'a']].id_arr,
np.array([0, 1, 2, 0, 1, 2])
)
np.testing.assert_array_equal(
ma[['a', 'a']].col_arr,
np.array([0, 0, 0, 1, 1, 1])
)
pd.testing.assert_index_equal(
ma[['a', 'a']].wrapper.columns,
pd.Index(['a', 'a'], dtype='object')
)
np.testing.assert_array_equal(
ma[['a', 'b']].id_arr,
np.array([0, 1, 2, 3, 4, 5])
)
np.testing.assert_array_equal(
ma[['a', 'b']].col_arr,
np.array([0, 0, 0, 1, 1, 1])
)
pd.testing.assert_index_equal(
ma[['a', 'b']].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
with pytest.raises(Exception):
_ = ma.iloc[::2, :] # changing time not supported
pd.testing.assert_index_equal(
ma_grouped['g1'].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
assert ma_grouped['g1'].wrapper.ndim == 2
assert ma_grouped['g1'].wrapper.grouped_ndim == 1
pd.testing.assert_index_equal(
ma_grouped['g1'].wrapper.grouper.group_by,
pd.Index(['g1', 'g1'], dtype='object')
)
pd.testing.assert_index_equal(
ma_grouped['g2'].wrapper.columns,
pd.Index(['c', 'd'], dtype='object')
)
assert ma_grouped['g2'].wrapper.ndim == 2
assert ma_grouped['g2'].wrapper.grouped_ndim == 1
pd.testing.assert_index_equal(
ma_grouped['g2'].wrapper.grouper.group_by,
pd.Index(['g2', 'g2'], dtype='object')
)
pd.testing.assert_index_equal(
ma_grouped[['g1']].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
assert ma_grouped[['g1']].wrapper.ndim == 2
assert ma_grouped[['g1']].wrapper.grouped_ndim == 2
pd.testing.assert_index_equal(
ma_grouped[['g1']].wrapper.grouper.group_by,
pd.Index(['g1', 'g1'], dtype='object')
)
pd.testing.assert_index_equal(
ma_grouped[['g1', 'g2']].wrapper.columns,
pd.Index(['a', 'b', 'c', 'd'], dtype='object')
)
assert ma_grouped[['g1', 'g2']].wrapper.ndim == 2
assert ma_grouped[['g1', 'g2']].wrapper.grouped_ndim == 2
pd.testing.assert_index_equal(
ma_grouped[['g1', 'g2']].wrapper.grouper.group_by,
pd.Index(['g1', 'g1', 'g2', 'g2'], dtype='object')
)
def test_magic(self):
a = vbt.MappedArray(
wrapper,
records_arr['some_field1'],
records_arr['col'],
id_arr=records_arr['id'],
idx_arr=records_arr['idx']
)
a_inv = vbt.MappedArray(
wrapper,
records_arr['some_field1'][::-1],
records_arr['col'][::-1],
id_arr=records_arr['id'][::-1],
idx_arr=records_arr['idx'][::-1]
)
b = records_arr['some_field2']
a_bool = vbt.MappedArray(
wrapper,
records_arr['some_field1'] > np.mean(records_arr['some_field1']),
records_arr['col'],
id_arr=records_arr['id'],
idx_arr=records_arr['idx']
)
b_bool = records_arr['some_field2'] > np.mean(records_arr['some_field2'])
assert a ** a == a ** 2
with pytest.raises(Exception):
_ = a * a_inv
# binary ops
# comparison ops
np.testing.assert_array_equal((a == b).values, a.values == b)
np.testing.assert_array_equal((a != b).values, a.values != b)
np.testing.assert_array_equal((a < b).values, a.values < b)
np.testing.assert_array_equal((a > b).values, a.values > b)
np.testing.assert_array_equal((a <= b).values, a.values <= b)
np.testing.assert_array_equal((a >= b).values, a.values >= b)
# arithmetic ops
np.testing.assert_array_equal((a + b).values, a.values + b)
np.testing.assert_array_equal((a - b).values, a.values - b)
np.testing.assert_array_equal((a * b).values, a.values * b)
np.testing.assert_array_equal((a ** b).values, a.values ** b)
np.testing.assert_array_equal((a % b).values, a.values % b)
np.testing.assert_array_equal((a // b).values, a.values // b)
np.testing.assert_array_equal((a / b).values, a.values / b)
# __r*__ is only called if the left object does not have an __*__ method
np.testing.assert_array_equal((10 + a).values, 10 + a.values)
np.testing.assert_array_equal((10 - a).values, 10 - a.values)
np.testing.assert_array_equal((10 * a).values, 10 * a.values)
np.testing.assert_array_equal((10 ** a).values, 10 ** a.values)
np.testing.assert_array_equal((10 % a).values, 10 % a.values)
np.testing.assert_array_equal((10 // a).values, 10 // a.values)
np.testing.assert_array_equal((10 / a).values, 10 / a.values)
# mask ops
np.testing.assert_array_equal((a_bool & b_bool).values, a_bool.values & b_bool)
np.testing.assert_array_equal((a_bool | b_bool).values, a_bool.values | b_bool)
np.testing.assert_array_equal((a_bool ^ b_bool).values, a_bool.values ^ b_bool)
np.testing.assert_array_equal((True & a_bool).values, True & a_bool.values)
np.testing.assert_array_equal((True | a_bool).values, True | a_bool.values)
np.testing.assert_array_equal((True ^ a_bool).values, True ^ a_bool.values)
# unary ops
np.testing.assert_array_equal((-a).values, -a.values)
np.testing.assert_array_equal((+a).values, +a.values)
np.testing.assert_array_equal((abs(-a)).values, abs((-a.values)))
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Count', 'Mean', 'Std', 'Min', 'Median', 'Max', 'Min Index', 'Max Index'
], dtype='object')
pd.testing.assert_series_equal(
mapped_array.stats(),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'),
2.25, 11.777777777777779, 0.859116756396542, 11.0, 11.666666666666666, 12.666666666666666
],
index=stats_index[:-2],
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
mapped_array.stats(column='a'),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'),
3, 11.0, 1.0, 10.0, 11.0, 12.0, 'x', 'z'
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
mapped_array.stats(column='g1', group_by=group_by),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'),
6, 12.166666666666666, 1.4719601443879746, 10.0, 12.5, 14.0, 'x', 'y'
],
index=stats_index,
name='g1'
)
)
pd.testing.assert_series_equal(
mapped_array['c'].stats(),
mapped_array.stats(column='c')
)
pd.testing.assert_series_equal(
mapped_array['c'].stats(),
mapped_array.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
mapped_array_grouped['g2'].stats(),
mapped_array_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
mapped_array_grouped['g2'].stats(),
mapped_array.stats(column='g2', group_by=group_by)
)
stats_df = mapped_array.stats(agg_func=None)
assert stats_df.shape == (4, 11)
pd.testing.assert_index_equal(stats_df.index, mapped_array.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
def test_stats_mapping(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Count', 'Value Counts: test_10.0',
'Value Counts: test_11.0', 'Value Counts: test_12.0',
'Value Counts: test_13.0', 'Value Counts: test_14.0'
], dtype='object')
pd.testing.assert_series_equal(
mp_mapped_array.stats(),
pd.Series([
'x',
'z',
pd.Timedelta('3 days 00:00:00'),
2.25, 0.5, 0.5, 0.5, 0.5, 0.25
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
mp_mapped_array.stats(column='a'),
pd.Series([
'x',
'z',
pd.Timedelta('3 days 00:00:00'),
3, 1, 1, 1, 0, 0
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
mp_mapped_array.stats(column='g1', group_by=group_by),
pd.Series([
'x',
'z',
pd.Timedelta('3 days 00:00:00'),
6, 1, 1, 1, 2, 1
],
index=stats_index,
name='g1'
)
)
pd.testing.assert_series_equal(
mp_mapped_array.stats(),
mapped_array.stats(settings=dict(mapping=mapping))
)
pd.testing.assert_series_equal(
mp_mapped_array['c'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array.stats(column='c')
)
pd.testing.assert_series_equal(
mp_mapped_array['c'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
mp_mapped_array_grouped['g2'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
mp_mapped_array_grouped['g2'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array.stats(column='g2', group_by=group_by)
)
stats_df = mp_mapped_array.stats(agg_func=None)
assert stats_df.shape == (4, 9)
pd.testing.assert_index_equal(stats_df.index, mp_mapped_array.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
# ############# base.py ############# #
class TestRecords:
def test_config(self, tmp_path):
assert vbt.Records.loads(records['a'].dumps()) == records['a']
assert vbt.Records.loads(records.dumps()) == records
records.save(tmp_path / 'records')
assert vbt.Records.load(tmp_path / 'records') == records
def test_records(self):
pd.testing.assert_frame_equal(
records.records,
pd.DataFrame.from_records(records_arr)
)
def test_recarray(self):
np.testing.assert_array_equal(records['a'].recarray.some_field1, records['a'].values['some_field1'])
np.testing.assert_array_equal(records.recarray.some_field1, records.values['some_field1'])
def test_records_readable(self):
pd.testing.assert_frame_equal(
records.records_readable,
pd.DataFrame([
[0, 'a', 'x', 10.0, 21.0], [1, 'a', 'y', 11.0, 20.0], [2, 'a', 'z', 12.0, 19.0],
[3, 'b', 'x', 13.0, 18.0], [4, 'b', 'y', 14.0, 17.0], [5, 'b', 'z', 13.0, 18.0],
[6, 'c', 'x', 12.0, 19.0], [7, 'c', 'y', 11.0, 20.0], [8, 'c', 'z', 10.0, 21.0]
], columns=pd.Index(['Id', 'Column', 'Timestamp', 'some_field1', 'some_field2'], dtype='object'))
)
def test_is_sorted(self):
assert records.is_sorted()
assert records.is_sorted(incl_id=True)
assert not records_nosort.is_sorted()
assert not records_nosort.is_sorted(incl_id=True)
def test_sort(self):
assert records.sort().is_sorted()
assert records.sort().is_sorted(incl_id=True)
assert records.sort(incl_id=True).is_sorted(incl_id=True)
assert records_nosort.sort().is_sorted()
assert records_nosort.sort().is_sorted(incl_id=True)
assert records_nosort.sort(incl_id=True).is_sorted(incl_id=True)
def test_apply_mask(self):
mask_a = records['a'].values['some_field1'] >= records['a'].values['some_field1'].mean()
record_arrays_close(
records['a'].apply_mask(mask_a).values,
np.array([
(1, 0, 1, 11., 20.), (2, 0, 2, 12., 19.)
], dtype=example_dt)
)
mask = records.values['some_field1'] >= records.values['some_field1'].mean()
filtered = records.apply_mask(mask)
record_arrays_close(
filtered.values,
np.array([
(2, 0, 2, 12., 19.), (3, 1, 0, 13., 18.), (4, 1, 1, 14., 17.),
(5, 1, 2, 13., 18.), (6, 2, 0, 12., 19.)
], dtype=example_dt)
)
assert records_grouped.apply_mask(mask).wrapper == records_grouped.wrapper
def test_map_field(self):
np.testing.assert_array_equal(
records['a'].map_field('some_field1').values,
np.array([10., 11., 12.])
)
np.testing.assert_array_equal(
records.map_field('some_field1').values,
np.array([10., 11., 12., 13., 14., 13., 12., 11., 10.])
)
assert records_grouped.map_field('some_field1').wrapper == \
records.map_field('some_field1', group_by=group_by).wrapper
assert records_grouped.map_field('some_field1', group_by=False).wrapper.grouper.group_by is None
def test_map(self):
@njit
def map_func_nb(record):
return record['some_field1'] + record['some_field2']
np.testing.assert_array_equal(
records['a'].map(map_func_nb).values,
np.array([31., 31., 31.])
)
np.testing.assert_array_equal(
records.map(map_func_nb).values,
np.array([31., 31., 31., 31., 31., 31., 31., 31., 31.])
)
assert records_grouped.map(map_func_nb).wrapper == \
records.map(map_func_nb, group_by=group_by).wrapper
assert records_grouped.map(map_func_nb, group_by=False).wrapper.grouper.group_by is None
def test_map_array(self):
arr = records_arr['some_field1'] + records_arr['some_field2']
np.testing.assert_array_equal(
records['a'].map_array(arr[:3]).values,
np.array([31., 31., 31.])
)
np.testing.assert_array_equal(
records.map_array(arr).values,
np.array([31., 31., 31., 31., 31., 31., 31., 31., 31.])
)
assert records_grouped.map_array(arr).wrapper == \
records.map_array(arr, group_by=group_by).wrapper
assert records_grouped.map_array(arr, group_by=False).wrapper.grouper.group_by is None
def test_apply(self):
@njit
def cumsum_apply_nb(records):
return np.cumsum(records['some_field1'])
np.testing.assert_array_equal(
records['a'].apply(cumsum_apply_nb).values,
np.array([10., 21., 33.])
)
np.testing.assert_array_equal(
records.apply(cumsum_apply_nb).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
records_grouped.apply(cumsum_apply_nb, apply_per_group=False).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
records_grouped.apply(cumsum_apply_nb, apply_per_group=True).values,
np.array([10., 21., 33., 46., 60., 73., 12., 23., 33.])
)
assert records_grouped.apply(cumsum_apply_nb).wrapper == \
records.apply(cumsum_apply_nb, group_by=group_by).wrapper
assert records_grouped.apply(cumsum_apply_nb, group_by=False).wrapper.grouper.group_by is None
def test_count(self):
assert records['a'].count() == 3
pd.testing.assert_series_equal(
records.count(),
pd.Series(
np.array([3, 3, 3, 0]),
index=wrapper.columns
).rename('count')
)
assert records_grouped['g1'].count() == 6
pd.testing.assert_series_equal(
records_grouped.count(),
pd.Series(
np.array([6, 3]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('count')
)
@pytest.mark.parametrize(
"test_nosort",
[False, True],
)
def test_indexing(self, test_nosort):
if test_nosort:
r = records_nosort
r_grouped = records_nosort_grouped
else:
r = records
r_grouped = records_grouped
record_arrays_close(
r['a'].values,
np.array([
(0, 0, 0, 10., 21.), (1, 0, 1, 11., 20.), (2, 0, 2, 12., 19.)
], dtype=example_dt)
)
pd.testing.assert_index_equal(
r['a'].wrapper.columns,
pd.Index(['a'], dtype='object')
)
pd.testing.assert_index_equal(
r['b'].wrapper.columns,
pd.Index(['b'], dtype='object')
)
record_arrays_close(
r[['a', 'a']].values,
np.array([
(0, 0, 0, 10., 21.), (1, 0, 1, 11., 20.), (2, 0, 2, 12., 19.),
(0, 1, 0, 10., 21.), (1, 1, 1, 11., 20.), (2, 1, 2, 12., 19.)
], dtype=example_dt)
)
pd.testing.assert_index_equal(
r[['a', 'a']].wrapper.columns,
pd.Index(['a', 'a'], dtype='object')
)
record_arrays_close(
r[['a', 'b']].values,
np.array([
(0, 0, 0, 10., 21.), (1, 0, 1, 11., 20.), (2, 0, 2, 12., 19.),
(3, 1, 0, 13., 18.), (4, 1, 1, 14., 17.), (5, 1, 2, 13., 18.)
], dtype=example_dt)
)
pd.testing.assert_index_equal(
r[['a', 'b']].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
with pytest.raises(Exception):
_ = r.iloc[::2, :] # changing time not supported
pd.testing.assert_index_equal(
r_grouped['g1'].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
assert r_grouped['g1'].wrapper.ndim == 2
assert r_grouped['g1'].wrapper.grouped_ndim == 1
pd.testing.assert_index_equal(
r_grouped['g1'].wrapper.grouper.group_by,
pd.Index(['g1', 'g1'], dtype='object')
)
pd.testing.assert_index_equal(
r_grouped['g2'].wrapper.columns,
pd.Index(['c', 'd'], dtype='object')
)
assert r_grouped['g2'].wrapper.ndim == 2
assert r_grouped['g2'].wrapper.grouped_ndim == 1
pd.testing.assert_index_equal(
r_grouped['g2'].wrapper.grouper.group_by,
pd.Index(['g2', 'g2'], dtype='object')
)
pd.testing.assert_index_equal(
r_grouped[['g1']].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
assert r_grouped[['g1']].wrapper.ndim == 2
assert r_grouped[['g1']].wrapper.grouped_ndim == 2
pd.testing.assert_index_equal(
r_grouped[['g1']].wrapper.grouper.group_by,
pd.Index(['g1', 'g1'], dtype='object')
)
pd.testing.assert_index_equal(
r_grouped[['g1', 'g2']].wrapper.columns,
pd.Index(['a', 'b', 'c', 'd'], dtype='object')
)
assert r_grouped[['g1', 'g2']].wrapper.ndim == 2
assert r_grouped[['g1', 'g2']].wrapper.grouped_ndim == 2
pd.testing.assert_index_equal(
r_grouped[['g1', 'g2']].wrapper.grouper.group_by,
pd.Index(['g1', 'g1', 'g2', 'g2'], dtype='object')
)
def test_filtering(self):
filtered_records = vbt.Records(wrapper, records_arr[[0, -1]])
record_arrays_close(
filtered_records.values,
np.array([(0, 0, 0, 10., 21.), (8, 2, 2, 10., 21.)], dtype=example_dt)
)
# a
record_arrays_close(
filtered_records['a'].values,
np.array([(0, 0, 0, 10., 21.)], dtype=example_dt)
)
np.testing.assert_array_equal(
filtered_records['a'].map_field('some_field1').id_arr,
np.array([0])
)
assert filtered_records['a'].map_field('some_field1').min() == 10.
assert filtered_records['a'].count() == 1.
# b
record_arrays_close(
filtered_records['b'].values,
np.array([], dtype=example_dt)
)
np.testing.assert_array_equal(
filtered_records['b'].map_field('some_field1').id_arr,
np.array([])
)
assert np.isnan(filtered_records['b'].map_field('some_field1').min())
assert filtered_records['b'].count() == 0.
# c
record_arrays_close(
filtered_records['c'].values,
np.array([(8, 0, 2, 10., 21.)], dtype=example_dt)
)
np.testing.assert_array_equal(
filtered_records['c'].map_field('some_field1').id_arr,
np.array([8])
)
assert filtered_records['c'].map_field('some_field1').min() == 10.
assert filtered_records['c'].count() == 1.
# d
record_arrays_close(
filtered_records['d'].values,
np.array([], dtype=example_dt)
)
np.testing.assert_array_equal(
filtered_records['d'].map_field('some_field1').id_arr,
np.array([])
)
assert np.isnan(filtered_records['d'].map_field('some_field1').min())
assert filtered_records['d'].count() == 0.
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Count'
], dtype='object')
pd.testing.assert_series_equal(
records.stats(),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'), 2.25
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
records.stats(column='a'),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'), 3
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
records.stats(column='g1', group_by=group_by),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'), 6
],
index=stats_index,
name='g1'
)
)
pd.testing.assert_series_equal(
records['c'].stats(),
records.stats(column='c')
)
pd.testing.assert_series_equal(
records['c'].stats(),
records.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
records_grouped['g2'].stats(),
records_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
records_grouped['g2'].stats(),
records.stats(column='g2', group_by=group_by)
)
stats_df = records.stats(agg_func=None)
assert stats_df.shape == (4, 4)
pd.testing.assert_index_equal(stats_df.index, records.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
# ############# ranges.py ############# #
ts = pd.DataFrame({
'a': [1, -1, 3, -1, 5, -1],
'b': [-1, -1, -1, 4, 5, 6],
'c': [1, 2, 3, -1, -1, -1],
'd': [-1, -1, -1, -1, -1, -1]
}, index=[
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
datetime(2020, 1, 4),
datetime(2020, 1, 5),
datetime(2020, 1, 6)
])
ranges = vbt.Ranges.from_ts(ts, wrapper_kwargs=dict(freq='1 days'))
ranges_grouped = vbt.Ranges.from_ts(ts, wrapper_kwargs=dict(freq='1 days', group_by=group_by))
class TestRanges:
def test_mapped_fields(self):
for name in range_dt.names:
np.testing.assert_array_equal(
getattr(ranges, name).values,
ranges.values[name]
)
def test_from_ts(self):
record_arrays_close(
ranges.values,
np.array([
(0, 0, 0, 1, 1), (1, 0, 2, 3, 1), (2, 0, 4, 5, 1), (3, 1, 3, 5, 0), (4, 2, 0, 3, 1)
], dtype=range_dt)
)
assert ranges.wrapper.freq == day_dt
pd.testing.assert_index_equal(
ranges_grouped.wrapper.grouper.group_by,
group_by
)
def test_records_readable(self):
records_readable = ranges.records_readable
np.testing.assert_array_equal(
records_readable['Range Id'].values,
np.array([
0, 1, 2, 3, 4
])
)
np.testing.assert_array_equal(
records_readable['Column'].values,
np.array([
'a', 'a', 'a', 'b', 'c'
])
)
np.testing.assert_array_equal(
records_readable['Start Timestamp'].values,
np.array([
'2020-01-01T00:00:00.000000000', '2020-01-03T00:00:00.000000000',
'2020-01-05T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-01T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['End Timestamp'].values,
np.array([
'2020-01-02T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-06T00:00:00.000000000',
'2020-01-04T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['Status'].values,
np.array([
'Closed', 'Closed', 'Closed', 'Open', 'Closed'
])
)
def test_to_mask(self):
pd.testing.assert_series_equal(
ranges['a'].to_mask(),
ts['a'] != -1
)
pd.testing.assert_frame_equal(
ranges.to_mask(),
ts != -1
)
pd.testing.assert_frame_equal(
ranges_grouped.to_mask(),
pd.DataFrame(
[
[True, True],
[False, True],
[True, True],
[True, False],
[True, False],
[True, False]
],
index=ts.index,
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
def test_duration(self):
np.testing.assert_array_equal(
ranges['a'].duration.values,
np.array([1, 1, 1])
)
np.testing.assert_array_equal(
ranges.duration.values,
np.array([1, 1, 1, 3, 3])
)
def test_avg_duration(self):
assert ranges['a'].avg_duration() == pd.Timedelta('1 days 00:00:00')
pd.testing.assert_series_equal(
ranges.avg_duration(),
pd.Series(
np.array([86400000000000, 259200000000000, 259200000000000, 'NaT'], dtype='timedelta64[ns]'),
index=wrapper.columns
).rename('avg_duration')
)
pd.testing.assert_series_equal(
ranges_grouped.avg_duration(),
pd.Series(
np.array([129600000000000, 259200000000000], dtype='timedelta64[ns]'),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('avg_duration')
)
def test_max_duration(self):
assert ranges['a'].max_duration() == pd.Timedelta('1 days 00:00:00')
pd.testing.assert_series_equal(
ranges.max_duration(),
pd.Series(
np.array([86400000000000, 259200000000000, 259200000000000, 'NaT'], dtype='timedelta64[ns]'),
index=wrapper.columns
).rename('max_duration')
)
pd.testing.assert_series_equal(
ranges_grouped.max_duration(),
pd.Series(
np.array([259200000000000, 259200000000000], dtype='timedelta64[ns]'),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('max_duration')
)
def test_coverage(self):
assert ranges['a'].coverage() == 0.5
pd.testing.assert_series_equal(
ranges.coverage(),
pd.Series(
np.array([0.5, 0.5, 0.5, np.nan]),
index=ts2.columns
).rename('coverage')
)
pd.testing.assert_series_equal(
ranges.coverage(),
ranges.replace(records_arr=np.repeat(ranges.values, 2)).coverage()
)
pd.testing.assert_series_equal(
ranges.replace(records_arr=np.repeat(ranges.values, 2)).coverage(overlapping=True),
pd.Series(
np.array([1.0, 1.0, 1.0, np.nan]),
index=ts2.columns
).rename('coverage')
)
pd.testing.assert_series_equal(
ranges.coverage(normalize=False),
pd.Series(
np.array([3.0, 3.0, 3.0, np.nan]),
index=ts2.columns
).rename('coverage')
)
pd.testing.assert_series_equal(
ranges.replace(records_arr=np.repeat(ranges.values, 2)).coverage(overlapping=True, normalize=False),
pd.Series(
np.array([3.0, 3.0, 3.0, np.nan]),
index=ts2.columns
).rename('coverage')
)
pd.testing.assert_series_equal(
ranges_grouped.coverage(),
pd.Series(
np.array([0.4166666666666667, 0.25]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('coverage')
)
pd.testing.assert_series_equal(
ranges_grouped.coverage(),
ranges_grouped.replace(records_arr=np.repeat(ranges_grouped.values, 2)).coverage()
)
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Coverage', 'Overlap Coverage',
'Total Records', 'Duration: Min', 'Duration: Median', 'Duration: Max',
'Duration: Mean', 'Duration: Std'
], dtype='object')
pd.testing.assert_series_equal(
ranges.stats(),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-06 00:00:00'),
pd.Timedelta('6 days 00:00:00'), pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('0 days 00:00:00'), 1.25, pd.Timedelta('2 days 08:00:00'),
pd.Timedelta('2 days 08:00:00'), pd.Timedelta('2 days 08:00:00'),
pd.Timedelta('2 days 08:00:00'), pd.Timedelta('0 days 00:00:00')
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
ranges.stats(column='a'),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-06 00:00:00'),
pd.Timedelta('6 days 00:00:00'), pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('0 days 00:00:00'), 3, pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('0 days 00:00:00')
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
ranges.stats(column='g1', group_by=group_by),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-06 00:00:00'),
pd.Timedelta('6 days 00:00:00'), pd.Timedelta('5 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'), 4, pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('1 days 12:00:00'), pd.Timedelta('1 days 00:00:00')
],
index=stats_index,
name='g1'
)
)
pd.testing.assert_series_equal(
ranges['c'].stats(),
ranges.stats(column='c')
)
pd.testing.assert_series_equal(
ranges['c'].stats(),
ranges.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
ranges_grouped['g2'].stats(),
ranges_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
ranges_grouped['g2'].stats(),
ranges.stats(column='g2', group_by=group_by)
)
stats_df = ranges.stats(agg_func=None)
assert stats_df.shape == (4, 11)
pd.testing.assert_index_equal(stats_df.index, ranges.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
# ############# drawdowns.py ############# #
ts2 = pd.DataFrame({
'a': [2, 1, 3, 1, 4, 1],
'b': [1, 2, 1, 3, 1, 4],
'c': [1, 2, 3, 2, 1, 2],
'd': [1, 2, 3, 4, 5, 6]
}, index=[
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
datetime(2020, 1, 4),
datetime(2020, 1, 5),
datetime(2020, 1, 6)
])
drawdowns = vbt.Drawdowns.from_ts(ts2, wrapper_kwargs=dict(freq='1 days'))
drawdowns_grouped = vbt.Drawdowns.from_ts(ts2, wrapper_kwargs=dict(freq='1 days', group_by=group_by))
class TestDrawdowns:
def test_mapped_fields(self):
for name in drawdown_dt.names:
np.testing.assert_array_equal(
getattr(drawdowns, name).values,
drawdowns.values[name]
)
def test_ts(self):
pd.testing.assert_frame_equal(
drawdowns.ts,
ts2
)
pd.testing.assert_series_equal(
drawdowns['a'].ts,
ts2['a']
)
pd.testing.assert_frame_equal(
drawdowns_grouped['g1'].ts,
ts2[['a', 'b']]
)
assert drawdowns.replace(ts=None)['a'].ts is None
def test_from_ts(self):
record_arrays_close(
drawdowns.values,
np.array([
(0, 0, 0, 1, 1, 2, 2.0, 1.0, 3.0, 1), (1, 0, 2, 3, 3, 4, 3.0, 1.0, 4.0, 1),
(2, 0, 4, 5, 5, 5, 4.0, 1.0, 1.0, 0), (3, 1, 1, 2, 2, 3, 2.0, 1.0, 3.0, 1),
(4, 1, 3, 4, 4, 5, 3.0, 1.0, 4.0, 1), (5, 2, 2, 3, 4, 5, 3.0, 1.0, 2.0, 0)
], dtype=drawdown_dt)
)
assert drawdowns.wrapper.freq == day_dt
pd.testing.assert_index_equal(
drawdowns_grouped.wrapper.grouper.group_by,
group_by
)
def test_records_readable(self):
records_readable = drawdowns.records_readable
np.testing.assert_array_equal(
records_readable['Drawdown Id'].values,
np.array([
0, 1, 2, 3, 4, 5
])
)
np.testing.assert_array_equal(
records_readable['Column'].values,
np.array([
'a', 'a', 'a', 'b', 'b', 'c'
])
)
np.testing.assert_array_equal(
records_readable['Peak Timestamp'].values,
np.array([
'2020-01-01T00:00:00.000000000', '2020-01-03T00:00:00.000000000',
'2020-01-05T00:00:00.000000000', '2020-01-02T00:00:00.000000000',
'2020-01-04T00:00:00.000000000', '2020-01-03T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['Start Timestamp'].values,
np.array([
'2020-01-02T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-03T00:00:00.000000000',
'2020-01-05T00:00:00.000000000', '2020-01-04T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['Valley Timestamp'].values,
np.array([
'2020-01-02T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-03T00:00:00.000000000',
'2020-01-05T00:00:00.000000000', '2020-01-05T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['End Timestamp'].values,
np.array([
'2020-01-03T00:00:00.000000000', '2020-01-05T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-06T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['Peak Value'].values,
np.array([
2., 3., 4., 2., 3., 3.
])
)
np.testing.assert_array_equal(
records_readable['Valley Value'].values,
np.array([
1., 1., 1., 1., 1., 1.
])
)
np.testing.assert_array_equal(
records_readable['End Value'].values,
np.array([
3., 4., 1., 3., 4., 2.
])
)
np.testing.assert_array_equal(
records_readable['Status'].values,
np.array([
'Recovered', 'Recovered', 'Active', 'Recovered', 'Recovered', 'Active'
])
)
def test_drawdown(self):
np.testing.assert_array_almost_equal(
drawdowns['a'].drawdown.values,
np.array([-0.5, -0.66666667, -0.75])
)
np.testing.assert_array_almost_equal(
drawdowns.drawdown.values,
np.array([-0.5, -0.66666667, -0.75, -0.5, -0.66666667, -0.66666667])
)
pd.testing.assert_frame_equal(
drawdowns.drawdown.to_pd(),
pd.DataFrame(
np.array([
[np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan],
[-0.5, np.nan, np.nan, np.nan],
[np.nan, -0.5, np.nan, np.nan],
[-0.66666669, np.nan, np.nan, np.nan],
[-0.75, -0.66666669, -0.66666669, np.nan]
]),
index=ts2.index,
columns=ts2.columns
)
)
def test_avg_drawdown(self):
assert drawdowns['a'].avg_drawdown() == -0.6388888888888888
pd.testing.assert_series_equal(
drawdowns.avg_drawdown(),
pd.Series(
np.array([-0.63888889, -0.58333333, -0.66666667, np.nan]),
index=wrapper.columns
).rename('avg_drawdown')
)
pd.testing.assert_series_equal(
drawdowns_grouped.avg_drawdown(),
pd.Series(
np.array([-0.6166666666666666, -0.6666666666666666]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('avg_drawdown')
)
def test_max_drawdown(self):
assert drawdowns['a'].max_drawdown() == -0.75
pd.testing.assert_series_equal(
drawdowns.max_drawdown(),
pd.Series(
np.array([-0.75, -0.66666667, -0.66666667, np.nan]),
index=wrapper.columns
).rename('max_drawdown')
)
pd.testing.assert_series_equal(
drawdowns_grouped.max_drawdown(),
pd.Series(
np.array([-0.75, -0.6666666666666666]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('max_drawdown')
)
def test_recovery_return(self):
np.testing.assert_array_almost_equal(
drawdowns['a'].recovery_return.values,
np.array([2., 3., 0.])
)
np.testing.assert_array_almost_equal(
drawdowns.recovery_return.values,
np.array([2., 3., 0., 2., 3., 1.])
)
pd.testing.assert_frame_equal(
drawdowns.recovery_return.to_pd(),
pd.DataFrame(
np.array([
[np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan],
[2.0, np.nan, np.nan, np.nan],
[np.nan, 2.0, np.nan, np.nan],
[3.0, np.nan, np.nan, np.nan],
[0.0, 3.0, 1.0, np.nan]
]),
index=ts2.index,
columns=ts2.columns
)
)
def test_avg_recovery_return(self):
assert drawdowns['a'].avg_recovery_return() == 1.6666666666666667
pd.testing.assert_series_equal(
drawdowns.avg_recovery_return(),
pd.Series(
np.array([1.6666666666666667, 2.5, 1.0, np.nan]),
index=wrapper.columns
).rename('avg_recovery_return')
)
pd.testing.assert_series_equal(
drawdowns_grouped.avg_recovery_return(),
pd.Series(
np.array([2.0, 1.0]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('avg_recovery_return')
)
def test_max_recovery_return(self):
assert drawdowns['a'].max_recovery_return() == 3.0
pd.testing.assert_series_equal(
drawdowns.max_recovery_return(),
pd.Series(
np.array([3.0, 3.0, 1.0, np.nan]),
index=wrapper.columns
).rename('max_recovery_return')
)
pd.testing.assert_series_equal(
drawdowns_grouped.max_recovery_return(),
pd.Series(
np.array([3.0, 1.0]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('max_recovery_return')
)
def test_duration(self):
np.testing.assert_array_almost_equal(
drawdowns['a'].duration.values,
np.array([1, 1, 1])
)
np.testing.assert_array_almost_equal(
drawdowns.duration.values,
np.array([1, 1, 1, 1, 1, 3])
)
def test_avg_duration(self):
assert drawdowns['a'].avg_duration() == pd.Timedelta('1 days 00:00:00')
pd.testing.assert_series_equal(
drawdowns.avg_duration(),
pd.Series(
np.array([86400000000000, 86400000000000, 259200000000000, 'NaT'], dtype='timedelta64[ns]'),
index=wrapper.columns
).rename('avg_duration')
)
pd.testing.assert_series_equal(
drawdowns_grouped.avg_duration(),
pd.Series(
np.array([86400000000000, 259200000000000], dtype='timedelta64[ns]'),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('avg_duration')
)
def test_max_duration(self):
assert drawdowns['a'].max_duration() == pd.Timedelta('1 days 00:00:00')
pd.testing.assert_series_equal(
drawdowns.max_duration(),
pd.Series(
np.array([86400000000000, 86400000000000, 259200000000000, 'NaT'], dtype='timedelta64[ns]'),
index=wrapper.columns
).rename('max_duration')
)
pd.testing.assert_series_equal(
drawdowns_grouped.max_duration(),
pd.Series(
np.array([86400000000000, 259200000000000], dtype='timedelta64[ns]'),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('max_duration')
)
def test_coverage(self):
assert drawdowns['a'].coverage() == 0.5
pd.testing.assert_series_equal(
drawdowns.coverage(),
pd.Series(
np.array([0.5, 0.3333333333333333, 0.5, np.nan]),
index=ts2.columns
).rename('coverage')
)
pd.testing.assert_series_equal(
drawdowns_grouped.coverage(),
pd.Series(
np.array([0.4166666666666667, 0.25]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('coverage')
)
def test_decline_duration(self):
np.testing.assert_array_almost_equal(
drawdowns['a'].decline_duration.values,
np.array([1., 1., 1.])
)
np.testing.assert_array_almost_equal(
drawdowns.decline_duration.values,
np.array([1., 1., 1., 1., 1., 2.])
)
def test_recovery_duration(self):
np.testing.assert_array_almost_equal(
drawdowns['a'].recovery_duration.values,
np.array([1, 1, 0])
)
np.testing.assert_array_almost_equal(
drawdowns.recovery_duration.values,
np.array([1, 1, 0, 1, 1, 1])
)
def test_recovery_duration_ratio(self):
np.testing.assert_array_almost_equal(
drawdowns['a'].recovery_duration_ratio.values,
np.array([1., 1., 0.])
)
np.testing.assert_array_almost_equal(
drawdowns.recovery_duration_ratio.values,
np.array([1., 1., 0., 1., 1., 0.5])
)
def test_active_records(self):
assert isinstance(drawdowns.active, vbt.Drawdowns)
assert drawdowns.active.wrapper == drawdowns.wrapper
record_arrays_close(
drawdowns['a'].active.values,
np.array([
(2, 0, 4, 5, 5, 5, 4., 1., 1., 0)
], dtype=drawdown_dt)
)
record_arrays_close(
drawdowns['a'].active.values,
drawdowns.active['a'].values
)
record_arrays_close(
drawdowns.active.values,
np.array([
(2, 0, 4, 5, 5, 5, 4.0, 1.0, 1.0, 0), (5, 2, 2, 3, 4, 5, 3.0, 1.0, 2.0, 0)
], dtype=drawdown_dt)
)
def test_recovered_records(self):
assert isinstance(drawdowns.recovered, vbt.Drawdowns)
assert drawdowns.recovered.wrapper == drawdowns.wrapper
record_arrays_close(
drawdowns['a'].recovered.values,
np.array([
(0, 0, 0, 1, 1, 2, 2.0, 1.0, 3.0, 1), (1, 0, 2, 3, 3, 4, 3.0, 1.0, 4.0, 1)
], dtype=drawdown_dt)
)
record_arrays_close(
drawdowns['a'].recovered.values,
drawdowns.recovered['a'].values
)
record_arrays_close(
drawdowns.recovered.values,
np.array([
(0, 0, 0, 1, 1, 2, 2.0, 1.0, 3.0, 1), (1, 0, 2, 3, 3, 4, 3.0, 1.0, 4.0, 1),
(3, 1, 1, 2, 2, 3, 2.0, 1.0, 3.0, 1), (4, 1, 3, 4, 4, 5, 3.0, 1.0, 4.0, 1)
], dtype=drawdown_dt)
)
def test_active_drawdown(self):
assert drawdowns['a'].active_drawdown() == -0.75
pd.testing.assert_series_equal(
drawdowns.active_drawdown(),
pd.Series(
np.array([-0.75, np.nan, -0.3333333333333333, np.nan]),
index=wrapper.columns
).rename('active_drawdown')
)
with pytest.raises(Exception):
drawdowns_grouped.active_drawdown()
def test_active_duration(self):
assert drawdowns['a'].active_duration() == np.timedelta64(86400000000000)
pd.testing.assert_series_equal(
drawdowns.active_duration(),
pd.Series(
np.array([86400000000000, 'NaT', 259200000000000, 'NaT'], dtype='timedelta64[ns]'),
index=wrapper.columns
).rename('active_duration')
)
with pytest.raises(Exception):
drawdowns_grouped.active_duration()
def test_active_recovery(self):
assert drawdowns['a'].active_recovery() == 0.
pd.testing.assert_series_equal(
drawdowns.active_recovery(),
pd.Series(
np.array([0., np.nan, 0.5, np.nan]),
index=wrapper.columns
).rename('active_recovery')
)
with pytest.raises(Exception):
drawdowns_grouped.active_recovery()
def test_active_recovery_return(self):
assert drawdowns['a'].active_recovery_return() == 0.
pd.testing.assert_series_equal(
drawdowns.active_recovery_return(),
pd.Series(
np.array([0., np.nan, 1., np.nan]),
index=wrapper.columns
).rename('active_recovery_return')
)
with pytest.raises(Exception):
drawdowns_grouped.active_recovery_return()
def test_active_recovery_duration(self):
assert drawdowns['a'].active_recovery_duration() == pd.Timedelta('0 days 00:00:00')
pd.testing.assert_series_equal(
drawdowns.active_recovery_duration(),
pd.Series(
np.array([0, 'NaT', 86400000000000, 'NaT'], dtype='timedelta64[ns]'),
index=wrapper.columns
).rename('active_recovery_duration')
)
with pytest.raises(Exception):
drawdowns_grouped.active_recovery_duration()
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Coverage [%]', 'Total Records',
'Total Recovered Drawdowns', 'Total Active Drawdowns',
'Active Drawdown [%]', 'Active Duration', 'Active Recovery [%]',
'Active Recovery Return [%]', 'Active Recovery Duration',
'Max Drawdown [%]', 'Avg Drawdown [%]', 'Max Drawdown Duration',
'Avg Drawdown Duration', 'Max Recovery Return [%]',
'Avg Recovery Return [%]', 'Max Recovery Duration',
'Avg Recovery Duration', 'Avg Recovery Duration Ratio'
], dtype='object')
pd.testing.assert_series_equal(
drawdowns.stats(),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-06 00:00:00'),
pd.Timedelta('6 days 00:00:00'), 44.444444444444436, 1.5, 1.0, 0.5,
54.166666666666664, pd.Timedelta('2 days 00:00:00'), 25.0, 50.0,
pd.Timedelta('0 days 12:00:00'), 66.66666666666666, 58.33333333333333,
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'), 300.0, 250.0,
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'), 1.0
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
drawdowns.stats(settings=dict(incl_active=True)),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-06 00:00:00'),
pd.Timedelta('6 days 00:00:00'), 44.444444444444436, 1.5, 1.0, 0.5,
54.166666666666664, pd.Timedelta('2 days 00:00:00'), 25.0, 50.0,
pd.Timedelta('0 days 12:00:00'), 69.44444444444444, 62.962962962962955,
pd.Timedelta('1 days 16:00:00'), pd.Timedelta('1 days 16:00:00'), 300.0, 250.0,
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'), 1.0
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
drawdowns.stats(column='a'),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-06 00:00:00'),
pd.Timedelta('6 days 00:00:00'), 50.0, 3, 2, 1, 75.0, pd.Timedelta('1 days 00:00:00'),
0.0, 0.0, pd.Timedelta('0 days 00:00:00'), 66.66666666666666, 58.33333333333333,
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'), 300.0, 250.0,
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'), 1.0
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
drawdowns.stats(column='g1', group_by=group_by),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-06 00:00:00'),
pd.Timedelta('6 days 00:00:00'), 41.66666666666667, 5, 4, 1, 66.66666666666666,
58.33333333333333, pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'),
300.0, 250.0, pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'), 1.0
],
index=pd.Index([
'Start', 'End', 'Period', 'Coverage [%]', 'Total Records',
'Total Recovered Drawdowns', 'Total Active Drawdowns',
'Max Drawdown [%]', 'Avg Drawdown [%]', 'Max Drawdown Duration',
'Avg Drawdown Duration', 'Max Recovery Return [%]',
'Avg Recovery Return [%]', 'Max Recovery Duration',
'Avg Recovery Duration', 'Avg Recovery Duration Ratio'
], dtype='object'),
name='g1'
)
)
pd.testing.assert_series_equal(
drawdowns['c'].stats(),
drawdowns.stats(column='c')
)
pd.testing.assert_series_equal(
drawdowns['c'].stats(),
drawdowns.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
drawdowns_grouped['g2'].stats(),
drawdowns_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
drawdowns_grouped['g2'].stats(),
drawdowns.stats(column='g2', group_by=group_by)
)
stats_df = drawdowns.stats(agg_func=None)
assert stats_df.shape == (4, 21)
pd.testing.assert_index_equal(stats_df.index, drawdowns.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
# ############# orders.py ############# #
close = pd.Series([1, 2, 3, 4, 5, 6, 7, 8], index=[
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
datetime(2020, 1, 4),
datetime(2020, 1, 5),
datetime(2020, 1, 6),
datetime(2020, 1, 7),
datetime(2020, 1, 8)
]).vbt.tile(4, keys=['a', 'b', 'c', 'd'])
size = np.full(close.shape, np.nan, dtype=np.float_)
size[:, 0] = [1, 0.1, -1, -0.1, np.nan, 1, -1, 2]
size[:, 1] = [-1, -0.1, 1, 0.1, np.nan, -1, 1, -2]
size[:, 2] = [1, 0.1, -1, -0.1, np.nan, 1, -2, 2]
orders = vbt.Portfolio.from_orders(close, size, fees=0.01, freq='1 days').orders
orders_grouped = orders.regroup(group_by)
class TestOrders:
def test_mapped_fields(self):
for name in order_dt.names:
np.testing.assert_array_equal(
getattr(orders, name).values,
orders.values[name]
)
def test_close(self):
pd.testing.assert_frame_equal(
orders.close,
close
)
pd.testing.assert_series_equal(
orders['a'].close,
close['a']
)
pd.testing.assert_frame_equal(
orders_grouped['g1'].close,
close[['a', 'b']]
)
assert orders.replace(close=None)['a'].close is None
def test_records_readable(self):
records_readable = orders.records_readable
np.testing.assert_array_equal(
records_readable['Order Id'].values,
np.array([
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20
])
)
np.testing.assert_array_equal(
records_readable['Timestamp'].values,
np.array([
'2020-01-01T00:00:00.000000000', '2020-01-02T00:00:00.000000000',
'2020-01-03T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-07T00:00:00.000000000',
'2020-01-08T00:00:00.000000000', '2020-01-01T00:00:00.000000000',
'2020-01-02T00:00:00.000000000', '2020-01-03T00:00:00.000000000',
'2020-01-04T00:00:00.000000000', '2020-01-06T00:00:00.000000000',
'2020-01-07T00:00:00.000000000', '2020-01-08T00:00:00.000000000',
'2020-01-01T00:00:00.000000000', '2020-01-02T00:00:00.000000000',
'2020-01-03T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-07T00:00:00.000000000',
'2020-01-08T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['Column'].values,
np.array([
'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', 'b', 'b', 'b', 'b', 'b',
'b', 'c', 'c', 'c', 'c', 'c', 'c', 'c'
])
)
np.testing.assert_array_equal(
records_readable['Size'].values,
np.array([
1.0, 0.1, 1.0, 0.1, 1.0, 1.0, 2.0, 1.0, 0.1, 1.0, 0.1, 1.0, 1.0,
2.0, 1.0, 0.1, 1.0, 0.1, 1.0, 2.0, 2.0
])
)
np.testing.assert_array_equal(
records_readable['Price'].values,
np.array([
1.0, 2.0, 3.0, 4.0, 6.0, 7.0, 8.0, 1.0, 2.0, 3.0, 4.0, 6.0, 7.0,
8.0, 1.0, 2.0, 3.0, 4.0, 6.0, 7.0, 8.0
])
)
np.testing.assert_array_equal(
records_readable['Fees'].values,
np.array([
0.01, 0.002, 0.03, 0.004, 0.06, 0.07, 0.16, 0.01, 0.002, 0.03,
0.004, 0.06, 0.07, 0.16, 0.01, 0.002, 0.03, 0.004, 0.06, 0.14,
0.16
])
)
np.testing.assert_array_equal(
records_readable['Side'].values,
np.array([
'Buy', 'Buy', 'Sell', 'Sell', 'Buy', 'Sell', 'Buy', 'Sell', 'Sell',
'Buy', 'Buy', 'Sell', 'Buy', 'Sell', 'Buy', 'Buy', 'Sell', 'Sell',
'Buy', 'Sell', 'Buy'
])
)
def test_buy_records(self):
assert isinstance(orders.buy, vbt.Orders)
assert orders.buy.wrapper == orders.wrapper
record_arrays_close(
orders['a'].buy.values,
np.array([
(0, 0, 0, 1., 1., 0.01, 0), (1, 0, 1, 0.1, 2., 0.002, 0),
(4, 0, 5, 1., 6., 0.06, 0), (6, 0, 7, 2., 8., 0.16, 0)
], dtype=order_dt)
)
record_arrays_close(
orders['a'].buy.values,
orders.buy['a'].values
)
record_arrays_close(
orders.buy.values,
np.array([
(0, 0, 0, 1., 1., 0.01, 0), (1, 0, 1, 0.1, 2., 0.002, 0),
(4, 0, 5, 1., 6., 0.06, 0), (6, 0, 7, 2., 8., 0.16, 0),
(9, 1, 2, 1., 3., 0.03, 0), (10, 1, 3, 0.1, 4., 0.004, 0),
(12, 1, 6, 1., 7., 0.07, 0), (14, 2, 0, 1., 1., 0.01, 0),
(15, 2, 1, 0.1, 2., 0.002, 0), (18, 2, 5, 1., 6., 0.06, 0),
(20, 2, 7, 2., 8., 0.16, 0)
], dtype=order_dt)
)
def test_sell_records(self):
assert isinstance(orders.sell, vbt.Orders)
assert orders.sell.wrapper == orders.wrapper
record_arrays_close(
orders['a'].sell.values,
np.array([
(2, 0, 2, 1., 3., 0.03, 1), (3, 0, 3, 0.1, 4., 0.004, 1),
(5, 0, 6, 1., 7., 0.07, 1)
], dtype=order_dt)
)
record_arrays_close(
orders['a'].sell.values,
orders.sell['a'].values
)
record_arrays_close(
orders.sell.values,
np.array([
(2, 0, 2, 1., 3., 0.03, 1), (3, 0, 3, 0.1, 4., 0.004, 1),
(5, 0, 6, 1., 7., 0.07, 1), (7, 1, 0, 1., 1., 0.01, 1),
(8, 1, 1, 0.1, 2., 0.002, 1), (11, 1, 5, 1., 6., 0.06, 1),
(13, 1, 7, 2., 8., 0.16, 1), (16, 2, 2, 1., 3., 0.03, 1),
(17, 2, 3, 0.1, 4., 0.004, 1), (19, 2, 6, 2., 7., 0.14, 1)
], dtype=order_dt)
)
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Total Records', 'Total Buy Orders', 'Total Sell Orders',
'Min Size', 'Max Size', 'Avg Size', 'Avg Buy Size', 'Avg Sell Size',
'Avg Buy Price', 'Avg Sell Price', 'Total Fees', 'Min Fees', 'Max Fees',
'Avg Fees', 'Avg Buy Fees', 'Avg Sell Fees'
], dtype='object')
pd.testing.assert_series_equal(
orders.stats(),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-08 00:00:00'),
pd.Timedelta('8 days 00:00:00'), 5.25, 2.75, 2.5, 0.10000000000000002, 2.0,
0.9333333333333335, 0.9166666666666666, 0.9194444444444446, 4.388888888888889,
4.527777777777779, 0.26949999999999996, 0.002, 0.16, 0.051333333333333335,
0.050222222222222224, 0.050222222222222224
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
orders.stats(column='a'),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-08 00:00:00'),
pd.Timedelta('8 days 00:00:00'), 7, 4, 3, 0.1, 2.0, 0.8857142857142858,
1.025, 0.7000000000000001, 4.25, 4.666666666666667, 0.33599999999999997,
0.002, 0.16, 0.047999999999999994, 0.057999999999999996, 0.03466666666666667
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
orders.stats(column='g1', group_by=group_by),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-08 00:00:00'),
pd.Timedelta('8 days 00:00:00'), 14, 7, 7, 0.1, 2.0, 0.8857142857142858,
0.8857142857142856, 0.8857142857142858, 4.428571428571429, 4.428571428571429,
0.672, 0.002, 0.16, 0.048, 0.048, 0.047999999999999994
],
index=stats_index,
name='g1'
)
)
pd.testing.assert_series_equal(
orders['c'].stats(),
orders.stats(column='c')
)
pd.testing.assert_series_equal(
orders['c'].stats(),
orders.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
orders_grouped['g2'].stats(),
orders_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
orders_grouped['g2'].stats(),
orders.stats(column='g2', group_by=group_by)
)
stats_df = orders.stats(agg_func=None)
assert stats_df.shape == (4, 19)
pd.testing.assert_index_equal(stats_df.index, orders.wrapper.columns)
| pd.testing.assert_index_equal(stats_df.columns, stats_index) | pandas.testing.assert_index_equal |
import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
import json
import pandas as pd
import numpy as np
import pathlib
import streamlit as st
import getYfData as yfd
from time import sleep
import time
from datetime import datetime
from datetime import timedelta
from dateutil.relativedelta import relativedelta, MO
import math
from dataclasses import dataclass
from dataclasses import field
from dataclasses import InitVar
# ====================
# WAITING TASKS
# ====================
# add json object checker to all api json calls
# label every step in functions
# reduce nested loops to shorter code
# correct output size for last dc
# ====================
# DATACLASS DEFINITIONS
# ====================
@dataclass # data class to hold single ticker dataclass object
class singleTickerData(object):
ticker: str
interval: str
start_date: str
end_date: str
outputsize: int
df_tsMeta: pd.core.frame.DataFrame
df_tsData: pd.core.frame.DataFrame
@dataclass  # data class to hold a list of ticker dataclass objects
class multiTickerData(object):
    listTickerDClass: list
    def populatelist(self, dcItem):
        self.listTickerDClass.append(dcItem)
@dataclass # data class to hold input values for each ticker - mostly useless might remove it
class singleTickerInput(object):
# api keys
#alpha_vantage_api_key : str = "<KEY>"
twelvedata_api_key: str = "69287070d2f24f60a821b96ec1281011"
ticker: str = "AAPL"
interval: str = "1min"
start_date: str = "2016-01-20"
end_date: str = ""
earliestDateTime_data: str = ""
earliestUnixTime_data: str = ""
timezone: str = ""
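# A minimal usage sketch (not part of the original flow; all values are placeholder examples)
# showing how these dataclasses fit together: one singleTickerData per ticker, collected
# inside a multiTickerData holder. Real frames come from the 12data time_series calls below.
def _example_build_ticker_dataclasses():
    dc = singleTickerData(
        ticker="AAPL", interval="1min",
        start_date="2016-01-20", end_date="2016-01-21", outputsize=390,
        df_tsMeta=pd.DataFrame(), df_tsData=pd.DataFrame())
    holder = multiTickerData(listTickerDClass=[])
    holder.populatelist(dc)
    return holder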
# ====================
# TEXT FORMATTING FUNCTIONS
# ====================
# ====================
# HELPER FUNCTIONS
# ====================
# function to create the folder nwFolder under the parent project directory if it does not exist
def createfolder(nwFolder):
ii = pathlib.Path(__file__).parent.resolve().parents[0]
#print(f'{str(ii)} is main directory')
p = ii / f'{nwFolder}'
#p = pathlib.Path(f'{nwFolder}/')
if not p.exists():
p.mkdir(parents=True, exist_ok=True)
# fn = "test.txt" # I don't know what is your fn
#filepath = p / fn
# with filepath.open("w", encoding ="utf-8") as f:
# f.write(data)
return p
# function to confirm a json key/pair exists
def chk_json_key_exists(json_key, json_object):
if (json_key in json_object):
json_key_exist = True
else:
json_key_exist = False
return json_key_exist
# create the files.json folder if it does not exist and dump the json file into it
def dumpjsonData(filename, json_object):
p = str(createfolder('files.json'))
print(f'json files will be stored in {p} folder')
# creation of json file for files with status ok
mypath = pathlib.Path(f'{p}', f'{filename}.json')
with open(mypath, "w") as outfile:
json.dump(json_object, outfile, indent=4, sort_keys=True)
# convert json key/pair to df if status key is "ok"
def get_tck_apistocks_json_df(json_object, status_hdr='status', json_key='data'):
status_hdr_exist = chk_json_key_exists(
json_key=status_hdr, json_object=json_object)
data_df = pd.DataFrame()
if (status_hdr_exist == True):
status_hdr_data = json_object[status_hdr]
if status_hdr_data == 'ok':
json_key_data = json_object[json_key]
data_df = pd.DataFrame(json_key_data)
return data_df
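# A minimal sketch (hypothetical payload, not a real API response) of the envelope shape that
# get_tck_apistocks_json_df expects: a 'status' field plus a 'data' list of records.
def _example_apistocks_to_df():
    payload = {"status": "ok", "data": [
        {"symbol": "AAPL", "type": "Common Stock",
         "country": "United States", "exchange": "NASDAQ"}]}
    # returns a one-row DataFrame built from the 'data' records
    return get_tck_apistocks_json_df(payload, status_hdr='status', json_key='data')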
# get ticker stats: shares float and outstanding from 12data statistics api call
def get_tck_stats_items(apikey_12Data, symbol):
data_type = "statistics"
apikey = apikey_12Data
twelvedata_url = f'https://api.twelvedata.com/{data_type}?symbol={symbol}&apikey={apikey}'
json_object = requests.get(twelvedata_url).json()
json_object = chkJsonObj(json_object)
    statistics_exists = chk_json_key_exists(
        json_key="statistics", json_object=json_object)
    if statistics_exists:
        json_object = json_object["statistics"]
        stock_statistics_exists = chk_json_key_exists(
            json_key="stock_statistics", json_object=json_object)
        if stock_statistics_exists:
            json_object = json_object["stock_statistics"]
            shares_outstand_data = json_object["shares_outstanding"]
            float_shares_data = json_object["float_shares"]
            # keep the return signature consistent with the yfinance fallback below
            return shares_outstand_data, float_shares_data, False, False
    # without a premium 12data subscription the statistics endpoint returns no data,
    # so fall back to yfinance
    shares_outstand_data, float_shares_data, error_out_shre, error_flt_shre = yfd.get_yf_float_outstand_shares(
        symbol)
    return shares_outstand_data, float_shares_data, error_out_shre, error_flt_shre
# get ticker stats: shares float and outstanding from yfinance
def get_tck_stats_items_from_yFin(symbol):
shares_outstand_data, float_shares_data, error_out_shre, error_flt_shre = yfd.get_yf_float_outstand_shares(
symbol)
return shares_outstand_data, float_shares_data, error_out_shre, error_flt_shre
# get ticker stocks df: list of tickers from 12data statistics api call
# @st.cache
def get_tck_stocks_df(apikey_12Data):
data_type = "stocks"
apikey = apikey_12Data
twelvedata_url = f'https://api.twelvedata.com/{data_type}'
json_object = requests.get(twelvedata_url).json()
json_object = chkJsonObj(json_object)
#types_stocks = ['EQUITY', 'Common', 'Common Stock', 'American Depositary Receipt',
# 'Real Estate Investment Trust (REIT)', 'Unit', 'GDR', 'Closed-end Fund',
# 'ETF', 'Depositary Receipt', 'Preferred Stock', 'Limited Partnership',
# 'OTHER_SECURITY_TYPE', 'Warrant', 'STRUCTURED_PRODUCT', 'Exchange-traded Note',
# 'Right', 'FUND', 'Trust', 'Index', 'Unit Of Beneficial Interest',
# 'MUTUALFUND', 'New York Registered Shares']
data_df = get_tck_apistocks_json_df(
json_object, status_hdr='status', json_key='data')
return data_df
# get ticker name series: from imported df -called from combine_stock_stats_items
def get_tcker_symbol_lst(data_df):
symbol_lst = sorted(data_df["symbol"].unique())
return symbol_lst
# get ticker type series: from imported df -called from combine_stock_stats_items
def get_tcker_type_lst(data_df):
type_lst = sorted(data_df["type"].unique())
return type_lst
# get ticker country series: from imported df -called from combine_stock_stats_items
def get_tcker_country_lst(data_df):
country_lst = sorted(data_df["country"].unique())
return country_lst
# get ticker exchange series: from imported df -called from combine_stock_stats_items
def get_tcker_exchange_lst(data_df):
exchange_lst = sorted(data_df["exchange"].unique())
return exchange_lst
# confirm which members of a symbol list exist in the stocks df and return the matching rows
def filter_tcker_symbollist(stocks_df, symbol_lst):
    if len(symbol_lst) != 0:
        cond_symbol = stocks_df["symbol"].isin(symbol_lst)
        return stocks_df[cond_symbol]
    return stocks_df
# filter tickers stock df for filter conditions
def filter_tcker(apikey_12Data, stocks_df, symbol_select, type_select, country_select, exchange_select):
lstOfDf = []
if len(symbol_select) != 0:
cond_symbol = stocks_df["symbol"].isin(symbol_select)
df_symbol = stocks_df[cond_symbol]
lstOfDf.append(df_symbol)
if len(type_select) != 0:
cond_type = stocks_df["type"].isin(type_select)
df_type = stocks_df[cond_type]
lstOfDf.append(df_type)
if len(country_select) != 0:
cond_country = stocks_df["country"].isin(country_select)
df_country = stocks_df[cond_country]
lstOfDf.append(df_country)
if len(exchange_select) != 0:
cond_exchange = stocks_df["exchange"].isin(exchange_select)
df_exchange = stocks_df[cond_exchange]
lstOfDf.append(df_exchange)
    if len(lstOfDf) == 0:
        df_filter = stocks_df
    else:
        # successively inner-join the filtered frames on the identifying columns,
        # dropping the duplicate columns brought in by each merge
        df_filter = lstOfDf[0]
        for df_next in lstOfDf[1:]:
            df_filter = pd.merge(df_filter, df_next, how='inner', on=[
                'symbol', 'type', 'country', 'exchange'], suffixes=('', '_DROP')).filter(regex='^(?!.*_DROP)')
df_filter, symb_error_out_shre_lst, symb_error_flt_shre_lst = combine_stock_stats_items(
apikey_12Data, df_filter)
df_filter = addEarliestTimeStampsSeries(apikey_12Data, df_filter)
return df_filter, symb_error_out_shre_lst, symb_error_flt_shre_lst
# add time-stamp series to stock dataframe
def addEarliestTimeStampsSeries(apikey_12Data, df_filter):
str_timestamp_lst = []
unx_timestamp_lst = []
for indx in df_filter.index:
symbol = df_filter['symbol'][indx]
earliest_dct = getTickerEarliesrTimeStamp(apikey_12Data, symbol)
earilest_dt_str = earliest_dct['datetime_data']
earilest_dt_unx = earliest_dct['unix_time_data']
str_timestamp_lst.append(earilest_dt_str)
unx_timestamp_lst.append(earilest_dt_unx)
df_filter["earliest_timestamp"] = str_timestamp_lst
df_filter["earliest_timestamp_unix"] = unx_timestamp_lst
return df_filter
#combine stock df with shares float/outstanding series
def combine_stock_stats_items(apikey_12Data, data_df):
# get listing of symbols
symbol_lst = get_tcker_symbol_lst(data_df)
cond_lst = []
shares_outstand_lst = []
float_shares_lst = []
symb_error_out_shre_lst = []
symb_error_flt_shre_lst = []
# loop names to get 2 shares data
for symbol in symbol_lst:
shares_outstand_data, float_shares_data, error_out_shre, error_flt_shre = get_tck_stats_items_from_yFin(
symbol)
# append symbols not returning outstanding/float shares to error lists
if error_out_shre == True:
symb_error_out_shre_lst.append(symbol)
if error_flt_shre == True:
symb_error_flt_shre_lst.append(symbol)
cond = (data_df["symbol"] == symbol)
cond_lst.append(cond)
shares_outstand_lst.append(shares_outstand_data)
float_shares_lst.append(float_shares_data)
data_df["shares-outstanding"] = np.select(cond_lst, shares_outstand_lst)
data_df["float-shares"] = np.select(cond_lst, float_shares_lst)
return data_df, symb_error_out_shre_lst, symb_error_flt_shre_lst
# append data to data_df
#data_df = get_tck_stocks_df(apikey_12Data)
#get and clean up a list of tickers from a string
def get_tcker_lst_fromStringIO(string_data):
lines = string_data.split("\n")
string = []
tcker_lst = []
for line in lines:
line = line.replace('-\n', '')
line = line.replace('\r', '')
line = line.replace(',,', ',')
line = line.replace(', ', ',')
string.append(line)
tckerItemPerLine = line.split(",")
for tckers in tckerItemPerLine:
if tckers.startswith("'") and tckers.endswith("'"):
tckers = tckers[1:-1]
elif tckers.startswith('"') and tckers.endswith('"'):
tckers = tckers[1:-1]
tckers = tckers.upper()
if tckers != " ":
if tckers != "":
tcker_lst.append(tckers)
return tcker_lst
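# Small illustration (made-up input) of get_tcker_lst_fromStringIO: a comma/newline separated,
# possibly quoted blob of tickers is normalised into an upper-case list.
def _example_parse_ticker_string():
    raw = "aapl, 'msft'\n\"tsla\",  ,goog"
    return get_tcker_lst_fromStringIO(raw)  # -> ['AAPL', 'MSFT', 'TSLA', 'GOOG']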
#convert datetime object to string of length 10
def convertDateTimeToDateStrLen10(dateTimeObj):
StringLen10 = dateTimeObj.strftime("%Y-%m-%d")
return StringLen10
#convert string of length 10 to string of length 19
def convertDateStrLen10toDateStrLen19(StringLen10):
    if len(StringLen10) == 10:
        StringLen19 = f'{StringLen10} 00:00:00'
    else:
        # guard against unexpected input instead of failing later with a NameError
        raise ValueError(f'expected a date string of length 10 (YYYY-MM-DD), got: {StringLen10}')
    return StringLen19
#convert string of length 10 to datetime object
def convertDateStrLen10toDateTime(StringLen10):
StringLen19 = convertDateStrLen10toDateStrLen19(StringLen10)
datetimeObj = datetime.strptime(StringLen19, "%Y-%m-%d %H:%M:%S")
return datetimeObj
#convert string of length 10 to datetime object with NoHrsMinSecs
def convertDateStrLen10toDateTimeNoHrsMinSecs(StringLen10):
datetimeObj = datetime.strptime(StringLen10, "%Y-%m-%d")
return datetimeObj
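# Quick illustration (arbitrary example date) of how the date helpers chain together:
# "2021-03-01" -> "2021-03-01 00:00:00" -> datetime(2021, 3, 1, 0, 0).
def _example_date_conversions():
    as_str10 = convertDateTimeToDateStrLen10(datetime(2021, 3, 1))  # "2021-03-01"
    as_str19 = convertDateStrLen10toDateStrLen19(as_str10)          # "2021-03-01 00:00:00"
    as_dt = convertDateStrLen10toDateTime(as_str10)                 # datetime(2021, 3, 1, 0, 0)
    return as_str10, as_str19, as_dt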
# define Python user-defined exceptions
class Error(Exception):
"""Base class for other exceptions"""
pass
class CatchJsonError(Error):
"""Raised when error code from json object"""
"""Exception raised for errors in the input salary.
Attributes:
salary -- input salary which caused the error
message -- explanation of the error
"""
def __init__(self, code, message, status):
self.code = code
self.message = message
self.status = message
super().__init__(self.message)
def __str__(self):
st.error(f'code: {code} \nmessage:{message} \nstatus: {status}')
return f'{self.code} \n{self.message} \nstatus: {status}'
# WORK IN PROGRESS!!!!!!! check json object for api errors
def chkJsonObj(json_object):
#get the keys of json object dictr
keys = json_object.keys()
#check if error code json object exist in our json object keys
error_keys = ['code', 'message', 'status']
trueCnt = 0
falseCnt = 0
""" trueCnt values above zero tells us the error keys exist as keys of json dict
falseCnt values above zero tells us at least 1 error keys is not a member key of json dict"""
for error_key in error_keys:
testval = error_key in keys
if testval == True:
trueCnt += 1
else:
falseCnt += 1
# tells us this is a valid json object and returns it for onward use
if falseCnt > 0:
return json_object
# tells us this json object is an error json thrown out
else:
code = json_object['code']
message = json_object['message']
status = json_object['status']
raise CatchJsonError(code, message, status)
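# A minimal usage sketch for chkJsonObj with hypothetical payloads: a payload without the
# error keys is passed through unchanged, while an error envelope raises CatchJsonError.
def _example_chk_json():
    ok_payload = {"datetime": "2020-01-02", "unix_time": 1577923200}
    checked = chkJsonObj(ok_payload)  # returned unchanged, no error keys present
    try:
        chkJsonObj({"code": 401, "message": "invalid api key", "status": "error"})
    except CatchJsonError:
        pass  # an error envelope raises instead of returning
    return checked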
# function to get the earliest available timestamp for a stock ticker (still needs checking/running)
def getTickerEarliesrTimeStamp(twelvedata_api_key, ticker):
data_types = ["earliest_timestamp"]
#twelvedata_url = f'https://api.twelvedata.com/{data_types}?symbol={ticker}&interval=1day&apikey={twelvedata_api_key}'
twelvedata_url = f'https://api.twelvedata.com/earliest_timestamp?symbol={ticker}&interval=1day&apikey={twelvedata_api_key}'
json_object = requests.get(twelvedata_url).json()
json_object = chkJsonObj(json_object)
#session = requests.Session()
# In case I run into issues, retry my connection
#retries = Retry(total=5, backoff_factor=0.1, status_forcelist=[ 500, 502, 503, 504 ])
#session.mount('http://', HTTPAdapter(max_retries=retries))
# Initial request to get the ticker count
#r = session.get(twelvedata_url)
#json_object = r.json()
datetime_data = json_object['datetime']
unix_time_data = json_object['unix_time']
mydict = {}
mydict['datetime_data'] = datetime_data
mydict['unix_time_data'] = unix_time_data
return mydict
# function adds a time interval to a date using relative time delta calculations
def addRelTimeDelta(date_dt, timeIntervalValue, timeIntervalUnit):
errorcode = 0
if timeIntervalUnit == "seconds":
rel_delta = relativedelta(seconds=timeIntervalValue)
elif timeIntervalUnit == "minutes":
rel_delta = relativedelta(minutes=timeIntervalValue)
elif timeIntervalUnit == "hours":
rel_delta = relativedelta(hours=timeIntervalValue)
elif timeIntervalUnit == "days":
rel_delta = relativedelta(days=timeIntervalValue)
elif timeIntervalUnit == "weeks":
rel_delta = relativedelta(weeks=timeIntervalValue)
elif timeIntervalUnit == "months":
rel_delta = relativedelta(months=timeIntervalValue)
elif timeIntervalUnit == "years":
rel_delta = relativedelta(years=timeIntervalValue)
    else:
        errorcode = 1
        # an unsupported unit would otherwise leave rel_delta undefined and fail below
        raise ValueError(f'unsupported time interval unit: {timeIntervalUnit}')
#datetime_object = datetime.strptime(date_str, '%Y-%m-%d %H:%M:%S')
datetime_object = date_dt
datetime_object += rel_delta
new_date_str = datetime_object.strftime('%Y-%m-%d %H:%M:%S')
new_date_dt = datetime.strptime(new_date_str, '%Y-%m-%d %H:%M:%S')
return new_date_dt
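# Example call for addRelTimeDelta (arbitrary values): step a start date forward by 45 minutes,
# mirroring how per-request start/end windows are built further below.
def _example_add_rel_time_delta():
    start = datetime(2020, 1, 1, 9, 30, 0)
    return addRelTimeDelta(start, 45, "minutes")  # -> datetime(2020, 1, 1, 10, 15)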
""" # returns list of calculated start/end time/date that program will run to get complete range of data:
use this to circumspect the 5000entries limit per api call
"""
@st.cache(suppress_st_warning=True)
def getStartStopRngeLst(symbol, interval, start_date_dt, end_date_dt):
# required data
maxRequestPerDay_freekey = 800
maxNosDataPts = 5000
useNosDataPts = 4500
interval_lst = ['1min', '5min', '15min', '30min',
'45min', '1h', '2h', '4h', '1day', '1week', '1month']
intervalQty_lst = [1, 5, 15, 30, 45,
60, 120, 240, 1440, 10080, 44640]
start_date_str = start_date_dt.strftime("%Y-%m-%d %H:%M:%S")
end_date_str = end_date_dt.strftime("%Y-%m-%d %H:%M:%S")
# get difference between start and end dates
#timedelta_raw = parsed_end - parsed_start
timedelta_raw = end_date_dt - start_date_dt
timedeltaInSeconds_int = timedelta_raw.total_seconds()
timedeltaInMinutes_int = timedeltaInSeconds_int / 60
# dictionary of mins/bar mapped against interval ie 5mins:5
interval_intervalQty_dict = {
interval_lst[i]: intervalQty_lst[i] for i in range(len(interval_lst))}
intervalInMinutes = interval_intervalQty_dict[interval]
#calculate total nos of datapoints
data_pts_total = math.ceil(timedeltaInMinutes_int/intervalInMinutes)
#if data_pts_total <= useNosDataPts:
# use_Rnge_per_Request_int = useNosDataPts * data_pts_total
# use_Rnge_per_Request_datetime = timedelta(seconds=(timedeltaInMinutes_int * 60))
#else:
# use_Rnge_per_Request_int = useNosDataPts * data_pts_total
# use_Rnge_per_Request_datetime = timedelta(seconds=(useNosDataPts * intervalInMinutes * 60))
#calculate the total number of start/stop pairs or requests to make
nosReq = math.ceil(data_pts_total/useNosDataPts)
# we are creating lists of start date/enddate/time interval
symbol_namn_lst = []
start_time_lst = []
end_time_lst = []
interval_lst = []
intervalinMin_lst = []
data_pts_lst = []
chartRnge_lst = []
cnt = 0
leftOverDatapts = data_pts_total
for nos in range(nosReq):
# populate entries
if nos == 0:
start_time_entry = start_date_dt
else:
start_time_entry = end_time_lst[nos - 1]
# can switch to max_Rnge_per_Request_int instead of use_Rnge_per_Request_int
interval_entry = intervalInMinutes
if data_pts_total <= useNosDataPts:
data_pts_entry = data_pts_total
else:
data_pts_entry = useNosDataPts
# can switch to max_Rnge_per_Request_int instead of use_Rnge_per_Request_int
ChartRnge_entry = data_pts_total
# correct the last data_pts_entry
#st.write(f'a1 {cnt}: {leftOverDatapts} : {data_pts_total} : {data_pts_entry}')
if leftOverDatapts >= data_pts_entry:
leftOverDatapts = leftOverDatapts - data_pts_entry
else:
data_pts_entry = leftOverDatapts
#st.write(f'a2 {cnt}: {leftOverDatapts} : {data_pts_total} : {data_pts_entry}')
use_Rnge_per_Request_int = data_pts_entry * intervalInMinutes * 60
use_Rnge_per_Request_datetime = timedelta(seconds=(use_Rnge_per_Request_int))
end_time_entry = start_time_entry + use_Rnge_per_Request_datetime
# populate lists
start_time_lst.append(start_time_entry)
end_time_lst.append(end_time_entry)
        interval_str_lst.append(interval)
intervalinMin_lst.append(interval_entry)
data_pts_lst.append(data_pts_entry)
chartRnge_lst.append(ChartRnge_entry)
symbol_namn_lst.append(symbol)
cnt += 1
# lets create dataframe from lists created above
    chartTSInput_dict = {"symbol": symbol_namn_lst, "start_time": start_time_lst, "end_time": end_time_lst,
                         "interval": interval_str_lst, "interval_mins": intervalinMin_lst,
                         "data_pts_limit": data_pts_lst, "data_pts_all": chartRnge_lst}
chartTSInput_df = pd.DataFrame(chartTSInput_dict)
#st.dataframe(chartTSInput_df)
return chartTSInput_df, maxRequestPerDay_freekey
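# Illustrative sketch (hypothetical symbol and dates): chunking a one-year range of 1min bars into
# requests that each stay under the 4,500-data-point budget used above. Assumes this module is run
# inside a Streamlit session, since getStartStopRngeLst is cached with st.cache.
def _demo_chunk_requests():
    from datetime import datetime
    start_dt = datetime(2022, 1, 1)
    end_dt = datetime(2023, 1, 1)
    chunks_df, daily_request_limit = getStartStopRngeLst('AAPL', '1min', start_dt, end_dt)
    # each row of chunks_df describes one API call: symbol, start_time, end_time, interval,
    # interval_mins, data_pts_limit and data_pts_all
    return chunks_df, daily_request_limit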
#function to get list of time series for each symbol in a list of symbols
def getAllSymbolTimeSeries_dfs(twelvedata_api_key, symbol_startend_dict):
symbol_list = symbol_startend_dict["symbol"]
allSymb_startEnd_lst = symbol_startend_dict["start_stop_data"]
all_symb_dc_lst = []
cnt = 0
for symb_df in allSymb_startEnd_lst:
symb_dc = getSymbol_dc(twelvedata_api_key, symb_df)
all_symb_dc_lst.append(symb_dc)
cnt += 1
res_dct = {symbol_list[i]: all_symb_dc_lst[i] for i in range(len(symbol_list))}
return res_dct
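# Illustrative sketch (hypothetical symbols; API key handling is assumed): getAllSymbolTimeSeries_dfs
# expects a dict with a "symbol" list and a parallel "start_stop_data" list holding one chunking
# dataframe per symbol, i.e. the dataframes produced by getStartStopRngeLst above.
def _demo_fetch_all_symbols(twelvedata_api_key):
    from datetime import datetime
    start_dt, end_dt = datetime(2022, 1, 1), datetime(2023, 1, 1)
    symbols = ['AAPL', 'MSFT']
    start_stop_dfs = [getStartStopRngeLst(s, '1h', start_dt, end_dt)[0] for s in symbols]
    symbol_startend_dict = {"symbol": symbols, "start_stop_data": start_stop_dfs}
    # returns {symbol: singleTickerData container}, keyed exactly as in getAllSymbolTimeSeries_dfs
    return getAllSymbolTimeSeries_dfs(twelvedata_api_key, symbol_startend_dict)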
# function to get time series for each row of a symbol df in a list of symbols
def getSymbol_dc(twelvedata_api_key, symb_df):
cnt = 0
symb_cnt = len(symb_df['symbol'].unique())
if symb_cnt == 1:
symb_tsData_df_lst = []
symb_tsMeta_df_lst = []
symb_tsError_df_lst = []
start_time = ''
data_pts = 0
        for indx in symb_df.index:
            ticker = symb_df['symbol'][indx]
            start_time = symb_df['start_time'][indx]
            if indx == 0:
                # remember the very first start time for the returned data container
                start_time_dc = start_time
end_time = symb_df['end_time'][indx]
interval = symb_df['interval'][indx]
data_pts = symb_df['data_pts_limit'][indx] + data_pts
tries = 0
sleepVal = 3
msg_all = ''
tsData_df, tsMeta_df, tsError_df = get_TS_12Data(twelvedata_api_key, ticker, interval, start_time, end_time, data_pts, tries, sleepVal, msg_all)
tsDataLen = len(tsData_df.index)
if tsDataLen != 0:
symb_tsData_df_lst.append(tsData_df)
symb_tsMeta_df_lst.append(tsMeta_df)
tsErrorLen = len(tsError_df.index)
if tsErrorLen != 0:
symb_tsError_df_lst.append(tsError_df)
            tsErrorLstLen = len(symb_tsError_df_lst)
            if tsErrorLstLen == 0:
                cnt = 0
                testMetaDf = 0
                for df_meta in symb_tsMeta_df_lst:
                    if cnt == 0:
                        compare_df_meta = df_meta
                    else:
                        # flag a mismatch if any chunk returned different metadata
                        if not compare_df_meta.equals(df_meta):
                            testMetaDf = 1
                    cnt += 1
                if testMetaDf == 0:
                    df_dc_tsData_all = pd.concat(symb_tsData_df_lst, axis=0)
                    ticker_dc = singleTickerData(ticker=ticker,
                                                 interval=interval,
                                                 start_date=start_time_dc,
                                                 end_date=end_time,
                                                 outputsize=data_pts,
                                                 df_tsMeta=compare_df_meta,
                                                 df_tsData=df_dc_tsData_all,
                                                 )
                    return ticker_dc
                else:
                    st.error('metadata returned for this symbol does not match across chunks')
            else:
                st.error('one or more time-series requests for this symbol returned an error')
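# For orientation only: based on the keyword arguments passed to singleTickerData above, the
# returned container is assumed to look roughly like the hypothetical dataclass sketched below.
# The real singleTickerData is defined elsewhere in this app and is the one actually used.
def _demo_ticker_container_shape():
    from dataclasses import dataclass
    import pandas as pd

    @dataclass
    class _TickerContainerSketch:
        ticker: str
        interval: str
        start_date: object
        end_date: object
        outputsize: int
        df_tsMeta: pd.DataFrame
        df_tsData: pd.DataFrame

    return _TickerContainerSketch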
def get_TS_12Data(twelvedata_api_key, ticker, interval, start_time, end_time, data_pts, tries, sleepVal, msg_all):
maxtry = 20
# TwelveData Work
data_type = "time_series"
value_type = ["values"]
# initialise empty dataframes
tsMeta_df = pd.DataFrame()
tsData_df = pd.DataFrame()
tsError_df = pd.DataFrame()
counter = 0
twelvedata_url = f"https://api.twelvedata.com/{data_type}?symbol={ticker}&start_date={start_time}&end_date={end_time}&interval={interval}&apikey={twelvedata_api_key}"
json_object = requests.get(twelvedata_url).json()
#json_object = chkJsonObj(json_object)
    # check whether the response carries a status key
    status_exist = 'status' in json_object
    if status_exist:
status_data = json_object['status']
#check status value says ok before we extract data
if status_data == 'ok':
            msg = f'Status Key Check Ok!!!: TS Data Extraction Successful \nstatus_data == ok for ticker: {ticker}|interval: {interval}|start time: {start_time}|output size: {data_pts}'
msg_all = msg_all + msg + ' \n'
status_mess = status_data
meta_data = json_object['meta']
# will add start and end dates to meta_df
tsMeta_df = pd.DataFrame(meta_data, index=[0])
value_data = json_object[f'{value_type[counter]}']
# will add start and end dates to meta_df
tsData_df = pd.DataFrame(value_data)
tsData_df = tsData_df.iloc[::-1].reset_index(drop=True)
elif status_data == 'error':
code_data = json_object['code']
if code_data == 429:
if tries == (maxtry + 1):
                    msg = f'Time Series Data Extraction Failed!!!! \nstatus_data == error for ticker: {ticker}|interval: {interval}|start time: {start_time}|output size: {data_pts}'
msg_all = msg_all + msg + ' \n'
status_mess = status_data
code_data = json_object['code']
mess_data = json_object['message']
error_data = json_object
error_df = pd.DataFrame(json_object, index=[0])
if data_type == "time_series":
tsError_df = error_df
else:
status_mess = status_data
tries += 1
sleepVal += 1
                    msg = f'Retry TS Data API call for {ticker}|interval: {interval}|start time: {start_time}|end time: {end_time}|output size: {data_pts} \nRetrying |times: {tries}|secs of sleep: {sleepVal}'
msg_all = msg_all + msg + ' \n'
if sleepVal > 0:
sleep(sleepVal)
st.write(f'{msg_all}')
                    return get_TS_12Data(twelvedata_api_key, ticker, interval, start_time, end_time, data_pts, tries, sleepVal, msg_all)
elif code_data == 400:
message_data = json_object['message']
st.error(f'ticker: {ticker} start_time:{start_time} end_time:{end_time} \nerror code: {code_data} \nmessage: {message_data} \nstatus: {status_data}')
else:
message_data = json_object['message']
st.error(f'ticker: {ticker} start_time:{start_time} end_time:{end_time} \nerror code: {code_data} \nmessage: {message_data} \nstatus: {status_data}')
    else:
        msg = f'Status Key Does not exist!!!: Will attempt TS Data Extraction from odd json object \nstatus_data does not exist for ticker: {ticker}|interval: {interval}|start time: {start_time}|output size: {data_pts}'
msg_all = msg_all + msg + ' \n'
st.write('status_exist == False')
status_mess = 'status key not exist'
meta_data = json_object['meta']
# will add start and end dates to meta_df
        tsMeta_df = pd.DataFrame(meta_data, index=[0])
        # mirror the 'ok' branch above and extract the values even though no status key was returned
        value_data = json_object[f'{value_type[counter]}']
        tsData_df = pd.DataFrame(value_data)
        tsData_df = tsData_df.iloc[::-1].reset_index(drop=True)
    return tsData_df, tsMeta_df, tsError_df
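# For reference, a trimmed illustration of the JSON shape this function parses. Only the keys the
# code actually reads are shown; the field contents are illustrative, not a recorded API response:
# {
#     "meta":   {"symbol": "...", "interval": "...", ...},
#     "values": [{"datetime": "...", "open": "...", "close": "...", ...}, ...],
#     "status": "ok"
# }
# and on failure:
# {"code": 429, "message": "...", "status": "error"}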