| prompt | completion | api |
|---|---|---|
| stringlengths 19–1.03M | stringlengths 4–2.12k | stringlengths 8–90 |
# Copyright 2020 <NAME> <<EMAIL>>
# Open source under the MIT license (see LICENSE)
# pylint: disable=import-error
import pandas as pd
class MappingBase:
key_name = None
def __init__(self, mapping):
if self.key_name not in mapping.columns:
raise ValueError('Mapping must have column {}'.format(self.key_name))
if 'pft' not in mapping.columns:
raise ValueError('Mapping must have column pft')
self._mapping = mapping
@property
def mapping(self): return self._mapping
class BiomePftMapping(MappingBase):
key_name = 'biome'
class TaxaPftMapping(MappingBase):
key_name = 'taxa'
class BiomePftMatrix(BiomePftMapping):
def __init__(self, matrix):
super().__init__(convert_matrix_to_mapping(matrix, self.key_name))
class TaxaPftMatrix(TaxaPftMapping):
def __init__(self, matrix):
super().__init__(convert_matrix_to_mapping(matrix, self.key_name))
class PftListBase:
@staticmethod
def _convert_list_to_mapping(df, key_name):
df = clean_column_name(df, 0, key_name)
df = clean_column_name(df, 1, 'pft')
return df.explode('pft')
@classmethod
def read_csv(cls, filepath_or_buffer, **kwargs):
key_name = cls.key_name # pylint: disable=no-member
raw = pd.read_csv(filepath_or_buffer, dtype=str, header=None, **kwargs)  # api: pandas.read_csv
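# NOTE (hypothetical sketch, not in the original source): clean_column_name and
# convert_matrix_to_mapping are called above but are not included in this excerpt.
# The reconstructions below are inferred only from how they are used and may differ
# from the real implementations.
def clean_column_name(df, position, new_name):
    """Rename the column at integer position `position` to `new_name`."""
    return df.rename(columns={df.columns[position]: new_name})
def convert_matrix_to_mapping(matrix, key_name):
    """Melt a wide 0/1 matrix (key column plus one column per PFT) into (key, pft) pairs."""
    matrix = clean_column_name(matrix, 0, key_name)
    long_form = matrix.melt(id_vars=key_name, var_name='pft', value_name='flag')
    return long_form.loc[long_form['flag'] == 1, [key_name, 'pft']]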
#
# Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
from datetime import datetime
import errno
from os import makedirs, environ
from os.path import expanduser, join, getmtime, isdir
import warnings
import numpy as np
from IPython.display import display
import pandas as pd
from pandas.tseries.offsets import BDay
from pandas_datareader import data as web
from . import pos
from . import txn
APPROX_BDAYS_PER_MONTH = 21
APPROX_BDAYS_PER_YEAR = 252
MONTHS_PER_YEAR = 12
WEEKS_PER_YEAR = 52
MM_DISPLAY_UNIT = 1000000.
DAILY = 'daily'
WEEKLY = 'weekly'
MONTHLY = 'monthly'
YEARLY = 'yearly'
ANNUALIZATION_FACTORS = {
DAILY: APPROX_BDAYS_PER_YEAR,
WEEKLY: WEEKS_PER_YEAR,
MONTHLY: MONTHS_PER_YEAR
}
def cache_dir(environ=environ):
try:
return environ['PYFOLIO_CACHE_DIR']
except KeyError:
return join(
environ.get(
'XDG_CACHE_HOME',
expanduser('~/.cache/'),
),
'pyfolio',
)
def data_path(name):
return join(cache_dir(), name)
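# Example (illustrative, not in the original source): with neither PYFOLIO_CACHE_DIR nor
# XDG_CACHE_HOME set, cache_dir() resolves to the expanded '~/.cache/pyfolio', and a
# hypothetical data_path('returns.csv') call to '~/.cache/pyfolio/returns.csv'.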
def ensure_directory(path):
"""
Ensure that a directory named "path" exists.
"""
try:
makedirs(path)
except OSError as exc:
if exc.errno != errno.EEXIST or not isdir(path):
raise
def one_dec_places(x, pos):
"""
Adds 1/10th decimal to plot ticks.
"""
return '%.1f' % x
def two_dec_places(x, pos):
"""
Adds 1/100th decimal to plot ticks.
"""
return '%.2f' % x
def percentage(x, pos):
"""
Adds percentage sign to plot ticks.
"""
return '%.0f%%' % x
def get_utc_timestamp(dt):
"""
Returns the Timestamp/DatetimeIndex
localized to or converted to UTC.
Parameters
----------
dt : Timestamp/DatetimeIndex
the date(s) to be converted
Returns
-------
same type as input
date(s) converted to UTC
"""
dt = pd.to_datetime(dt)
try:
dt = dt.tz_localize('UTC')
except TypeError:
dt = dt.tz_convert('UTC')
return dt
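# Example (illustrative, not in the original source): a naive timestamp is localized,
# an aware one is converted, e.g.
#   get_utc_timestamp(pd.Timestamp('2020-01-01 09:30'))
#       -> Timestamp('2020-01-01 09:30:00+0000', tz='UTC')
#   get_utc_timestamp(pd.Timestamp('2020-01-01 09:30', tz='US/Eastern'))
#       -> Timestamp('2020-01-01 14:30:00+0000', tz='UTC')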
_1_bday = BDay()
def _1_bday_ago():
return pd.Timestamp.now().normalize() - _1_bday
def format_asset(asset):
"""
If zipline asset objects are used, we want to print them out prettily
within the tear sheet. This function should only be applied directly
before displaying.
"""
try:
import zipline.assets
except ImportError:
return asset
if isinstance(asset, zipline.assets.Asset):
return asset.symbol
else:
return asset
def get_returns_cached(filepath, update_func, latest_dt, **kwargs):
"""
Get returns from a cached file if the cache is recent enough,
otherwise, try to retrieve via a provided update function and
update the cache file.
Parameters
----------
filepath : str
Path to cached csv file
update_func : function
Function to call in case cache is not up-to-date.
latest_dt : pd.Timestamp (tz=UTC)
Latest datetime required in csv file.
**kwargs : Keyword arguments
Optional keyword arguments will be passed to update_func()
Returns
-------
pandas.DataFrame
DataFrame containing returns
"""
update_cache = False
try:
mtime = getmtime(filepath)
except OSError as e:
if e.errno != errno.ENOENT:
raise
update_cache = True
else:
file_dt = pd.Timestamp(mtime, unit='s')
if latest_dt.tzinfo:
file_dt = file_dt.tz_localize('utc')
if file_dt < latest_dt:
update_cache = True
else:
returns = pd.read_csv(filepath, index_col=0, parse_dates=True)  # api: pandas.read_csv
from __future__ import division  # brings in Python 3 mixed-type calculation rules
import datetime
import inspect
import numpy as np
import numpy.testing as npt
import os.path
import pandas as pd
import sys
from tabulate import tabulate
import unittest
##find parent directory and import model
#parentddir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
#sys.path.append(parentddir)
from ..agdrift_exe import Agdrift
test = {}
class TestAgdrift(unittest.TestCase):
"""
Unit tests for the Agdrift model.
"""
def setUp(self):
"""
Set up the test as needed,
e.g. use pandas to open the agdrift qaqc csv
and create pandas DataFrames for inputs and expected outputs
:return:
"""
pass
def tearDown(self):
"""
teardown called after each test
e.g. maybe write test results to some text file
:return:
"""
pass
def create_agdrift_object(self):
# create empty pandas dataframes to create empty object for testing
df_empty = pd.DataFrame()
# create an empty agdrift object
agdrift_empty = Agdrift(df_empty, df_empty)
return agdrift_empty
def test_validate_sim_scenarios(self):
"""
:description determines if user defined scenarios are valid for processing
:param application_method: type of Tier I application method employed
:param aquatic_body_def: type of endpoint of concern (e.g., pond, wetland); implies whether
: endpoint of concern parameters (e.g., pond width) are set (i.e., by user or EPA standard)
:param drop_size_*: qualitative description of spray droplet size for aerial & ground applications
:param boom_height: qualitative height above ground of spray boom
:param airblast_type: type of orchard being sprayed
:NOTE we perform an additional validation check related to distances later in the code just before integration
:return
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
agdrift_empty.out_sim_scenario_chk = pd.Series([], dtype='object')
expected_result = pd.Series([
'Valid Tier I Aquatic Aerial Scenario',
'Valid Tier I Terrestrial Aerial Scenario',
'Valid Tier I Aquatic Aerial Scenario',
'Valid Tier I Terrestrial Aerial Scenario',
'Valid Tier I Aquatic Aerial Scenario',
'Valid Tier I Terrestrial Ground Scenario',
'Valid Tier I Aquatic Ground Scenario',
'Valid Tier I Terrestrial Ground Scenario',
'Valid Tier I Aquatic Ground Scenario',
'Valid Tier I Terrestrial Airblast Scenario',
'Valid Tier I Aquatic Airblast Scenario',
'Valid Tier I Terrestrial Airblast Scenario',
'Valid Tier I Aquatic Airblast Scenario',
'Valid Tier I Terrestrial Airblast Scenario',
'Invalid Tier I Aquatic Aerial Scenario',
'Invalid Tier I Aquatic Ground Scenario',
'Invalid Tier I Aquatic Airblast Scenario',
'Invalid Tier I Terrestrial Aerial Scenario',
'Valid Tier I Terrestrial Ground Scenario',
'Valid Tier I Terrestrial Airblast Scenario',
'Invalid scenario ecosystem_type',
'Invalid Tier I Aquatic Assessment application method',
'Invalid Tier I Terrestrial Assessment application method'],dtype='object')
try:
#set test data
agdrift_empty.num_simulations = len(expected_result)
agdrift_empty.application_method = pd.Series(
['tier_1_aerial',
'tier_1_aerial',
'tier_1_aerial',
'tier_1_aerial',
'tier_1_aerial',
'tier_1_ground',
'tier_1_ground',
'tier_1_ground',
'tier_1_ground',
'tier_1_airblast',
'tier_1_airblast',
'tier_1_airblast',
'tier_1_airblast',
'tier_1_airblast',
'tier_1_aerial',
'tier_1_ground',
'tier_1_airblast',
'tier_1_aerial',
'tier_1_ground',
'tier_1_airblast',
'tier_1_aerial',
'Tier II Aerial',
'Tier III Aerial'], dtype='object')
agdrift_empty.ecosystem_type = pd.Series(
['aquatic_assessment',
'terrestrial_assessment',
'aquatic_assessment',
'terrestrial_assessment',
'aquatic_assessment',
'terrestrial_assessment',
'aquatic_assessment',
'terrestrial_assessment',
'aquatic_assessment',
'terrestrial_assessment',
'aquatic_assessment',
'terrestrial_assessment',
'aquatic_assessment',
'terrestrial_assessment',
'aquatic_assessment',
'aquatic_assessment',
'aquatic_assessment',
'terrestrial_assessment',
'terrestrial_assessment',
'terrestrial_assessment',
'Field Assessment',
'aquatic_assessment',
'terrestrial_assessment'], dtype='object')
agdrift_empty.aquatic_body_type = pd.Series(
['epa_defined_pond',
'NaN',
'epa_defined_wetland',
'NaN',
'user_defined_pond',
'NaN',
'user_defined_wetland',
'NaN',
'epa_defined_wetland',
'NaN',
'user_defined_pond',
'NaN',
'user_defined_wetland',
'NaN',
'Defined Pond',
'user_defined_pond',
'epa_defined_pond',
'NaN',
'NaN',
'NaN',
'epa_defined_pond',
'user_defined_wetland',
'user_defined_pond'], dtype='object')
agdrift_empty.terrestrial_field_type = pd.Series(
['NaN',
'user_defined_terrestrial',
'NaN',
'epa_defined_terrestrial',
'NaN',
'user_defined_terrestrial',
'NaN',
'user_defined_terrestrial',
'NaN',
'epa_defined_terrestrial',
'NaN',
'user_defined_terrestrial',
'NaN',
'user_defined_terrestrial',
'NaN',
'NaN',
'NaN',
'user_defined_terrestrial',
'user_defined_terrestrial',
'user_defined_terrestrial',
'NaN',
'NaN',
'user_defined_terrestrial'], dtype='object')
agdrift_empty.drop_size_aerial = pd.Series(
['very_fine_to_fine',
'fine_to_medium',
'medium_to_coarse',
'coarse_to_very_coarse',
'fine_to_medium',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'medium_to_coarse',
'NaN',
'very_fine_to_medium',
'NaN',
'very_fine Indeed',
'NaN',
'very_fine_to_medium',
'medium_to_coarse',
'NaN'], dtype='object')
agdrift_empty.drop_size_ground = pd.Series(
['NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'very_fine',
'fine_to_medium-coarse',
'very_fine',
'fine_to_medium-coarse',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'very_fine',
'NaN',
'fine_to_medium-coarse',
'very_fine',
'NaN',
'very_fine_to_medium',
'NaN',
'very_fine'], dtype='object')
agdrift_empty.boom_height = pd.Series(
['NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'high',
'low',
'high',
'low',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'high',
'NaN',
'NaN',
'NaN',
'NaN'],dtype='object')
agdrift_empty.airblast_type = pd.Series(
['NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'normal',
'dense',
'sparse',
'orchard',
'vineyard',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'vineyard',
'NaN',
'NaN',
'NaN'], dtype='object')
agdrift_empty.validate_sim_scenarios()
result = agdrift_empty.out_sim_scenario_chk
npt.assert_array_equal(result, expected_result, err_msg="", verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_set_sim_scenario_id(self):
"""
:description provides scenario ids per simulation that match scenario names (i.e., column_names) from SQL database
:param out_sim_scenario_id: scenario name as assigned to individual simulations
:param num_simulations: number of simulations to assign scenario names
:param out_sim_scenario_chk: from previous method where scenarios were checked for validity
:param application_method: application method of scenario
:param drop_size_*: qualitative description of spray droplet size for aerial and ground applications
:param boom_height: qualitative height above ground of spray boom
:param airblast_type: type of airblast application (e.g., vineyard, orchard)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series(['aerial_vf2f',
'aerial_f2m',
'aerial_m2c',
'aerial_c2vc',
'ground_low_vf',
'ground_low_fmc',
'ground_high_vf',
'ground_high_fmc',
'airblast_normal',
'airblast_dense',
'airblast_sparse',
'airblast_vineyard',
'airblast_orchard',
'Invalid'], dtype='object')
try:
agdrift_empty.num_simulations = len(expected_result)
agdrift_empty.out_sim_scenario_chk = pd.Series(['Valid Tier I Aerial',
'Valid Tier I Aerial',
'Valid Tier I Aerial',
'Valid Tier I Aerial',
'Valid Tier I Ground',
'Valid Tier I Ground',
'Valid Tier I Ground',
'Valid Tier I Ground',
'Valid Tier I Airblast',
'Valid Tier I Airblast',
'Valid Tier I Airblast',
'Valid Tier I Airblast',
'Valid Tier I Airblast',
'Invalid Scenario'], dtype='object')
agdrift_empty.application_method = pd.Series(['tier_1_aerial',
'tier_1_aerial',
'tier_1_aerial',
'tier_1_aerial',
'tier_1_ground',
'tier_1_ground',
'tier_1_ground',
'tier_1_ground',
'tier_1_airblast',
'tier_1_airblast',
'tier_1_airblast',
'tier_1_airblast',
'tier_1_airblast',
'tier_1_aerial'], dtype='object')
agdrift_empty.drop_size_aerial = pd.Series(['very_fine_to_fine',
'fine_to_medium',
'medium_to_coarse',
'coarse_to_very_coarse',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN'], dtype='object')
agdrift_empty.drop_size_ground = pd.Series(['NaN',
'NaN',
'NaN',
'NaN',
'very_fine',
'fine_to_medium-coarse',
'very_fine',
'fine_to_medium-coarse',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN'], dtype='object')
agdrift_empty.boom_height = pd.Series(['NaN',
'NaN',
'NaN',
'NaN',
'low',
'low',
'high',
'high',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN'], dtype='object')
agdrift_empty.airblast_type = pd.Series(['NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'normal',
'dense',
'sparse',
'vineyard',
'orchard',
'NaN'], dtype='object')
agdrift_empty.set_sim_scenario_id()
result = agdrift_empty.out_sim_scenario_id
npt.assert_array_equal(result, expected_result, err_msg="", verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_assign_column_names(self):
"""
:description assigns column names (except distance column) from sql database to internal scenario names
:param column_name: short name for pesticide application scenario for which distance vs deposition data is provided
:param scenario_name: internal variable for holding scenario names
:param scenario_number: index for scenario_name (this method assumes the distance values could occur in any column)
:param distance_name: internal name for the column holding distance data
:NOTE to test both outputs of this method I simply appended them together
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
agdrift_empty.scenario_name = pd.Series([], dtype='object')
expected_result = pd.Series(['aerial_vf2f', 'aerial_f2m', 'aerial_m2c', 'aerial_c2vc',
'ground_low_vf', 'ground_low_fmc',
'ground_high_vf', 'ground_high_fmc',
'airblast_normal', 'airblast_dense', 'airblast_sparse',
'airblast_vineyard', 'airblast_orchard'], dtype='object')
try:
agdrift_empty.column_names = pd.Series(['aerial_vf2f', 'aerial_f2m', 'aerial_m2c', 'aerial_c2vc',
'ground_low_vf', 'ground_low_fmc',
'ground_high_vf', 'ground_high_fmc',
'airblast_normal', 'airblast_dense', 'airblast_sparse',
'airblast_vineyard', 'airblast_orchard', 'distance_ft'])
#call method to assign scenario names
agdrift_empty.assign_column_names()
result = agdrift_empty.scenario_name
npt.assert_array_equal(result, expected_result, err_msg="", verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_get_distances(self):
"""
:description retrieves distance values for deposition scenario datasets
: all scenarios use same distances
:param num_db_values: number of distance values to be retrieved
:param distance_name: name of column in sql database that contains the distance values
:NOTE any blank fields are filled with 'nan'
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
location = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
agdrift_empty.db_name = os.path.join(location, 'sqlite_agdrift_distance.db')
agdrift_empty.db_table = 'output'
expected_result = pd.Series([], dtype='float')
try:
expected_result = [0.,0.102525,0.20505,0.4101,0.8202,1.6404,3.2808,4.9212,6.5616,9.8424,13.1232,19.6848,26.2464,
32.808,39.3696,45.9312,52.4928,59.0544,65.616,72.1776,78.7392,85.3008,91.8624,98.424,104.9856,
111.5472,118.1088,124.6704,131.232,137.7936,144.3552,150.9168,157.4784,164.04,170.6016,177.1632,
183.7248,190.2864,196.848,203.4096,209.9712,216.5328,223.0944,229.656,236.2176,242.7792,249.3408,
255.9024,262.464,269.0256,275.5872,282.1488,288.7104,295.272,301.8336,308.3952,314.9568,321.5184,
328.08,334.6416,341.2032,347.7648,354.3264,360.888,367.4496,374.0112,380.5728,387.1344,393.696,
400.2576,406.8192,413.3808,419.9424,426.504,433.0656,439.6272,446.1888,452.7504,459.312,465.8736,
472.4352,478.9968,485.5584,492.12,498.6816,505.2432,511.8048,518.3664,524.928,531.4896,538.0512,
544.6128,551.1744,557.736,564.2976,570.8592,577.4208,583.9824,590.544,597.1056,603.6672,610.2288,
616.7904,623.352,629.9136,636.4752,643.0368,649.5984,656.16,662.7216,669.2832,675.8448,682.4064,
688.968,695.5296,702.0912,708.6528,715.2144,721.776,728.3376,734.8992,741.4608,748.0224,754.584,
761.1456,767.7072,774.2688,780.8304,787.392,793.9536,800.5152,807.0768,813.6384,820.2,826.7616,
833.3232,839.8848,846.4464,853.008,859.5696,866.1312,872.6928,879.2544,885.816,892.3776,898.9392,
905.5008,912.0624,918.624,925.1856,931.7472,938.3088,944.8704,951.432,957.9936,964.5552,971.1168,
977.6784,984.24,990.8016,997.3632]
agdrift_empty.distance_name = 'distance_ft'
agdrift_empty.num_db_values = len(expected_result)
result = agdrift_empty.get_distances(agdrift_empty.num_db_values)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_get_scenario_deposition_data(self):
"""
:description retrieves deposition data for all scenarios from sql database
: and checks that for each the first, last, and total number of values
: are correct
:param scenario: name of scenario for which data is to be retrieved
:param num_values: number of values included in scenario datasets
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
#scenario_data = pd.Series([[]], dtype='float')
result = pd.Series([], dtype='float')
#changing expected values to the 161st
expected_result = [0.50013,0.041273,161.0, #aerial_vf2f
0.49997,0.011741,161.0, #aerial_f2m
0.4999,0.0053241,161.0, #aerial_m2c
0.49988,0.0031189,161.0, #aerial_c2vc
1.019339,9.66E-04,161.0, #ground_low_vf
1.007885,6.13E-04,161.0, #ground_low_fmc
1.055205,1.41E-03,161.0, #ground_high_vf
1.012828,7.72E-04,161.0, #ground_high_fmc
8.91E-03,3.87E-05,161.0, #airblast_normal
0.1155276,4.66E-04,161.0, #airblast_dense
0.4762651,5.14E-05,161.0, #airblast_sparse
3.76E-02,3.10E-05,161.0, #airblast_vineyard
0.2223051,3.58E-04,161.0] #airblast_orchard
try:
agdrift_empty.num_db_values = 161 #set number of data values in sql db
location = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
agdrift_empty.db_name = os.path.join(location, 'sqlite_agdrift_distance.db')
agdrift_empty.db_table = 'output'
#agdrift_empty.db_name = 'sqlite_agdrift_distance.db'
#this is the list of scenario names (column names) in sql db (the order here is important because
#the expected values are ordered in this manner)
agdrift_empty.scenario_name = ['aerial_vf2f', 'aerial_f2m', 'aerial_m2c', 'aerial_c2vc',
'ground_low_vf', 'ground_low_fmc', 'ground_high_vf', 'ground_high_fmc',
'airblast_normal', 'airblast_dense', 'airblast_sparse', 'airblast_vineyard',
'airblast_orchard']
#cycle through reading scenarios and building result list
for i in range(len(agdrift_empty.scenario_name)):
#get scenario data
scenario_data = agdrift_empty.get_scenario_deposition_data(agdrift_empty.scenario_name[i],
agdrift_empty.num_db_values)
print(scenario_data)
#extract 1st and last values of scenario data and build result list (including how many values are
#retrieved for each scenario)
if i == 0:
#fix this
result = [scenario_data[0], scenario_data[agdrift_empty.num_db_values - 1],
float(len(scenario_data))]
else:
result.extend([scenario_data[0], scenario_data[agdrift_empty.num_db_values - 1],
float(len(scenario_data))])
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_get_column_names(self):
"""
:description retrieves column names from sql database (sqlite_agdrift_distance.db)
: (each column name refers to a specific deposition scenario;
: the scenario name is used later to retrieve the deposition data)
:parameter output name of sql database table from which to retrieve requested data
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
location = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
agdrift_empty.db_name = os.path.join(location, 'sqlite_agdrift_distance.db')
agdrift_empty.db_table = 'output'
result = pd.Series([], dtype='object')
expected_result = ['distance_ft','aerial_vf2f', 'aerial_f2m', 'aerial_m2c', 'aerial_c2vc',
'ground_low_vf', 'ground_low_fmc', 'ground_high_vf', 'ground_high_fmc',
'airblast_normal', 'airblast_dense', 'airblast_sparse', 'airblast_vineyard',
'airblast_orchard']
try:
result = agdrift_empty.get_column_names()
npt.assert_array_equal(result, expected_result, err_msg="", verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_filter_arrays(self):
"""
:description eliminate blank data cells (i.e., distances for which no deposition value is provided)
(and thus reduce the number of x,y values to be used)
:parameter x_in: array of distance values associated with values for a deposition scenario (e.g., Aerial/EPA Defined Pond)
:parameter y_in: array of deposition values associated with a deposition scenario (e.g., Aerial/EPA Defined Pond)
:parameter x_out: processed array of x_in values eliminating indices of blank distance/deposition values
:parameter y_out: processed array of y_in values eliminating indices of blank distance/deposition values
:NOTE y_in array is assumed to be populated by values >= 0. except for the blanks as 'nan' entries
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result_x = pd.Series([0.,1.,4.,5.,6.,7.], dtype='float')
expected_result_y = pd.Series([10.,11.,14.,15.,16.,17.], dtype='float')
try:
x_in = pd.Series([0.,1.,2.,3.,4.,5.,6.,7.], dtype='float')
y_in = pd.Series([10.,11.,'nan','nan',14.,15.,16.,17.], dtype='float')
x_out, y_out = agdrift_empty.filter_arrays(x_in, y_in)
result_x = x_out
result_y = y_out
npt.assert_allclose(result_x, expected_result_x, rtol=1e-5, atol=0, err_msg='', verbose=True)
npt.assert_allclose(result_y, expected_result_y, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result_x, expected_result_x, result_y, expected_result_y]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_list_sims_per_scenario(self):
"""
:description scan simulations and count number and indices of simulations that apply to each scenario
:parameter num_scenarios number of deposition scenarios included in SQL database
:parameter num_simulations number of simulations included in this model execution
:parameter scenario_name name of deposition scenario as recorded in SQL database
:parameter out_sim_scenario_id identification of deposition scenario specified per model run simulation
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_num_sims = pd.Series([2,2,2,2,2,2,2,2,2,2,2,2,2], dtype='int')
expected_sim_indices = pd.Series([[0,13,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[1,14,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[2,15,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[3,16,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[4,17,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[5,18,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[6,19,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[7,20,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[8,21,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[9,22,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[10,23,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[11,24,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[12,25,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]], dtype='int')
try:
agdrift_empty.scenario_name = pd.Series(['aerial_vf2f', 'aerial_f2m', 'aerial_m2c', 'aerial_c2vc',
'ground_low_vf', 'ground_low_fmc', 'ground_high_vf', 'ground_high_fmc',
'airblast_normal', 'airblast_dense', 'airblast_sparse', 'airblast_vineyard',
'airblast_orchard'], dtype='object')
agdrift_empty.out_sim_scenario_id = pd.Series(['aerial_vf2f', 'aerial_f2m', 'aerial_m2c', 'aerial_c2vc',
'ground_low_vf', 'ground_low_fmc', 'ground_high_vf', 'ground_high_fmc',
'airblast_normal', 'airblast_dense', 'airblast_sparse', 'airblast_vineyard',
'airblast_orchard','aerial_vf2f', 'aerial_f2m', 'aerial_m2c', 'aerial_c2vc',
'ground_low_vf', 'ground_low_fmc', 'ground_high_vf', 'ground_high_fmc',
'airblast_normal', 'airblast_dense', 'airblast_sparse', 'airblast_vineyard',
'airblast_orchard'], dtype='object')
agdrift_empty.num_simulations = len(agdrift_empty.out_sim_scenario_id)
agdrift_empty.num_scenarios = len(agdrift_empty.scenario_name)
result_num_sims, result_sim_indices = agdrift_empty.list_sims_per_scenario()
npt.assert_array_equal(result_num_sims, expected_num_sims, err_msg='', verbose=True)
npt.assert_array_equal(result_sim_indices, expected_sim_indices, err_msg='', verbose=True)
finally:
tab = [result_num_sims, expected_num_sims, result_sim_indices, expected_sim_indices]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_determine_area_dimensions(self):
"""
:description determine relevant area/length/depth of waterbody or terrestrial area
:param i: simulation number
:param ecosystem_type: type of assessment to be conducted
:param aquatic_body_type: source of dimensional data for area (EPA or User defined)
:param terrestrial_field_type: source of dimensional data for area (EPA or User defined)
:param *_width: default or user specified width of waterbody or terrestrial field
:param *_length: default or user specified length of waterbody or terrestrial field
:param *_depth: default or user specified depth of waterbody or terrestrial field
:NOTE all areas, i.e., ponds, wetlands, and terrestrial fields are of 1 hectare size; the user can elect
to specify a width other than the default width but it won't change the area size; thus for
user specified areas the length is calculated and not specified by the user
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_width = pd.Series([208.7, 208.7, 100., 400., 150., 0.], dtype='float')
expected_length = pd.Series([515.8, 515.8, 1076.39, 269.098, 717.593, 0.], dtype='float')
expected_depth = pd.Series([6.56, 0.4921, 7., 23., 0., 0.], dtype='float')
try:
agdrift_empty.ecosystem_type = pd.Series(['aquatic_assessment',
'aquatic_assessment',
'aquatic_assessment',
'aquatic_assessment',
'terrestrial_assessment',
'terrestrial_assessment'], dtype='object')
agdrift_empty.aquatic_body_type = pd.Series(['epa_defined_pond',
'epa_defined_wetland',
'user_defined_pond',
'user_defined_wetland',
'NaN',
'NaN'], dtype='object')
agdrift_empty.terrestrial_field_type = pd.Series(['NaN',
'NaN',
'NaN',
'NaN',
'user_defined_terrestrial',
'epa_defined_terrestrial'], dtype='object')
num_simulations = len(agdrift_empty.ecosystem_type)
agdrift_empty.default_width = 208.7
agdrift_empty.default_length = 515.8
agdrift_empty.default_pond_depth = 6.56
agdrift_empty.default_wetland_depth = 0.4921
agdrift_empty.user_pond_width = pd.Series(['NaN', 'NaN', 100., 'NaN', 'NaN', 'NaN'], dtype='float')
agdrift_empty.user_pond_depth = pd.Series(['NaN', 'NaN', 7., 'NaN', 'NaN', 'NaN'], dtype='float')
agdrift_empty.user_wetland_width = pd.Series(['NaN', 'NaN', 'NaN', 400., 'NaN', 'NaN'], dtype='float')
agdrift_empty.user_wetland_depth = pd.Series(['NaN','NaN', 'NaN', 23., 'NaN', 'NaN'], dtype='float')
agdrift_empty.user_terrestrial_width = pd.Series(['NaN', 'NaN', 'NaN', 'NaN', 150., 'NaN'], dtype='float')
width_result = pd.Series(num_simulations * ['NaN'], dtype='float')
length_result = pd.Series(num_simulations * ['NaN'], dtype='float')
depth_result = pd.Series(num_simulations * ['NaN'], dtype='float')
agdrift_empty.out_area_width = pd.Series(num_simulations * ['nan'], dtype='float')
agdrift_empty.out_area_length = pd.Series(num_simulations * ['nan'], dtype='float')
agdrift_empty.out_area_depth = pd.Series(num_simulations * ['nan'], dtype='float')
agdrift_empty.sqft_per_hectare = 107639
for i in range(num_simulations):
width_result[i], length_result[i], depth_result[i] = agdrift_empty.determine_area_dimensions(i)
npt.assert_allclose(width_result, expected_width, rtol=1e-5, atol=0, err_msg='', verbose=True)
npt.assert_allclose(length_result, expected_length, rtol=1e-5, atol=0, err_msg='', verbose=True)
npt.assert_allclose(depth_result, expected_depth, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [width_result, expected_width, length_result, expected_length, depth_result, expected_depth]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_calc_avg_dep_foa(self):
"""
:description calculation of average deposition over width of water body
:param integration_result result of integration of deposition curve across the distance
: beginning at the near distance and extending to the far distance of the water body
:param integration_distance effectively the width of the water body
:param avg_dep_foa average deposition rate across the width of the water body
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series([0.1538462, 0.5, 240.])
try:
integration_result = pd.Series([1.,125.,3e5], dtype='float')
integration_distance = pd.Series([6.5,250.,1250.], dtype='float')
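# Illustrative sanity check (not part of the original test): calc_avg_dep_foa
# appears to compute integration_result / integration_distance, e.g.
# 1. / 6.5 = 0.1538462, matching expected_result above.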
result = agdrift_empty.calc_avg_dep_foa(integration_result, integration_distance)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_calc_avg_dep_lbac(self):
"""
Deposition calculation.
:param avg_dep_foa: average deposition over width of water body as fraction of applied
:param application_rate: actual application rate
:param avg_dep_lbac: average deposition over width of water body in lbs per acre
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series([6.5, 3.125e4, 3.75e8])
try:
avg_dep_foa = pd.Series([1.,125.,3e5], dtype='float')
application_rate = pd.Series([6.5,250.,1250.], dtype='float')
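# Illustrative sanity check (not part of the original test): calc_avg_dep_lbac
# appears to compute avg_dep_foa * application_rate, e.g. 125. * 250. = 3.125e4,
# matching expected_result above.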
result = agdrift_empty.calc_avg_dep_lbac(avg_dep_foa, application_rate)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_calc_avg_dep_foa_from_lbac(self):
"""
Deposition calculation.
:param avg_dep_foa: average deposition over width of water body as fraction of applied
:param application_rate: actual application rate
:param avg_dep_lbac: average deposition over width of water body in lbs per acre
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series([1.553846e-01, 8.8e-06, 4.e-08])
try:
avg_dep_lbac = pd.Series([1.01, 0.0022, 0.00005], dtype='float')
application_rate = pd.Series([6.5,250.,1250.], dtype='float')
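# Illustrative sanity check (not part of the original test): this conversion
# appears to be avg_dep_lbac / application_rate, e.g. 1.01 / 6.5 = 1.553846e-01,
# matching expected_result above.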
result = agdrift_empty.calc_avg_dep_foa_from_lbac(avg_dep_lbac, application_rate)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_calc_avg_dep_lbac_from_gha(self):
"""
Deposition calculation.
:param avg_dep_gha: average deposition over width of water body in units of grams/hectare
:param gms_per_lb: conversion factor to convert lbs to grams
:param acres_per_hectare: conversion factor to convert hectares to acres
:param avg_dep_lbac: average deposition over width of water body in lbs per acre
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series([0.01516739, 0.111524, 0.267659])
try:
avg_dep_gha = pd.Series([17., 125., 3e2], dtype='float')
agdrift_empty.gms_per_lb = 453.592
agdrift_empty.acres_per_hectare = 2.471
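# Illustrative sanity check (not part of the original test): g/ha appears to be
# converted to lbs/acre by dividing by gms_per_lb and acres_per_hectare, e.g.
# 17. / 453.592 / 2.471 = 0.0151674, matching expected_result above.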
result = agdrift_empty.calc_avg_dep_lbac_from_gha(avg_dep_gha)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_calc_avg_dep_lbac_from_waterconc_ngl(self):
"""
:description calculate the average deposition onto the pond/wetland/field
:param avg_dep_lbac: average deposition over width of water body in lbs per acre
:param area_width: average width of water body
:param area_length: average length of water body
:param area_depth: average depth of water body
:param gms_per_lb: conversion factor to convert lbs to grams
:param ng_per_gram conversion factor
:param sqft_per_acre conversion factor
:param liters_per_ft3 conversion factor
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series([2.311455e-05, 2.209479e-03, 2.447423e-03])
try:
avg_waterconc_ngl = pd.Series([17., 125., 3e2], dtype='float')
area_width = pd.Series([50., 200., 500.], dtype='float')
area_length = pd.Series([6331., 538., 215.], dtype='float')
area_depth = pd.Series([0.5, 6.5, 3.], dtype='float')
agdrift_empty.liters_per_ft3 = 28.3168
agdrift_empty.sqft_per_acre = 43560.
agdrift_empty.ng_per_gram = 1.e9
agdrift_empty.gms_per_lb = 453.592
agdrift_empty.acres_per_hectare = 2.471
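# Illustrative sanity check (not part of the original test): ng/L appears to be
# converted to lbs/acre as
#   conc * liters_per_ft3 * area_depth * sqft_per_acre / ng_per_gram / gms_per_lb
# e.g. 17. * 28.3168 * 0.5 * 43560. / 1.e9 / 453.592 = 2.3115e-05, matching
# expected_result above (the expected values appear independent of width and length).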
result = agdrift_empty.calc_avg_dep_lbac_from_waterconc_ngl(avg_waterconc_ngl, area_width,
area_length, area_depth)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_calc_avg_dep_lbac_from_mgcm2(self):
"""
:description calculate the average deposition of pesticide over the terrestrial field in lbs/acre
:param avg_dep_lbac: average deposition over width of water body in lbs per acre
:param area_depth: average depth of water body
:param gms_per_lb: conversion factor to convert lbs to grams
:param mg_per_gram conversion factor
:param sqft_per_acre conversion factor
:param cm2_per_ft2 conversion factor
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series([2.676538e-02, 2.2304486, 44.608973])
try:
avg_fielddep_mgcm2 = pd.Series([3.e-4, 2.5e-2, 5.e-01])
agdrift_empty.sqft_per_acre = 43560.
agdrift_empty.gms_per_lb = 453.592
agdrift_empty.cm2_per_ft2 = 929.03
agdrift_empty.mg_per_gram = 1.e3
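# Illustrative sanity check (not part of the original test): mg/cm2 appears to be
# converted to lbs/acre as
#   dep * cm2_per_ft2 * sqft_per_acre / mg_per_gram / gms_per_lb
# e.g. 3.e-4 * 929.03 * 43560. / 1.e3 / 453.592 = 2.676538e-02,
# matching expected_result above.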
result = agdrift_empty.calc_avg_dep_lbac_from_mgcm2(avg_fielddep_mgcm2)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_calc_avg_dep_gha(self):
"""
:description average deposition over width of water body in grams per hectare
:param avg_dep_lbac: average deposition over width of water body in lbs per acre
:param gms_per_lb: conversion factor to convert lbs to grams
:param acres_per_hectare: conversion factor to convert acres to hectares
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series([1.401061, 0.3648362, 0.03362546])
try:
avg_dep_lbac = pd.Series([1.25e-3,3.255e-4,3e-5], dtype='float')
agdrift_empty.gms_per_lb = 453.592
agdrift_empty.acres_per_hectare = 2.47105
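# Illustrative sanity check (not part of the original test): lbs/acre appears to be
# converted to g/ha by multiplying by gms_per_lb and acres_per_hectare, e.g.
# 1.25e-3 * 453.592 * 2.47105 = 1.401061, matching expected_result above.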
result = agdrift_empty.calc_avg_dep_gha(avg_dep_lbac)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_calc_avg_waterconc_ngl(self):
"""
:description calculate the average concentration of pesticide in the pond/wetland
:param avg_dep_lbac: average deposition over width of water body in lbs per acre
:param area_width: average width of water body
:param area_length: average length of water body
:param area_depth: average depth of water body
:param gms_per_lb: conversion factor to convert lbs to grams
:param ng_per_gram conversion factor
:param sqft_per_acre conversion factor
:param liters_per_ft3 conversion factor
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series([70.07119, 18.24654, 22.41823])
try:
avg_dep_lbac = pd.Series([1.25e-3,3.255e-4,3e-5], dtype='float')
area_width = pd.Series([6.56, 208.7, 997.], dtype='float')
area_length = pd.Series([1.640838e4, 515.7595, 107.9629], dtype='float')
area_depth = pd.Series([6.56, 6.56, 0.4921], dtype='float')
agdrift_empty.ng_per_gram = 1.e9
agdrift_empty.liters_per_ft3 = 28.3168
agdrift_empty.gms_per_lb = 453.592
agdrift_empty.sqft_per_acre = 43560.
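# Illustrative sanity check (not part of the original test): lbs/acre appears to be
# converted to ng/L as
#   dep * gms_per_lb * ng_per_gram / sqft_per_acre / (area_depth * liters_per_ft3)
# e.g. 1.25e-3 * 453.592 * 1.e9 / 43560. / (6.56 * 28.3168) = 70.07,
# matching expected_result above.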
result = agdrift_empty.calc_avg_waterconc_ngl(avg_dep_lbac ,area_width, area_length, area_depth)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_calc_avg_fielddep_mgcm2(self):
"""
:description calculate the average deposition of pesticide over the terrestrial field
:param avg_dep_lbac: average deposition over width of water body in lbs per acre
:param area_depth: average depth of water body
:param gms_per_lb: conversion factor to convert lbs to grams
:param mg_per_gram conversion factor
:param sqft_per_acre conversion factor
:param cm2_per_ft2 conversion factor
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series([1.401063e-5, 3.648369e-6, 3.362552e-7])
try:
avg_dep_lbac = pd.Series([1.25e-3,3.255e-4,3e-5], dtype='float')
agdrift_empty.gms_per_lb = 453.592
agdrift_empty.sqft_per_acre = 43560.
agdrift_empty.mg_per_gram = 1.e3
agdrift_empty.cm2_per_ft2 = 929.03
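# Illustrative sanity check (not part of the original test): lbs/acre appears to be
# converted to mg/cm2 as
#   dep * gms_per_lb * mg_per_gram / sqft_per_acre / cm2_per_ft2
# e.g. 1.25e-3 * 453.592 * 1.e3 / 43560. / 929.03 = 1.401063e-5,
# matching expected_result above.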
result = agdrift_empty.calc_avg_fielddep_mgcm2(avg_dep_lbac)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_generate_running_avg(self):
"""
:description retrieves values for distance and a deposition scenario from the sql database and generates a running average over a specified width
:param num_db_values: number of distance values to be retrieved
:param distance_name: name of column in sql database that contains the distance values
:NOTE any blank fields are filled with 'nan'
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
location = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
agdrift_empty.db_name = os.path.join(location, 'sqlite_agdrift_distance.db')
agdrift_empty.db_table = 'output'
expected_result_x = pd.Series([], dtype='float')
expected_result_y = pd.Series([], dtype='float')
expected_result_npts = pd.Series([], dtype='object')
x_array_in = pd.Series([], dtype='float')
y_array_in = pd.Series([], dtype='float')
x_array_out = pd.Series([], dtype='float')
y_array_out = pd.Series([], dtype='float')
try:
expected_result_x = [0.,0.102525,0.20505,0.4101,0.8202,1.6404,3.2808,4.9212,6.5616,9.8424,13.1232,19.6848,26.2464,
32.808,39.3696,45.9312,52.4928,59.0544,65.616,72.1776,78.7392,85.3008,91.8624,98.424,104.9856,
111.5472,118.1088,124.6704,131.232,137.7936,144.3552,150.9168,157.4784,164.04,170.6016,177.1632,
183.7248,190.2864,196.848,203.4096,209.9712,216.5328,223.0944,229.656,236.2176,242.7792,249.3408,
255.9024,262.464,269.0256,275.5872,282.1488,288.7104,295.272,301.8336,308.3952,314.9568,321.5184,
328.08,334.6416,341.2032,347.7648,354.3264,360.888,367.4496,374.0112,380.5728,387.1344,393.696,
400.2576,406.8192,413.3808,419.9424,426.504,433.0656,439.6272,446.1888,452.7504,459.312,465.8736,
472.4352,478.9968,485.5584,492.12,498.6816,505.2432,511.8048,518.3664,524.928,531.4896,538.0512,
544.6128,551.1744,557.736,564.2976,570.8592,577.4208,583.9824,590.544,597.1056,603.6672,610.2288,
616.7904,623.352,629.9136,636.4752,643.0368,649.5984,656.16,662.7216,669.2832,675.8448,682.4064,
688.968,695.5296,702.0912,708.6528,715.2144,721.776,728.3376,734.8992,741.4608,748.0224,754.584,
761.1456,767.7072,774.2688,780.8304,787.392,793.9536,800.5152,807.0768,813.6384,820.2,826.7616,
833.3232,839.8848,846.4464,853.008,859.5696,866.1312,872.6928,879.2544,885.816,892.3776,898.9392,
905.5008,912.0624,918.624,925.1856,931.7472,938.3088,944.8704,951.432,957.9936,964.5552,971.1168,
977.6784,984.24,990.8016]
expected_result_y = [0.364712246,0.351507467,0.339214283,0.316974687,0.279954504,0.225948786,0.159949625,
0.123048839,0.099781801,0.071666234,0.056352938,0.03860139,0.029600805,0.024150524,
0.020550354,0.01795028,0.015967703,0.014467663,0.013200146,0.01215011,0.011300098,
0.010550085,0.009905072,0.009345065,0.008845057,0.008400051,0.008000046,0.007635043,
0.007300039,0.007000034,0.006725033,0.00646503,0.006230027,0.006010027,0.005805023,
0.005615023,0.005435021,0.00527002,0.00511002,0.004960017,0.004820017,0.004685016,
0.004560015,0.004440015,0.004325013,0.004220012,0.004120012,0.004020012,0.003925011,
0.003835011,0.00375001,0.00367001,0.00359001,0.00351001,0.003435009,0.003365009,
0.003300007,0.003235009,0.003170007,0.003110007,0.003055006,0.003000007,0.002945006,
0.002895006,0.002845006,0.002795006,0.002745006,0.002695006,0.002650005,0.002610005,
0.002570005,0.002525006,0.002485004,0.002450005,0.002410005,0.002370005,0.002335004,
0.002300005,0.002265004,0.002235004,0.002205004,0.002175004,0.002145004,0.002115004,
0.002085004,0.002055004,0.002025004,0.002000002,0.001975004,0.001945004,0.001920002,
0.001900002,0.001875004,0.001850002,0.001830002,0.001805004,0.001780002,0.001760002,
0.001740002,0.001720002,0.001700002,0.001680002,0.001660002,0.001640002,0.001620002,
0.001605001,0.001590002,0.001570002,0.001550002,0.001535001,0.001520002,0.001500002,
0.001485001,0.001470002,0.001455001,0.001440002,0.001425001,0.001410002,0.001395001,
0.001385001,0.001370002,0.001355001,0.001340002,0.001325001,0.001315001,0.001305001,
0.001290002,0.001275001,0.001265001,0.001255001,0.001245001,0.001230002,0.001215001,
0.001205001,0.001195001,0.001185001,0.001175001,0.001165001,0.001155001,0.001145001,
0.001135001,0.001125001,0.001115001,0.001105001,0.001095001,0.001085001,0.001075001,
0.001065001,0.00106,0.001055001,0.001045001,0.001035001,0.001025001,0.001015001,
0.001005001,0.0009985,0.000993001,0.000985001,0.000977001,0.000969501]
expected_result_npts = 160
x_dist = 6.56
agdrift_empty.distance_name = 'distance_ft'
agdrift_empty.scenario_name = 'ground_low_vf'
agdrift_empty.num_db_values = 161
x_array_in = agdrift_empty.get_distances(agdrift_empty.num_db_values)
y_array_in = agdrift_empty.get_scenario_deposition_data(agdrift_empty.scenario_name, agdrift_empty.num_db_values)
x_array_out, y_array_out, npts_out = agdrift_empty.generate_running_avg(agdrift_empty.num_db_values,
x_array_in, y_array_in, x_dist)
# write output arrays to excel file -- just for debugging
agdrift_empty.write_arrays_to_csv(x_array_out, y_array_out, "output_array_generate.csv")
npt.assert_array_equal(expected_result_npts, npts_out, verbose=True)
npt.assert_allclose(x_array_out, expected_result_x, rtol=1e-5, atol=0, err_msg='', verbose=True)
npt.assert_allclose(y_array_out, expected_result_y, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
pass
tab1 = [x_array_out, expected_result_x]
tab2 = [y_array_out, expected_result_y]
print("\n")
print(inspect.currentframe().f_code.co_name)
print('expected {0} points and got {1} points'.format(expected_result_npts, npts_out))
print("x_array result/x_array_expected")
print(tabulate(tab1, headers='keys', tablefmt='rst'))
print("y_array result/y_array_expected")
print(tabulate(tab2, headers='keys', tablefmt='rst'))
return
def test_generate_running_avg1(self):
"""
:description creates a running average for a specified x axis width (e.g., 7-day average values of an array)
:param x_array_in: array of x-axis values
:param y_array_in: array of y-axis values
:param num_db_values: number of points in the input arrays
:param x_array_out: array of x-axis values in output array
:param y_array_out: array of y-axis values in output array
:param npts_out: number of points in the output array
:param x_dist: width in x_axis units of running weighted average
:param num_db_values: number of distance values to be retrieved
:param distance_name: name of column in sql database that contains the distance values
:NOTE This test uses a uniformly spaced x_array and monotonically increasing y_array
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result_x = pd.Series([], dtype='float')
expected_result_y = pd.Series([], dtype='float')
expected_result_npts = pd.Series([], dtype='object')
x_array_in = pd.Series([], dtype='float')
y_array_in = pd.Series([], dtype='float')
x_array_out = pd.Series([], dtype='float')
y_array_out = pd.Series([], dtype='float')
try:
expected_result_x = [0.,1.,2.,3.,4.,5.,6.,7.,8.,9.,10.,
11.,12.,13.,14.,15.,16.,17.,18.,19.,20.,
21.,22.,23.,24.,25.,26.,27.,28.,29.,30.,
31.,32.,33.,34.,35.,36.,37.,38.,39.,40.,
41.,42.,43.,44.]
expected_result_y = [2.5,3.5,4.5,5.5,6.5,7.5,8.5,9.5,10.5,11.5,
12.5,13.5,14.5,15.5,16.5,17.5,18.5,19.5,20.5,21.5,
22.5,23.5,24.5,25.5,26.5,27.5,28.5,29.5,30.5,31.5,
32.5,33.5,34.5,35.5,36.5,37.5,38.5,39.5,40.5,41.5,
42.5,43.5,44.5,45.5, 46.5]
expected_result_npts = 45
x_dist = 5.
num_db_values = 51
x_array_in = [0.,1.,2.,3.,4.,5.,6.,7.,8.,9.,10.,
11.,12.,13.,14.,15.,16.,17.,18.,19.,20.,
21.,22.,23.,24.,25.,26.,27.,28.,29.,30.,
31.,32.,33.,34.,35.,36.,37.,38.,39.,40.,
41.,42.,43.,44.,45.,46.,47.,48.,49.,50.]
y_array_in = [0.,1.,2.,3.,4.,5.,6.,7.,8.,9.,10.,
11.,12.,13.,14.,15.,16.,17.,18.,19.,20.,
21.,22.,23.,24.,25.,26.,27.,28.,29.,30.,
31.,32.,33.,34.,35.,36.,37.,38.,39.,40.,
41.,42.,43.,44.,45.,46.,47.,48.,49.,50.]
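# Illustrative sanity check (not part of the original test): with y = x on a
# uniform grid and x_dist = 5, the running average at x = 0 is the area under
# y from 0 to 5 divided by 5, i.e. (0.5 * 5. * 5.) / 5. = 2.5, which matches
# expected_result_y[0] above.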
x_array_out, y_array_out, npts_out = agdrift_empty.generate_running_avg(num_db_values, x_array_in,
y_array_in, x_dist)
npt.assert_array_equal(expected_result_npts, npts_out, verbose=True)
npt.assert_allclose(x_array_out, expected_result_x, rtol=1e-5, atol=0, err_msg='', verbose=True)
npt.assert_allclose(y_array_out, expected_result_y, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
pass
tab1 = [x_array_out, expected_result_x]
tab2 = [y_array_out, expected_result_y]
print("\n")
print(inspect.currentframe().f_code.co_name)
print('expected {0} points and got {1} points'.format(expected_result_npts, npts_out))
print(tabulate(tab1, headers='keys', tablefmt='rst'))
print(tabulate(tab2, headers='keys', tablefmt='rst'))
return
def test_generate_running_avg2(self):
"""
:description creates a running average for a specified x axis width (e.g., 7-day average values of an array)
:param x_array_in: array of x-axis values
:param y_array_in: array of y-axis values
:param num_db_values: number of points in the input arrays
:param x_array_out: array of x-axis values in output array
:param y_array_out: array of y-axis values in output array
:param npts_out: number of points in the output array
:param x_dist: width in x_axis units of running weighted average
:param num_db_values: number of distance values to be retrieved
:param distance_name: name of column in sql database that contains the distance values
:NOTE This test uses a non-uniformly spaced x_array and monotonically increasing y_array
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result_x = pd.Series([], dtype='float')
expected_result_y = pd.Series([], dtype='float')
expected_result_npts = pd.Series([], dtype='object')
x_array_in = pd.Series([], dtype='float')
y_array_in = pd.Series([], dtype='float')
x_array_out = pd.Series([], dtype='float')
y_array_out = pd.Series([], dtype='float')
try:
expected_result_x = [0.,1.,2.,3.,4.,5.,6.,7.,8.,9.,10.,
11.5,12.,13.,14.,15.,16.,17.,18.,19.,20.,
21.5,22.,23.,24.,25.,26.,27.,28.,29.,30.,
31.5,32.,33.,34.,35.,36.,37.,38.,39.,40.,
41.5,42.,43.,44.]
expected_result_y = [2.5,3.5,4.5,5.5,6.5,7.5,8.4666667,9.4,10.4,11.4,
12.4,13.975,14.5,15.5,16.5,17.5,18.466666667,19.4,20.4,21.4,
22.4,23.975,24.5,25.5,26.5,27.5,28.46666667,29.4,30.4,31.4,
32.4,33.975,34.5,35.5,36.5,37.5,38.466666667,39.4,40.4,41.4,
42.4,43.975,44.5,45.5, 46.5]
expected_result_npts = 45
x_dist = 5.
agdrift_empty.num_db_values = 51
x_array_in = [0.,1.,2.,3.,4.,5.,6.,7.,8.,9.,10.,
11.5,12.,13.,14.,15.,16.,17.,18.,19.,20.,
21.5,22.,23.,24.,25.,26.,27.,28.,29.,30.,
31.5,32.,33.,34.,35.,36.,37.,38.,39.,40.,
41.5,42.,43.,44.,45.,46.,47.,48.,49.,50.]
y_array_in = [0.,1.,2.,3.,4.,5.,6.,7.,8.,9.,10.,
11.,12.,13.,14.,15.,16.,17.,18.,19.,20.,
21.,22.,23.,24.,25.,26.,27.,28.,29.,30.,
31.,32.,33.,34.,35.,36.,37.,38.,39.,40.,
41.,42.,43.,44.,45.,46.,47.,48.,49.,50.]
x_array_out, y_array_out, npts_out = agdrift_empty.generate_running_avg(agdrift_empty.num_db_values,
x_array_in, y_array_in, x_dist)
npt.assert_array_equal(expected_result_npts, npts_out, verbose=True)
npt.assert_allclose(x_array_out, expected_result_x, rtol=1e-5, atol=0, err_msg='', verbose=True)
npt.assert_allclose(y_array_out, expected_result_y, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
pass
tab1 = [x_array_out, expected_result_x]
tab2 = [y_array_out, expected_result_y]
print("\n")
print(inspect.currentframe().f_code.co_name)
print('expected {0} points and got {1} points'.format(expected_result_npts, npts_out))
print(tabulate(tab1, headers='keys', tablefmt='rst'))
print(tabulate(tab2, headers='keys', tablefmt='rst'))
return
def test_generate_running_avg3(self):
"""
:description creates a running average for a specified x axis width (e.g., 7-day average values of an array);
averages reflect weighted average assuming linearity between x points;
average is calculated as the area under the y-curve beginning at each x point and extending out x_dist
divided by x_dist (which yields the weighted average y between the relevant x points)
:param x_array_in: array of x-axis values
:param y_array_in: array of y-axis values
:param num_db_values: number of points in the input arrays
:param x_array_out: array of x-axis values in output array
:param y_array_out: array of y-axis values in output array
:param npts_out: number of points in the output array
:param x_dist: width in x_axis units of running weighted average
:param num_db_values: number of distance values to be retrieved
:param distance_name: name of column in sql database that contains the distance values
:NOTE This test uses a monotonically increasing y_array and inserts a gap in the x values
that is greater than x_dist
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result_x = pd.Series([], dtype='float')
expected_result_y = pd.Series([], dtype='float')
expected_result_npts = pd.Series([], dtype='object')
x_array_in = pd.Series([], dtype='float')
y_array_in = pd.Series([], dtype='float')
x_array_out = pd.Series([], dtype='float')
y_array_out = pd.Series([], dtype='float')
try:
expected_result_x = [0.,1.,2.,3.,4.,5.,6.,7.,16.,17.,18.,19.,20.,
21.,22.,23.,24.,25.,26.,27.,28.,29.,30.,
31.,32.,33.,34.,35.,36.,37.,38.,39.,40.,
41.,42.,43.,44.,45.,46.,47.,48.,49.,50.,51.,52.]
expected_result_y = [2.5,3.5,4.5,5.4111111,6.14444444,6.7,7.07777777,7.277777777,10.5,11.5,
12.5,13.5,14.5,15.5,16.5,17.5,18.5,19.5,20.5,21.5,
22.5,23.5,24.5,25.5,26.5,27.5,28.5,29.5,30.5,31.5,
32.5,33.5,34.5,35.5,36.5,37.5,38.5,39.5,40.5,41.5,
42.5,43.5,44.5,45.5, 46.5]
expected_result_npts = 45
x_dist = 5.
num_db_values = 51
x_array_in = [0.,1.,2.,3.,4.,5.,6.,7.,16.,17.,18.,19.,20.,
21.,22.,23.,24.,25.,26.,27.,28.,29.,30.,
31.,32.,33.,34.,35.,36.,37.,38.,39.,40.,
41.,42.,43.,44.,45.,46.,47.,48.,49.,50.,
51.,52.,53.,54.,55.,56.,57.,58.]
y_array_in = [0.,1.,2.,3.,4.,5.,6.,7.,8.,9.,10.,
11.,12.,13.,14.,15.,16.,17.,18.,19.,20.,
21.,22.,23.,24.,25.,26.,27.,28.,29.,30.,
31.,32.,33.,34.,35.,36.,37.,38.,39.,40.,
41.,42.,43.,44.,45.,46.,47.,48.,49.,50.]
x_array_out, y_array_out, npts_out = agdrift_empty.generate_running_avg(num_db_values, x_array_in,
y_array_in, x_dist)
npt.assert_array_equal(expected_result_npts, npts_out, verbose=True)
npt.assert_allclose(x_array_out, expected_result_x, rtol=1e-5, atol=0, err_msg='', verbose=True)
npt.assert_allclose(y_array_out, expected_result_y, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
pass
tab1 = [x_array_out, expected_result_x]
tab2 = [y_array_out, expected_result_y]
print("\n")
print(inspect.currentframe().f_code.co_name)
print('expected {0} points and got {1} points'.format(expected_result_npts, npts_out))
print(tabulate(tab1, headers='keys', tablefmt='rst'))
print(tabulate(tab2, headers='keys', tablefmt='rst'))
return
def test_locate_integrated_avg(self):
"""
:description retrieves values for distance and the first deposition scenario from the sql database
and generates running weighted averages from the first x,y value until it locates the user
specified integrated average of interest
:param num_db_values: number of distance values to be retrieved
:param distance_name: name of column in sql database that contains the distance values
:NOTE any blank fields are filled with 'nan'
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
location = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
agdrift_empty.db_name = os.path.join(location, 'sqlite_agdrift_distance.db')
agdrift_empty.db_table = 'output'
expected_result_x = pd.Series([], dtype='float')
expected_result_y = pd.Series([], dtype='float')
expected_result_npts = pd.Series([], dtype='object')
x_array_in = pd.Series([], dtype='float')
y_array_in = pd.Series([], dtype='float')
x_array_out = pd.Series([], dtype='float')
y_array_out = pd.Series([], dtype='float')
try:
expected_result_x = [0.,0.102525,0.20505,0.4101,0.8202,1.6404,3.2808,4.9212,6.5616,9.8424,13.1232,19.6848,26.2464,
32.808,39.3696,45.9312,52.4928,59.0544,65.616,72.1776,78.7392,85.3008,91.8624,98.424,104.9856,
111.5472,118.1088,124.6704,131.232,137.7936,144.3552,150.9168,157.4784,164.04,170.6016,177.1632,
183.7248,190.2864,196.848,203.4096,209.9712,216.5328,223.0944,229.656,236.2176,242.7792,249.3408,
255.9024,262.464,269.0256,275.5872,282.1488,288.7104,295.272,301.8336,308.3952,314.9568,321.5184,
328.08,334.6416,341.2032,347.7648,354.3264,360.888,367.4496,374.0112,380.5728,387.1344,393.696,
400.2576,406.8192,413.3808,419.9424,426.504,433.0656,439.6272,446.1888,452.7504,459.312,465.8736,
472.4352,478.9968,485.5584,492.12,498.6816,505.2432,511.8048,518.3664,524.928,531.4896,538.0512,
544.6128,551.1744,557.736,564.2976,570.8592,577.4208,583.9824,590.544,597.1056,603.6672,610.2288,
616.7904,623.352,629.9136,636.4752,643.0368,649.5984,656.16,662.7216,669.2832,675.8448,682.4064,
688.968,695.5296,702.0912,708.6528,715.2144,721.776,728.3376,734.8992,741.4608,748.0224,754.584,
761.1456,767.7072,774.2688,780.8304,787.392,793.9536,800.5152,807.0768,813.6384,820.2,826.7616,
833.3232,839.8848,846.4464,853.008,859.5696,866.1312,872.6928,879.2544,885.816,892.3776,898.9392,
905.5008,912.0624,918.624,925.1856,931.7472,938.3088,944.8704,951.432,957.9936,964.5552,971.1168,
977.6784,984.24,990.8016]
expected_result_y = [0.364712246,0.351507467,0.339214283,0.316974687,0.279954504,0.225948786,0.159949625,
0.123048839,0.099781801,0.071666234,0.056352938,0.03860139,0.029600805,0.024150524,
0.020550354,0.01795028,0.015967703,0.014467663,0.013200146,0.01215011,0.011300098,
0.010550085,0.009905072,0.009345065,0.008845057,0.008400051,0.008000046,0.007635043,
0.007300039,0.007000034,0.006725033,0.00646503,0.006230027,0.006010027,0.005805023,
0.005615023,0.005435021,0.00527002,0.00511002,0.004960017,0.004820017,0.004685016,
0.004560015,0.004440015,0.004325013,0.004220012,0.004120012,0.004020012,0.003925011,
0.003835011,0.00375001,0.00367001,0.00359001,0.00351001,0.003435009,0.003365009,
0.003300007,0.003235009,0.003170007,0.003110007,0.003055006,0.003000007,0.002945006,
0.002895006,0.002845006,0.002795006,0.002745006,0.002695006,0.002650005,0.002610005,
0.002570005,0.002525006,0.002485004,0.002450005,0.002410005,0.002370005,0.002335004,
0.002300005,0.002265004,0.002235004,0.002205004,0.002175004,0.002145004,0.002115004,
0.002085004,0.002055004,0.002025004,0.002000002,0.001975004,0.001945004,0.001920002,
0.001900002,0.001875004,0.001850002,0.001830002,0.001805004,0.001780002,0.001760002,
0.001740002,0.001720002,0.001700002,0.001680002,0.001660002,0.001640002,0.001620002,
0.001605001,0.001590002,0.001570002,0.001550002,0.001535001,0.001520002,0.001500002,
0.001485001,0.001470002,0.001455001,0.001440002,0.001425001,0.001410002,0.001395001,
0.001385001,0.001370002,0.001355001,0.001340002,0.001325001,0.001315001,0.001305001,
0.001290002,0.001275001,0.001265001,0.001255001,0.001245001,0.001230002,0.001215001,
0.001205001,0.001195001,0.001185001,0.001175001,0.001165001,0.001155001,0.001145001,
0.001135001,0.001125001,0.001115001,0.001105001,0.001095001,0.001085001,0.001075001,
0.001065001,0.00106,0.001055001,0.001045001,0.001035001,0.001025001,0.001015001,
0.001005001,0.0009985,0.000993001,0.000985001,0.000977001,0.000969501]
expected_result_npts = 160
expected_x_dist_of_interest = 990.8016
x_dist = 6.56
weighted_avg = 0.0009697 #this is the running average value we're looking for
agdrift_empty.distance_name = 'distance_ft'
agdrift_empty.scenario_name = 'ground_low_vf'
agdrift_empty.num_db_values = 161
agdrift_empty.find_nearest_x = True
x_array_in = agdrift_empty.get_distances(agdrift_empty.num_db_values)
y_array_in = agdrift_empty.get_scenario_deposition_data(agdrift_empty.scenario_name, agdrift_empty.num_db_values)
x_array_out, y_array_out, npts_out, x_dist_of_interest, range_chk = \
agdrift_empty.locate_integrated_avg(agdrift_empty.num_db_values, x_array_in, y_array_in, x_dist, weighted_avg)
npt.assert_array_equal(expected_x_dist_of_interest, x_dist_of_interest, verbose=True)
npt.assert_array_equal(expected_result_npts, npts_out, verbose=True)
npt.assert_allclose(x_array_out, expected_result_x, rtol=1e-5, atol=0, err_msg='', verbose=True)
npt.assert_allclose(y_array_out, expected_result_y, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
pass
tab1 = [x_array_out, expected_result_x]
tab2 = [y_array_out, expected_result_y]
print("\n")
print(inspect.currentframe().f_code.co_name)
print('expected {0} x-units to area and got {1} '.format(expected_x_dist_of_interest, x_dist_of_interest))
print('expected {0} number of points and got {1} points'.format(expected_result_npts, npts_out))
print("x_array result/x_array_expected")
print(tabulate(tab1, headers='keys', tablefmt='rst'))
print("y_array result/y_array_expected")
print(tabulate(tab2, headers='keys', tablefmt='rst'))
return
def test_locate_integrated_avg1(self):
"""
:description retrieves values for distance and the first deposition scenario from the sql database
:param num_db_values: number of distance values to be retrieved
:param distance_name: name of column in sql database that contains the distance values
:NOTE this test is for a monotonically increasing function with some irregularity in x-axis points
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result_x = pd.Series([], dtype='float')
expected_result_y = pd.Series([], dtype='float')
x_array_in = pd.Series([], dtype='float')
y_array_in = | pd.Series([], dtype='float') | pandas.Series |
import pandas as pd
import math
import linecache
import numpy as np
from scipy import stats
from parameter_cal import cf
from dtw import dtw
import os
from scipy.misc import *
from sdtw.config import sub_len, nBlocks
from sdtw.utils import cal_descriptor, samplingSequences, norm
from parameter_cal.utils import get_fact_align, get_reverse_dict, get_SS2, get_SS1, get_link_graph, load_data
from parameter_cal.utils import load_data, cal_warped_signals, write_result_file
from downsample.utils import get_true_aligned, get_group_number, get_k_accuracy, get_warped_signals
def pkg_shapedtw(file_name, line_num, df):
file_name = 'data/' + file_name
y_list = load_data(file_name, line_num)
query, reference = cal_warped_signals(y_list)
    # obtain the warped signal values (x positions and interpolated y) for the query
xvals, yinterp = get_warped_signals(query, cf.ds_time)
# normalize the signal
reference_norm = stats.zscore(reference['q'])
yinterp_norm = stats.zscore(yinterp)
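    # z-scoring puts the reference and the interpolated query on a comparable
    # scale, a common preprocessing step before any DTW-style alignment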
# store the corresponding point pair
query.drop(['shift', 't'], axis=1)
query2 = | pd.DataFrame({'t': xvals, 'q': yinterp}) | pandas.DataFrame |
import nose
import os
import numpy as np
import pandas as pd
from pandas import (merge_asof, read_csv,
to_datetime, Timedelta)
from pandas.tools.merge import MergeError
from pandas.util import testing as tm
from pandas.util.testing import assert_frame_equal
class TestAsOfMerge(tm.TestCase):
_multiprocess_can_split_ = True
def read_data(self, name, dedupe=False):
path = os.path.join(tm.get_data_path(), name)
x = read_csv(path)
if dedupe:
x = (x.drop_duplicates(['time', 'ticker'], keep='last')
.reset_index(drop=True)
)
x.time = to_datetime(x.time)
return x
def setUp(self):
self.trades = self.read_data('trades.csv')
self.quotes = self.read_data('quotes.csv', dedupe=True)
self.asof = self.read_data('asof.csv')
self.tolerance = self.read_data('tolerance.csv')
self.allow_exact_matches = self.read_data('allow_exact_matches.csv')
self.allow_exact_matches_and_tolerance = self.read_data(
'allow_exact_matches_and_tolerance.csv')
def test_examples1(self):
""" doc-string examples """
left = pd.DataFrame({'a': [1, 5, 10],
'left_val': ['a', 'b', 'c']})
right = pd.DataFrame({'a': [1, 2, 3, 6, 7],
'right_val': [1, 2, 3, 6, 7]})
pd.merge_asof(left, right, on='a')
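        # For reference (not asserted here): with the default backward search,
        # each left 'a' is matched to the last right row whose 'a' <= it, so the
        # merge above is expected to pick right_val 1, 3 and 7 for a = 1, 5, 10.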
def test_examples2(self):
""" doc-string examples """
trades = pd.DataFrame({
'time': pd.to_datetime(['20160525 13:30:00.023',
'20160525 13:30:00.038',
'20160525 13:30:00.048',
'20160525 13:30:00.048',
'20160525 13:30:00.048']),
'ticker': ['MSFT', 'MSFT',
'GOOG', 'GOOG', 'AAPL'],
'price': [51.95, 51.95,
720.77, 720.92, 98.00],
'quantity': [75, 155,
100, 100, 100]},
columns=['time', 'ticker', 'price', 'quantity'])
quotes = pd.DataFrame({
'time': pd.to_datetime(['20160525 13:30:00.023',
'20160525 13:30:00.023',
'20160525 13:30:00.030',
'20160525 13:30:00.041',
'20160525 13:30:00.048',
'20160525 13:30:00.049',
'20160525 13:30:00.072',
'20160525 13:30:00.075']),
'ticker': ['GOOG', 'MSFT', 'MSFT',
'MSFT', 'GOOG', 'AAPL', 'GOOG',
'MSFT'],
'bid': [720.50, 51.95, 51.97, 51.99,
720.50, 97.99, 720.50, 52.01],
'ask': [720.93, 51.96, 51.98, 52.00,
720.93, 98.01, 720.88, 52.03]},
columns=['time', 'ticker', 'bid', 'ask'])
pd.merge_asof(trades, quotes,
on='time',
by='ticker')
pd.merge_asof(trades, quotes,
on='time',
by='ticker',
tolerance=pd.Timedelta('2ms'))
pd.merge_asof(trades, quotes,
on='time',
by='ticker',
tolerance=pd.Timedelta('10ms'),
allow_exact_matches=False)
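        # Rough semantics of the calls above: `tolerance` caps how far back an
        # earlier quote may be and still match a trade, while
        # `allow_exact_matches=False` additionally requires a strictly earlier
        # quote time rather than an equal one.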
def test_basic(self):
expected = self.asof
trades = self.trades
quotes = self.quotes
result = merge_asof(trades, quotes,
on='time',
by='ticker')
assert_frame_equal(result, expected)
def test_basic_categorical(self):
expected = self.asof
trades = self.trades.copy()
trades.ticker = trades.ticker.astype('category')
quotes = self.quotes.copy()
quotes.ticker = quotes.ticker.astype('category')
result = merge_asof(trades, quotes,
on='time',
by='ticker')
assert_frame_equal(result, expected)
def test_missing_right_by(self):
expected = self.asof
trades = self.trades
quotes = self.quotes
q = quotes[quotes.ticker != 'MSFT']
result = merge_asof(trades, q,
on='time',
by='ticker')
expected.loc[expected.ticker == 'MSFT', ['bid', 'ask']] = np.nan
assert_frame_equal(result, expected)
def test_basic2(self):
expected = self.read_data('asof2.csv')
trades = self.read_data('trades2.csv')
quotes = self.read_data('quotes2.csv', dedupe=True)
result = merge_asof(trades, quotes,
on='time',
by='ticker')
assert_frame_equal(result, expected)
def test_basic_no_by(self):
f = lambda x: x[x.ticker == 'MSFT'].drop('ticker', axis=1) \
.reset_index(drop=True)
# just use a single ticker
expected = f(self.asof)
trades = f(self.trades)
quotes = f(self.quotes)
result = merge_asof(trades, quotes,
on='time')
assert_frame_equal(result, expected)
def test_valid_join_keys(self):
trades = self.trades
quotes = self.quotes
with self.assertRaises(MergeError):
merge_asof(trades, quotes,
left_on='time',
right_on='bid',
by='ticker')
with self.assertRaises(MergeError):
merge_asof(trades, quotes,
on=['time', 'ticker'],
by='ticker')
with self.assertRaises(MergeError):
merge_asof(trades, quotes,
by='ticker')
def test_with_duplicates(self):
q = pd.concat([self.quotes, self.quotes]).sort_values(
['time', 'ticker']).reset_index(drop=True)
result = merge_asof(self.trades, q,
on='time',
by='ticker')
expected = self.read_data('asof.csv')
assert_frame_equal(result, expected)
result = merge_asof(self.trades, q,
on='time',
by='ticker',
check_duplicates=False)
expected = self.read_data('asof.csv')
expected = | pd.concat([expected, expected]) | pandas.concat |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import datetime as dt
import time
import os
import imageio
from tqdm import tqdm
import sys
import statsmodels.api as sm
from scipy.stats import shapiro
from environment import StaticEnvironment
def date_to_index(date_string, start_date):
return (dt.datetime.strptime(date_string, '%Y-%m-%d') - start_date).days
def index_to_date(index, start_date):
    return (start_date + dt.timedelta(index)).strftime('%Y-%m-%d')
def portfolio(w :np.array, r: np.array, mean_model, cov_model, satisfaction_model, annualize = False):
'''
:param w: n x 1 portfolio weights
:param r: t x n portfolio returns
:param mean_model: function for modelling the expected value of the portfolio
:param cov_model: function for modelling the covariance matrix
    :param satisfaction_model: satisfaction (utility) function of the portfolio return and volatility
    :return: np.array of [portfolio expected return, portfolio volatility, satisfaction]
'''
mu_hat = mean_model(r)
sig_hat = cov_model(r)
if annualize:
mu_hat *= 252
sig_hat *= 252
r_p = np.sum(mu_hat * w)
sig_p = np.sqrt(np.dot(w.T, np.dot(sig_hat, w)))
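    # sig_p is the usual quadratic-form portfolio volatility sqrt(w' * Sigma * w)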
#satisfaction measure
satis = satisfaction_model(r_p, sig_p)
return np.array([r_p, sig_p, satis])
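# Minimal usage sketch for `portfolio` (illustrative only; the lambdas below are
# hypothetical stand-ins for the mean/cov/satisfaction callables):
#   w = np.repeat(1.0 / n_assets, n_assets)
#   r_p, sig_p, satis = portfolio(w, returns,
#                                 mean_model=lambda r: r.mean(axis=0),
#                                 cov_model=lambda r: np.cov(r, rowvar=False),
#                                 satisfaction_model=lambda mu, sig: mu / sig)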
def test_static_agent(env, agent, optimization_problem,
fitting_period, rebalancing_period,
factors=None, **kwargs):
returns = []
actions = []
counter = 0
tic = time.perf_counter()
# factor indexing
if optimization_problem == 'sr_factor':
factor_env = StaticEnvironment(factors.loc[env.prices.index[0]:], **kwargs)
for trade in range(fitting_period, len(env.prices), rebalancing_period):
# print(trade, counter*rebalancing_period)
s_t = env.get_state(trade, counter * rebalancing_period)
# print(s_t.shape)
if optimization_problem == 'sr_factor':
s_t_factor = env.get_state(trade, counter * rebalancing_period)
a_t = agent.act(s_t, optimization_problem, factors=s_t_factor)
else:
a_t = agent.act(s_t, optimization_problem, **kwargs)
actions.append(a_t)
s_t_trade = s_t.iloc[-rebalancing_period:, :]
            # transaction costs: charge proportional costs on the change in weights between rebalances
if len(actions) > 1:
a_delta = actions[len(actions) - 1] - actions[len(actions) - 2]
r_t = np.dot(s_t_trade, a_t) - np.dot(s_t_trade * env.transaction_cost, a_delta)
else:
r_t = np.dot(s_t_trade, a_t)
returns.append(r_t)
counter += 1
returns = np.vstack(returns).flatten()
toc = time.perf_counter()
print(f"Tested {optimization_problem} in {toc - tic:0.4f} seconds")
return returns
def load_data_long(db_path):
db_long_prices = pd.read_csv(db_path + '00_db_long__PX_LAST.csv', index_col=0, parse_dates=True)
db_long_prices = db_long_prices.loc['2015':]
db_long_RL = db_long_prices.loc[:, ~db_long_prices.iloc[0].isna()].fillna(method='ffill')
return db_long_RL
def plot_training_result(rl_history, benchmark_history, n_, actions_to_plot, column_names):
rl_result = np.array(rl_history).cumsum()
benchmark_result = np.array(benchmark_history).cumsum()
fig = plt.figure(figsize=(12,6))
top = plt.subplot2grid((4, 4), (0, 0), rowspan=2, colspan=4)
bottom = plt.subplot2grid((4, 4), (2, 0), rowspan=2, colspan=4)
#returns
top.plot(rl_result, color='black', ls = '-')
top.plot(benchmark_result, color = 'grey', ls = '--')
#weights
for a in actions_to_plot:
plt.bar(np.arange(n_), a, color = 'goldenrod', alpha = 0.25)
plt.xticks(np.arange(n_), column_names, rotation = 'vertical')
    plt.show()
def plot_histograms(ew, subset):
sns.set_palette('bright')
fig, ax = plt.subplots(figsize=(20, 15))
for i, column in enumerate(subset.columns, 1):
plt.subplot(3, 3, i)
to_plot = pd.concat([ew, subset[column]], axis=1)
sns.histplot(to_plot, kde=True, multiple='stack', alpha=0.5)
plt.xlim(-.13,.13)
def create_weights_gif(weights, model_name, saving_path, **plot_kwargs):
'''
@param weights: array of weights
@param model_name: name of the model, string
@param saving_path: path to save, string
@param plot_kwargs: list of kwargs to unpack for plot
@return: None
'''
tic = time.perf_counter()
n_frames = 5
x = weights.columns.to_list()
# obtain lists of weights for each day
y_lists = []
for _, row in weights.iterrows():
rw = row.to_list()
y_lists.append(rw)
# iterate over each row
filenames = []
y_cache = []
with tqdm(total=round(len(y_lists) / 20, 0), file=sys.stdout) as pbar:
for index in np.arange(0, len(y_lists) - 1, step=20):
y = y_lists[index]
y1 = y_lists[index + 1]
# distance to next pos
y_path = np.array(y1) - np.array(y)
# obtain image for each frame
for i in np.arange(0, n_frames + 1):
y_temp = (y + (y_path / n_frames) * i)
y_cache.append(y_temp)
# plot
fig, ax = plt.subplots(figsize=(10, 8))
plt.barh(x, y_temp, color='goldenrod', **plot_kwargs)
# cache and plot dissipating weights
if len(y_cache) > 0:
for idx, cache in enumerate(y_cache):
plt.barh(x, cache, color='goldenrod', alpha=0.4 - 0.05 * idx)
plt.xlim(0, 0.07)
# if cache is full first in last out
if len(y_cache) == 8:
y_cache.pop(0)
# build a filename
filename = os.path.join(saving_path, f'gif/frame_{index}_{i}.png')
filenames.append(filename)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_title(f'{model_name} test trading day: #{index}')
ax.set_xlabel('weight')
# last frame needs to stay longer
if (i == n_frames):
for i in range(2):
filenames.append(filename)
# save images
plt.savefig(filename, dpi=96)
plt.close()
pbar.update(1)
print('Charts saved \n')
print('Creating gif\n')
# create the gif
with imageio.get_writer(os.path.join(saving_path, f'{model_name}_weights.gif'), mode='I') as writer:
for filename in filenames:
image = imageio.imread(filename)
writer.append_data(image)
toc = time.perf_counter()
print(f'Gif produced in {(toc - tic) / 60 :0.4f} minutes')
# print('Removing Images\n')
# # Remove files
# for filename in set(filenames):
# os.remove(filename)
print('DONE')
def normality_test(ew, subset):
fig, axes = plt.subplots(nrows=2, ncols=3, figsize=(20, 12))
ax = axes.flatten()
subset = | pd.concat([ew, subset], axis=1) | pandas.concat |
import pandas as pd
import numpy as np
from qiime2 import Metadata
from microsetta_public_api import config
from microsetta_public_api.resources import resources
from microsetta_public_api.utils.testing import (TempfileTestCase,
ConfigTestCase)
from microsetta_public_api.repo._metadata_repo import MetadataRepo
class TestMetadataRepo(TempfileTestCase, ConfigTestCase):
def setUp(self):
TempfileTestCase.setUp(self)
ConfigTestCase.setUp(self)
self.metadata_filename = self.create_tempfile(suffix='.qza').name
self.test_metadata = pd.DataFrame({
'age_cat': ['30s', '40s', '50s', '30s', np.nan],
'num_cat': [7.24, 7.24, 8.25, 7.24, np.nan],
'other': [1, 2, 3, 4, np.nan],
}, index= | pd.Series(['a', 'b', 'c', 'd', 'e'], name='#SampleID') | pandas.Series |
import datetime
from datetime import timedelta
from distutils.version import LooseVersion
from io import BytesIO
import os
import re
from warnings import catch_warnings, simplefilter
import numpy as np
import pytest
from pandas.compat import is_platform_little_endian, is_platform_windows
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_categorical_dtype
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
Int64Index,
MultiIndex,
RangeIndex,
Series,
Timestamp,
bdate_range,
concat,
date_range,
isna,
timedelta_range,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
create_tempfile,
ensure_clean_path,
ensure_clean_store,
safe_close,
safe_remove,
tables,
)
import pandas.util.testing as tm
from pandas.io.pytables import (
ClosedFileError,
HDFStore,
PossibleDataLossError,
Term,
read_hdf,
)
from pandas.io import pytables as pytables # noqa: E402 isort:skip
from pandas.io.pytables import TableIterator # noqa: E402 isort:skip
_default_compressor = "blosc"
ignore_natural_naming_warning = pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
@pytest.mark.single
class TestHDFStore:
def test_format_kwarg_in_constructor(self, setup_path):
# GH 13291
with ensure_clean_path(setup_path) as path:
with pytest.raises(ValueError):
HDFStore(path, format="table")
def test_context(self, setup_path):
path = create_tempfile(setup_path)
try:
with HDFStore(path) as tbl:
raise ValueError("blah")
except ValueError:
pass
finally:
safe_remove(path)
try:
with HDFStore(path) as tbl:
tbl["a"] = tm.makeDataFrame()
with HDFStore(path) as tbl:
assert len(tbl) == 1
assert type(tbl["a"]) == DataFrame
finally:
safe_remove(path)
def test_conv_read_write(self, setup_path):
path = create_tempfile(setup_path)
try:
def roundtrip(key, obj, **kwargs):
obj.to_hdf(path, key, **kwargs)
return read_hdf(path, key)
o = tm.makeTimeSeries()
tm.assert_series_equal(o, roundtrip("series", o))
o = tm.makeStringSeries()
tm.assert_series_equal(o, roundtrip("string_series", o))
o = tm.makeDataFrame()
tm.assert_frame_equal(o, roundtrip("frame", o))
# table
df = DataFrame(dict(A=range(5), B=range(5)))
df.to_hdf(path, "table", append=True)
result = read_hdf(path, "table", where=["index>2"])
tm.assert_frame_equal(df[df.index > 2], result)
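            # the `where` filter on read works because the data was appended in
            # table format; fixed-format stores cannot be queried like this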
finally:
safe_remove(path)
def test_long_strings(self, setup_path):
# GH6166
df = DataFrame(
{"a": tm.rands_array(100, size=10)}, index=tm.rands_array(100, size=10)
)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=["a"])
result = store.select("df")
tm.assert_frame_equal(df, result)
def test_api(self, setup_path):
# GH4584
# API issue when to_hdf doesn't accept append AND format args
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True)
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True)
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", append=False, format="fixed")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False, format="f")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False)
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_store(setup_path) as store:
path = store._path
df = tm.makeDataFrame()
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=True, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# append to False
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# formats
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format=None)
tm.assert_frame_equal(store.select("df"), df)
with ensure_clean_path(setup_path) as path:
# Invalid.
df = tm.makeDataFrame()
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="f")
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="fixed")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=True, format="foo")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=False, format="bar")
# File path doesn't exist
path = ""
with pytest.raises(FileNotFoundError):
read_hdf(path, "df")
def test_api_default_format(self, setup_path):
# default_format option
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
_maybe_remove(store, "df")
store.put("df", df)
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
store.append("df2", df)
pd.set_option("io.hdf.default_format", "table")
_maybe_remove(store, "df")
store.put("df", df)
assert store.get_storer("df").is_table
_maybe_remove(store, "df2")
store.append("df2", df)
assert store.get_storer("df").is_table
pd.set_option("io.hdf.default_format", None)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
df.to_hdf(path, "df")
with HDFStore(path) as store:
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
df.to_hdf(path, "df2", append=True)
pd.set_option("io.hdf.default_format", "table")
df.to_hdf(path, "df3")
with HDFStore(path) as store:
assert store.get_storer("df3").is_table
df.to_hdf(path, "df4", append=True)
with HDFStore(path) as store:
assert store.get_storer("df4").is_table
pd.set_option("io.hdf.default_format", None)
def test_keys(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
assert len(store) == 3
expected = {"/a", "/b", "/c"}
assert set(store.keys()) == expected
assert set(store) == expected
def test_keys_ignore_hdf_softlink(self, setup_path):
# GH 20523
# Puts a softlink into HDF file and rereads
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A=range(5), B=range(5)))
store.put("df", df)
assert store.keys() == ["/df"]
store._handle.create_soft_link(store._handle.root, "symlink", "df")
# Should ignore the softlink
assert store.keys() == ["/df"]
def test_iter_empty(self, setup_path):
with ensure_clean_store(setup_path) as store:
# GH 12221
assert list(store) == []
def test_repr(self, setup_path):
with ensure_clean_store(setup_path) as store:
repr(store)
store.info()
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store["df"] = df
# make a random group in hdf space
store._handle.create_group(store._handle.root, "bah")
assert store.filename in repr(store)
assert store.filename in str(store)
store.info()
# storers
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df)
s = store.get_storer("df")
repr(s)
str(s)
@ignore_natural_naming_warning
def test_contains(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
store["foo/bar"] = tm.makeDataFrame()
assert "a" in store
assert "b" in store
assert "c" not in store
assert "foo/bar" in store
assert "/foo/bar" in store
assert "/foo/b" not in store
assert "bar" not in store
# gh-2694: tables.NaturalNameWarning
with catch_warnings(record=True):
store["node())"] = tm.makeDataFrame()
assert "node())" in store
def test_versioning(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
assert store.root.a._v_attrs.pandas_version == "0.15.2"
assert store.root.b._v_attrs.pandas_version == "0.15.2"
assert store.root.df1._v_attrs.pandas_version == "0.15.2"
# write a file and wipe its versioning
_maybe_remove(store, "df2")
store.append("df2", df)
# this is an error because its table_type is appendable, but no
# version info
store.get_node("df2")._v_attrs.pandas_version = None
with pytest.raises(Exception):
store.select("df2")
def test_mode(self, setup_path):
df = tm.makeTimeDataFrame()
def check(mode):
with ensure_clean_path(setup_path) as path:
# constructor
if mode in ["r", "r+"]:
with pytest.raises(IOError):
HDFStore(path, mode=mode)
else:
store = HDFStore(path, mode=mode)
assert store._handle.mode == mode
store.close()
with ensure_clean_path(setup_path) as path:
# context
if mode in ["r", "r+"]:
with pytest.raises(IOError):
with HDFStore(path, mode=mode) as store: # noqa
pass
else:
with HDFStore(path, mode=mode) as store:
assert store._handle.mode == mode
with ensure_clean_path(setup_path) as path:
# conv write
if mode in ["r", "r+"]:
with pytest.raises(IOError):
df.to_hdf(path, "df", mode=mode)
df.to_hdf(path, "df", mode="w")
else:
df.to_hdf(path, "df", mode=mode)
# conv read
if mode in ["w"]:
with pytest.raises(ValueError):
read_hdf(path, "df", mode=mode)
else:
result = read_hdf(path, "df", mode=mode)
tm.assert_frame_equal(result, df)
def check_default_mode():
# read_hdf uses default mode
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", mode="w")
result = read_hdf(path, "df")
tm.assert_frame_equal(result, df)
check("r")
check("r+")
check("a")
check("w")
check_default_mode()
def test_reopen_handle(self, setup_path):
with ensure_clean_path(setup_path) as path:
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# invalid mode change
with pytest.raises(PossibleDataLossError):
store.open("w")
store.close()
assert not store.is_open
# truncation ok here
store.open("w")
assert store.is_open
assert len(store) == 0
store.close()
assert not store.is_open
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# reopen as read
store.open("r")
assert store.is_open
assert len(store) == 1
assert store._mode == "r"
store.close()
assert not store.is_open
# reopen as append
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
# reopen as append (again)
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
def test_open_args(self, setup_path):
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
# create an in memory store
store = HDFStore(
path, mode="a", driver="H5FD_CORE", driver_core_backing_store=0
)
store["df"] = df
store.append("df2", df)
tm.assert_frame_equal(store["df"], df)
tm.assert_frame_equal(store["df2"], df)
store.close()
# the file should not have actually been written
assert not os.path.exists(path)
def test_flush(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store.flush()
store.flush(fsync=True)
def test_get(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
left = store.get("a")
right = store["a"]
tm.assert_series_equal(left, right)
left = store.get("/a")
right = store["/a"]
tm.assert_series_equal(left, right)
with pytest.raises(KeyError, match="'No object named b in the file'"):
store.get("b")
@pytest.mark.parametrize(
"where, expected",
[
(
"/",
{
"": ({"first_group", "second_group"}, set()),
"/first_group": (set(), {"df1", "df2"}),
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
(
"/second_group",
{
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
],
)
def test_walk(self, where, expected, setup_path):
# GH10143
objs = {
"df1": pd.DataFrame([1, 2, 3]),
"df2": pd.DataFrame([4, 5, 6]),
"df3": pd.DataFrame([6, 7, 8]),
"df4": pd.DataFrame([9, 10, 11]),
"s1": pd.Series([10, 9, 8]),
# Next 3 items aren't pandas objects and should be ignored
"a1": np.array([[1, 2, 3], [4, 5, 6]]),
"tb1": np.array([(1, 2, 3), (4, 5, 6)], dtype="i,i,i"),
"tb2": np.array([(7, 8, 9), (10, 11, 12)], dtype="i,i,i"),
}
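        # store.walk() yields (path, subgroups, leaves) for pandas objects only,
        # so the raw arrays/tables created below should not appear as leaves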
with ensure_clean_store("walk_groups.hdf", mode="w") as store:
store.put("/first_group/df1", objs["df1"])
store.put("/first_group/df2", objs["df2"])
store.put("/second_group/df3", objs["df3"])
store.put("/second_group/s1", objs["s1"])
store.put("/second_group/third_group/df4", objs["df4"])
# Create non-pandas objects
store._handle.create_array("/first_group", "a1", objs["a1"])
store._handle.create_table("/first_group", "tb1", obj=objs["tb1"])
store._handle.create_table("/second_group", "tb2", obj=objs["tb2"])
assert len(list(store.walk(where=where))) == len(expected)
for path, groups, leaves in store.walk(where=where):
assert path in expected
expected_groups, expected_frames = expected[path]
assert expected_groups == set(groups)
assert expected_frames == set(leaves)
for leaf in leaves:
frame_path = "/".join([path, leaf])
obj = store.get(frame_path)
if "df" in leaf:
tm.assert_frame_equal(obj, objs[leaf])
else:
tm.assert_series_equal(obj, objs[leaf])
def test_getattr(self, setup_path):
with ensure_clean_store(setup_path) as store:
s = tm.makeTimeSeries()
store["a"] = s
# test attribute access
result = store.a
tm.assert_series_equal(result, s)
result = getattr(store, "a")
tm.assert_series_equal(result, s)
df = tm.makeTimeDataFrame()
store["df"] = df
result = store.df
tm.assert_frame_equal(result, df)
# errors
for x in ["d", "mode", "path", "handle", "complib"]:
with pytest.raises(AttributeError):
getattr(store, x)
# not stores
for x in ["mode", "path", "handle", "complib"]:
getattr(store, "_{x}".format(x=x))
def test_put(self, setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeTimeDataFrame()
store["a"] = ts
store["b"] = df[:10]
store["foo/bar/bah"] = df[:10]
store["foo"] = df[:10]
store["/foo"] = df[:10]
store.put("c", df[:10], format="table")
# not OK, not a table
with pytest.raises(ValueError):
store.put("b", df[10:], append=True)
# node does not currently exist, test _is_table_type returns False
# in this case
_maybe_remove(store, "f")
with pytest.raises(ValueError):
store.put("f", df[10:], append=True)
# can't put to a table (use append instead)
with pytest.raises(ValueError):
store.put("c", df[10:], append=True)
# overwrite table
store.put("c", df[:10], format="table", append=False)
tm.assert_frame_equal(df[:10], store["c"])
def test_put_string_index(self, setup_path):
with ensure_clean_store(setup_path) as store:
index = Index(
["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(20), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
# mixed length
index = Index(
["abcdefghijklmnopqrstuvwxyz1234567890"]
+ ["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(21), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
def test_put_compression(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
store.put("c", df, format="table", complib="zlib")
tm.assert_frame_equal(store["c"], df)
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="zlib")
@td.skip_if_windows_python_3
def test_put_compression_blosc(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="blosc")
store.put("c", df, format="table", complib="blosc")
tm.assert_frame_equal(store["c"], df)
def test_complibs_default_settings(self, setup_path):
# GH15943
df = tm.makeDataFrame()
# Set complevel and check if complib is automatically set to
# default value
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complevel=9)
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "zlib"
# Set complib and check to see if compression is disabled
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complib="zlib")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if not setting complib or complevel results in no compression
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if file-defaults can be overridden on a per table basis
with ensure_clean_path(setup_path) as tmpfile:
store = pd.HDFStore(tmpfile)
store.append("dfc", df, complevel=9, complib="blosc")
store.append("df", df)
store.close()
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
for node in h5file.walk_nodes(where="/dfc", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "blosc"
def test_complibs(self, setup_path):
# GH14478
df = tm.makeDataFrame()
# Building list of all complibs and complevels tuples
all_complibs = tables.filters.all_complibs
# Remove lzo if its not available on this platform
if not tables.which_lib_version("lzo"):
all_complibs.remove("lzo")
# Remove bzip2 if its not available on this platform
if not tables.which_lib_version("bzip2"):
all_complibs.remove("bzip2")
all_levels = range(0, 10)
all_tests = [(lib, lvl) for lib in all_complibs for lvl in all_levels]
for (lib, lvl) in all_tests:
with ensure_clean_path(setup_path) as tmpfile:
gname = "foo"
# Write and read file to see if data is consistent
df.to_hdf(tmpfile, gname, complib=lib, complevel=lvl)
result = pd.read_hdf(tmpfile, gname)
tm.assert_frame_equal(result, df)
# Open file and check metadata
# for correct amount of compression
h5table = tables.open_file(tmpfile, mode="r")
for node in h5table.walk_nodes(where="/" + gname, classname="Leaf"):
assert node.filters.complevel == lvl
if lvl == 0:
assert node.filters.complib is None
else:
assert node.filters.complib == lib
h5table.close()
def test_put_integer(self, setup_path):
# non-date, non-string index
df = DataFrame(np.random.randn(50, 100))
self._check_roundtrip(df, tm.assert_frame_equal, setup_path)
@td.xfail_non_writeable
def test_put_mixed_type(self, setup_path):
df = tm.makeTimeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
# PerformanceWarning
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store.put("df", df)
expected = store.get("df")
tm.assert_frame_equal(expected, df)
@pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
def test_append(self, setup_path):
with ensure_clean_store(setup_path) as store:
            # this is allowed but you almost always don't want to do it
# tables.NaturalNameWarning):
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
tm.assert_frame_equal(store["df1"], df)
_maybe_remove(store, "df2")
store.put("df2", df[:10], format="table")
store.append("df2", df[10:])
tm.assert_frame_equal(store["df2"], df)
_maybe_remove(store, "df3")
store.append("/df3", df[:10])
store.append("/df3", df[10:])
tm.assert_frame_equal(store["df3"], df)
                # this is allowed but you almost always don't want to do it
# tables.NaturalNameWarning
_maybe_remove(store, "/df3 foo")
store.append("/df3 foo", df[:10])
store.append("/df3 foo", df[10:])
tm.assert_frame_equal(store["df3 foo"], df)
                # dtype issues - mixed type in a single object column
df = DataFrame(data=[[1, 2], [0, 1], [1, 2], [0, 0]])
df["mixed_column"] = "testing"
df.loc[2, "mixed_column"] = np.nan
_maybe_remove(store, "df")
store.append("df", df)
tm.assert_frame_equal(store["df"], df)
# uints - test storage of uints
uint_data = DataFrame(
{
"u08": Series(
np.random.randint(0, high=255, size=5), dtype=np.uint8
),
"u16": Series(
np.random.randint(0, high=65535, size=5), dtype=np.uint16
),
"u32": Series(
np.random.randint(0, high=2 ** 30, size=5), dtype=np.uint32
),
"u64": Series(
[2 ** 58, 2 ** 59, 2 ** 60, 2 ** 61, 2 ** 62],
dtype=np.uint64,
),
},
index=np.arange(5),
)
_maybe_remove(store, "uints")
store.append("uints", uint_data)
tm.assert_frame_equal(store["uints"], uint_data)
# uints - test storage of uints in indexable columns
_maybe_remove(store, "uints")
# 64-bit indices not yet supported
store.append("uints", uint_data, data_columns=["u08", "u16", "u32"])
tm.assert_frame_equal(store["uints"], uint_data)
def test_append_series(self, setup_path):
with ensure_clean_store(setup_path) as store:
# basic
ss = tm.makeStringSeries()
ts = tm.makeTimeSeries()
ns = Series(np.arange(100))
store.append("ss", ss)
result = store["ss"]
tm.assert_series_equal(result, ss)
assert result.name is None
store.append("ts", ts)
result = store["ts"]
tm.assert_series_equal(result, ts)
assert result.name is None
ns.name = "foo"
store.append("ns", ns)
result = store["ns"]
tm.assert_series_equal(result, ns)
assert result.name == ns.name
# select on the values
expected = ns[ns > 60]
result = store.select("ns", "foo>60")
tm.assert_series_equal(result, expected)
# select on the index and values
expected = ns[(ns > 70) & (ns.index < 90)]
result = store.select("ns", "foo>70 and index<90")
tm.assert_series_equal(result, expected)
# multi-index
mi = DataFrame(np.random.randn(5, 1), columns=["A"])
mi["B"] = np.arange(len(mi))
mi["C"] = "foo"
mi.loc[3:5, "C"] = "bar"
mi.set_index(["C", "B"], inplace=True)
s = mi.stack()
s.index = s.index.droplevel(2)
store.append("mi", s)
tm.assert_series_equal(store["mi"], s)
def test_store_index_types(self, setup_path):
# GH5386
# test storing various index types
with ensure_clean_store(setup_path) as store:
def check(format, index):
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df.index = index(len(df))
_maybe_remove(store, "df")
store.put("df", df, format=format)
tm.assert_frame_equal(df, store["df"])
for index in [
tm.makeFloatIndex,
tm.makeStringIndex,
tm.makeIntIndex,
tm.makeDateIndex,
]:
check("table", index)
check("fixed", index)
# period index currently broken for table
            # see GH7796 FIXME
check("fixed", tm.makePeriodIndex)
# check('table',tm.makePeriodIndex)
# unicode
index = tm.makeUnicodeIndex
check("table", index)
check("fixed", index)
@pytest.mark.skipif(
not is_platform_little_endian(), reason="reason platform is not little endian"
)
def test_encoding(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A="foo", B="bar"), index=range(5))
df.loc[2, "A"] = np.nan
df.loc[3, "B"] = np.nan
_maybe_remove(store, "df")
store.append("df", df, encoding="ascii")
tm.assert_frame_equal(store["df"], df)
expected = df.reindex(columns=["A"])
result = store.select("df", Term("columns=A", encoding="ascii"))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"val",
[
[b"E\xc9, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"a", b"b", b"c"],
[b"EE, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"\xf8\xfc", b"a", b"b", b"c"],
[b"", b"a", b"b", b"c"],
[b"\xf8\xfc", b"a", b"b", b"c"],
[b"A\xf8\xfc", b"", b"a", b"b", b"c"],
[np.nan, b"", b"b", b"c"],
[b"A\xf8\xfc", np.nan, b"", b"b", b"c"],
],
)
@pytest.mark.parametrize("dtype", ["category", object])
def test_latin_encoding(self, setup_path, dtype, val):
enc = "latin-1"
nan_rep = ""
key = "data"
val = [x.decode(enc) if isinstance(x, bytes) else x for x in val]
ser = pd.Series(val, dtype=dtype)
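        # round trip: write with latin-1 and a custom nan_rep, read back, then
        # compare against the original with nan_rep mapped back to NaN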
with ensure_clean_path(setup_path) as store:
ser.to_hdf(store, key, format="table", encoding=enc, nan_rep=nan_rep)
retr = read_hdf(store, key)
s_nan = ser.replace(nan_rep, np.nan)
if is_categorical_dtype(s_nan):
assert is_categorical_dtype(retr)
tm.assert_series_equal(
s_nan, retr, check_dtype=False, check_categorical=False
)
else:
tm.assert_series_equal(s_nan, retr)
# FIXME: don't leave commented-out
# fails:
# for x in examples:
# roundtrip(s, nan_rep=b'\xf8\xfc')
def test_append_some_nans(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{
"A": Series(np.random.randn(20)).astype("int32"),
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
"D": Timestamp("20010101"),
"E": datetime.datetime(2001, 1, 2, 0, 0),
},
index=np.arange(20),
)
# some nans
_maybe_remove(store, "df1")
df.loc[0:15, ["A1", "B", "D", "E"]] = np.nan
store.append("df1", df[:10])
store.append("df1", df[10:])
tm.assert_frame_equal(store["df1"], df)
# first column
df1 = df.copy()
df1.loc[:, "A1"] = np.nan
_maybe_remove(store, "df1")
store.append("df1", df1[:10])
store.append("df1", df1[10:])
tm.assert_frame_equal(store["df1"], df1)
# 2nd column
df2 = df.copy()
df2.loc[:, "A2"] = np.nan
_maybe_remove(store, "df2")
store.append("df2", df2[:10])
store.append("df2", df2[10:])
tm.assert_frame_equal(store["df2"], df2)
# datetimes
df3 = df.copy()
df3.loc[:, "E"] = np.nan
_maybe_remove(store, "df3")
store.append("df3", df3[:10])
store.append("df3", df3[10:])
tm.assert_frame_equal(store["df3"], df3)
def test_append_all_nans(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{"A1": np.random.randn(20), "A2": np.random.randn(20)},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
# nan some entire rows (dropna=True)
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df[-4:])
# nan some entire rows (dropna=False)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# tests the option io.hdf.dropna_table
pd.set_option("io.hdf.dropna_table", False)
_maybe_remove(store, "df3")
store.append("df3", df[:10])
store.append("df3", df[10:])
tm.assert_frame_equal(store["df3"], df)
pd.set_option("io.hdf.dropna_table", True)
_maybe_remove(store, "df4")
store.append("df4", df[:10])
store.append("df4", df[10:])
tm.assert_frame_equal(store["df4"], df[-4:])
# nan some entire rows (string are still written!)
df = DataFrame(
{
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# nan some entire rows (but since we have dates they are still
# written!)
df = DataFrame(
{
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
"D": Timestamp("20010101"),
"E": datetime.datetime(2001, 1, 2, 0, 0),
},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# Test to make sure defaults are to not drop.
# Corresponding to Issue 9382
df_with_missing = DataFrame(
{"col1": [0, np.nan, 2], "col2": [1, np.nan, np.nan]}
)
with ensure_clean_path(setup_path) as path:
df_with_missing.to_hdf(path, "df_with_missing", format="table")
reloaded = read_hdf(path, "df_with_missing")
tm.assert_frame_equal(df_with_missing, reloaded)
def test_read_missing_key_close_store(self, setup_path):
# GH 25766
with ensure_clean_path(setup_path) as path:
df = pd.DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
pd.read_hdf(path, "k2")
# smoke test to test that file is properly closed after
# read with KeyError before another write
df.to_hdf(path, "k2")
def test_read_missing_key_opened_store(self, setup_path):
# GH 28699
with ensure_clean_path(setup_path) as path:
df = pd.DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
store = pd.HDFStore(path, "r")
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
pd.read_hdf(store, "k2")
# Test that the file is still open after a KeyError and that we can
# still read from it.
pd.read_hdf(store, "k1")
def test_append_frame_column_oriented(self, setup_path):
with ensure_clean_store(setup_path) as store:
# column oriented
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df.iloc[:, :2], axes=["columns"])
store.append("df1", df.iloc[:, 2:])
tm.assert_frame_equal(store["df1"], df)
result = store.select("df1", "columns=A")
expected = df.reindex(columns=["A"])
tm.assert_frame_equal(expected, result)
# selection on the non-indexable
result = store.select("df1", ("columns=A", "index=df.index[0:4]"))
expected = df.reindex(columns=["A"], index=df.index[0:4])
tm.assert_frame_equal(expected, result)
# this isn't supported
with pytest.raises(TypeError):
store.select("df1", "columns=A and index>df.index[4]")
def test_append_with_different_block_ordering(self, setup_path):
# GH 4096; using same frames, but different block orderings
with ensure_clean_store(setup_path) as store:
for i in range(10):
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df["index"] = range(10)
df["index"] += i * 10
df["int64"] = Series([1] * len(df), dtype="int64")
df["int16"] = Series([1] * len(df), dtype="int16")
if i % 2 == 0:
del df["int64"]
df["int64"] = Series([1] * len(df), dtype="int64")
if i % 3 == 0:
a = df.pop("A")
df["A"] = a
df.set_index("index", inplace=True)
store.append("df", df)
# test a different ordering but with more fields (like invalid
            # combination)
with ensure_clean_store(setup_path) as store:
df = DataFrame(np.random.randn(10, 2), columns=list("AB"), dtype="float64")
df["int64"] = Series([1] * len(df), dtype="int64")
df["int16"] = Series([1] * len(df), dtype="int16")
store.append("df", df)
# store additional fields in different blocks
df["int16_2"] = Series([1] * len(df), dtype="int16")
with pytest.raises(ValueError):
store.append("df", df)
            # store multiple additional fields in different blocks
df["float_3"] = Series([1.0] * len(df), dtype="float64")
with pytest.raises(ValueError):
store.append("df", df)
def test_append_with_strings(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
# avoid truncation on elements
df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
store.append("df_big", df)
tm.assert_frame_equal(store.select("df_big"), df)
check_col("df_big", "values_block_1", 15)
# appending smaller string ok
df2 = DataFrame([[124, "asdqy"], [346, "dggnhefbdfb"]])
store.append("df_big", df2)
expected = concat([df, df2])
tm.assert_frame_equal(store.select("df_big"), expected)
check_col("df_big", "values_block_1", 15)
# avoid truncation on elements
df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
store.append("df_big2", df, min_itemsize={"values": 50})
tm.assert_frame_equal(store.select("df_big2"), df)
check_col("df_big2", "values_block_1", 50)
# bigger string on next append
store.append("df_new", df)
df_new = DataFrame(
[[124, "abcdefqhij"], [346, "abcdefghijklmnopqrtsuvwxyz"]]
)
with pytest.raises(ValueError):
store.append("df_new", df_new)
# min_itemsize on Series index (GH 11412)
df = tm.makeMixedDataFrame().set_index("C")
store.append("ss", df["B"], min_itemsize={"index": 4})
tm.assert_series_equal(store.select("ss"), df["B"])
# same as above, with data_columns=True
store.append(
"ss2", df["B"], data_columns=True, min_itemsize={"index": 4}
)
tm.assert_series_equal(store.select("ss2"), df["B"])
# min_itemsize in index without appending (GH 10381)
store.put("ss3", df, format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
store.append("ss3", df2)
tm.assert_frame_equal(store.select("ss3"), pd.concat([df, df2]))
# same as above, with a Series
store.put("ss4", df["B"], format="table", min_itemsize={"index": 6})
store.append("ss4", df2["B"])
tm.assert_series_equal(
store.select("ss4"), pd.concat([df["B"], df2["B"]])
)
# with nans
_maybe_remove(store, "df")
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[1:4, "string"] = np.nan
df["string2"] = "bar"
df.loc[4:8, "string2"] = np.nan
df["string3"] = "bah"
df.loc[1:, "string3"] = np.nan
store.append("df", df)
result = store.select("df")
tm.assert_frame_equal(result, df)
with ensure_clean_store(setup_path) as store:
            def check_col(key, name, size):
                assert (
                    getattr(store.get_storer(key).table.description, name).itemsize
                    == size
                )
df = DataFrame(dict(A="foo", B="bar"), index=range(10))
# a min_itemsize that creates a data_column
_maybe_remove(store, "df")
store.append("df", df, min_itemsize={"A": 200})
check_col("df", "A", 200)
assert store.get_storer("df").data_columns == ["A"]
# a min_itemsize that creates a data_column2
_maybe_remove(store, "df")
store.append("df", df, data_columns=["B"], min_itemsize={"A": 200})
check_col("df", "A", 200)
assert store.get_storer("df").data_columns == ["B", "A"]
# a min_itemsize that creates a data_column2
_maybe_remove(store, "df")
store.append("df", df, data_columns=["B"], min_itemsize={"values": 200})
check_col("df", "B", 200)
check_col("df", "values_block_0", 200)
assert store.get_storer("df").data_columns == ["B"]
# infer the .typ on subsequent appends
_maybe_remove(store, "df")
store.append("df", df[:5], min_itemsize=200)
store.append("df", df[5:], min_itemsize=200)
tm.assert_frame_equal(store["df"], df)
# invalid min_itemsize keys
df = DataFrame(["foo", "foo", "foo", "barh", "barh", "barh"], columns=["A"])
_maybe_remove(store, "df")
with pytest.raises(ValueError):
store.append("df", df, min_itemsize={"foo": 20, "foobar": 20})
def test_append_with_empty_string(self, setup_path):
with ensure_clean_store(setup_path) as store:
# with all empty strings (GH 12242)
df = DataFrame({"x": ["a", "b", "c", "d", "e", "f", ""]})
store.append("df", df[:-1], min_itemsize={"x": 1})
store.append("df", df[-1:], min_itemsize={"x": 1})
tm.assert_frame_equal(store.select("df"), df)
def test_to_hdf_with_min_itemsize(self, setup_path):
with ensure_clean_path(setup_path) as path:
# min_itemsize in index with to_hdf (GH 10381)
df = tm.makeMixedDataFrame().set_index("C")
df.to_hdf(path, "ss3", format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
df2.to_hdf(path, "ss3", append=True, format="table")
tm.assert_frame_equal(pd.read_hdf(path, "ss3"), pd.concat([df, df2]))
# same as above, with a Series
df["B"].to_hdf(path, "ss4", format="table", min_itemsize={"index": 6})
df2["B"].to_hdf(path, "ss4", append=True, format="table")
tm.assert_series_equal(
pd.read_hdf(path, "ss4"), pd.concat([df["B"], df2["B"]])
)
@pytest.mark.parametrize(
"format", [pytest.param("fixed", marks=td.xfail_non_writeable), "table"]
)
def test_to_hdf_errors(self, format, setup_path):
data = ["\ud800foo"]
ser = pd.Series(data, index=pd.Index(data))
with ensure_clean_path(setup_path) as path:
# GH 20835
ser.to_hdf(path, "table", format=format, errors="surrogatepass")
result = pd.read_hdf(path, "table", errors="surrogatepass")
tm.assert_series_equal(result, ser)
def test_append_with_data_columns(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
df.iloc[0, df.columns.get_loc("B")] = 1.0
_maybe_remove(store, "df")
store.append("df", df[:2], data_columns=["B"])
store.append("df", df[2:])
tm.assert_frame_equal(store["df"], df)
# check that we have indices created
assert store._handle.root.df.table.cols.index.is_indexed is True
assert store._handle.root.df.table.cols.B.is_indexed is True
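            # B was passed via data_columns, so it is stored as its own
            # queryable column and gets an on-disk index alongside the index axis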
# data column searching
result = store.select("df", "B>0")
expected = df[df.B > 0]
tm.assert_frame_equal(result, expected)
# data column searching (with an indexable and a data_columns)
result = store.select("df", "B>0 and index>df.index[3]")
df_new = df.reindex(index=df.index[4:])
expected = df_new[df_new.B > 0]
tm.assert_frame_equal(result, expected)
# data column selection with a string data_column
df_new = df.copy()
df_new["string"] = "foo"
df_new.loc[1:4, "string"] = np.nan
df_new.loc[5:6, "string"] = "bar"
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["string"])
result = store.select("df", "string='foo'")
expected = df_new[df_new.string == "foo"]
tm.assert_frame_equal(result, expected)
# using min_itemsize and a data column
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
store.append(
"df", df_new, data_columns=["string"], min_itemsize={"string": 30}
)
check_col("df", "string", 30)
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["string"], min_itemsize=30)
check_col("df", "string", 30)
_maybe_remove(store, "df")
store.append(
"df", df_new, data_columns=["string"], min_itemsize={"values": 30}
)
check_col("df", "string", 30)
with ensure_clean_store(setup_path) as store:
df_new["string2"] = "foobarbah"
df_new["string_block1"] = "foobarbah1"
df_new["string_block2"] = "foobarbah2"
_maybe_remove(store, "df")
store.append(
"df",
df_new,
data_columns=["string", "string2"],
min_itemsize={"string": 30, "string2": 40, "values": 50},
)
check_col("df", "string", 30)
check_col("df", "string2", 40)
check_col("df", "values_block_1", 50)
with ensure_clean_store(setup_path) as store:
# multiple data columns
df_new = df.copy()
df_new.iloc[0, df_new.columns.get_loc("A")] = 1.0
df_new.iloc[0, df_new.columns.get_loc("B")] = -1.0
df_new["string"] = "foo"
sl = df_new.columns.get_loc("string")
df_new.iloc[1:4, sl] = np.nan
df_new.iloc[5:6, sl] = "bar"
df_new["string2"] = "foo"
sl = df_new.columns.get_loc("string2")
df_new.iloc[2:5, sl] = np.nan
df_new.iloc[7:8, sl] = "bar"
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["A", "B", "string", "string2"])
result = store.select(
"df", "string='foo' and string2='foo' and A>0 and B<0"
)
expected = df_new[
(df_new.string == "foo")
& (df_new.string2 == "foo")
& (df_new.A > 0)
& (df_new.B < 0)
]
tm.assert_frame_equal(result, expected, check_index_type=False)
# yield an empty frame
result = store.select("df", "string='foo' and string2='cool'")
expected = df_new[(df_new.string == "foo") & (df_new.string2 == "cool")]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(setup_path) as store:
# doc example
df_dc = df.copy()
df_dc["string"] = "foo"
df_dc.loc[4:6, "string"] = np.nan
df_dc.loc[7:9, "string"] = "bar"
df_dc["string2"] = "cool"
df_dc["datetime"] = Timestamp("20010102")
df_dc = df_dc._convert(datetime=True)
df_dc.loc[3:5, ["A", "B", "datetime"]] = np.nan
_maybe_remove(store, "df_dc")
store.append(
"df_dc", df_dc, data_columns=["B", "C", "string", "string2", "datetime"]
)
result = store.select("df_dc", "B>0")
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected, check_index_type=False)
result = store.select("df_dc", ["B > 0", "C > 0", "string == foo"])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(setup_path) as store:
# doc example part 2
np.random.seed(1234)
index = date_range("1/1/2000", periods=8)
df_dc = DataFrame(
np.random.randn(8, 3), index=index, columns=["A", "B", "C"]
)
df_dc["string"] = "foo"
df_dc.loc[4:6, "string"] = np.nan
df_dc.loc[7:9, "string"] = "bar"
df_dc.loc[:, ["B", "C"]] = df_dc.loc[:, ["B", "C"]].abs()
df_dc["string2"] = "cool"
# on-disk operations
store.append("df_dc", df_dc, data_columns=["B", "C", "string", "string2"])
result = store.select("df_dc", "B>0")
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected)
result = store.select("df_dc", ["B > 0", "C > 0", 'string == "foo"'])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")]
tm.assert_frame_equal(result, expected)
def test_create_table_index(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def col(t, column):
return getattr(store.get_storer(t).table.cols, column)
# data columns
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df["string2"] = "bar"
store.append("f", df, data_columns=["string", "string2"])
assert col("f", "index").is_indexed is True
assert col("f", "string").is_indexed is True
assert col("f", "string2").is_indexed is True
# specify index=columns
store.append(
"f2", df, index=["string"], data_columns=["string", "string2"]
)
assert col("f2", "index").is_indexed is False
assert col("f2", "string").is_indexed is True
assert col("f2", "string2").is_indexed is False
# try to index a non-table
_maybe_remove(store, "f2")
store.put("f2", df)
with pytest.raises(TypeError):
store.create_table_index("f2")
def test_append_hierarchical(self, setup_path):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo", "bar"],
)
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.append("mi", df)
result = store.select("mi")
tm.assert_frame_equal(result, df)
# GH 3748
result = store.select("mi", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
with ensure_clean_path("test.hdf") as path:
df.to_hdf(path, "df", format="table")
result = read_hdf(path, "df", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_column_multiindex(self, setup_path):
# GH 4710
# recreate multi-indexes properly
index = MultiIndex.from_tuples(
[("A", "a"), ("A", "b"), ("B", "a"), ("B", "b")], names=["first", "second"]
)
df = DataFrame(np.arange(12).reshape(3, 4), columns=index)
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(setup_path) as store:
store.put("df", df)
tm.assert_frame_equal(
store["df"], expected, check_index_type=True, check_column_type=True
)
store.put("df1", df, format="table")
tm.assert_frame_equal(
store["df1"], expected, check_index_type=True, check_column_type=True
)
with pytest.raises(ValueError):
store.put("df2", df, format="table", data_columns=["A"])
with pytest.raises(ValueError):
store.put("df3", df, format="table", data_columns=True)
# appending multi-column on existing table (see GH 6167)
with ensure_clean_store(setup_path) as store:
store.append("df2", df)
store.append("df2", df)
tm.assert_frame_equal(store["df2"], concat((df, df)))
# non_index_axes name
df = DataFrame(
np.arange(12).reshape(3, 4), columns=Index(list("ABCD"), name="foo")
)
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(setup_path) as store:
store.put("df1", df, format="table")
tm.assert_frame_equal(
store["df1"], expected, check_index_type=True, check_column_type=True
)
def test_store_multiindex(self, setup_path):
# validate multi-index names
# GH 5527
with ensure_clean_store(setup_path) as store:
def make_index(names=None):
return MultiIndex.from_tuples(
[
(datetime.datetime(2013, 12, d), s, t)
for d in range(1, 3)
for s in range(2)
for t in range(3)
],
names=names,
)
# no names
_maybe_remove(store, "df")
df = DataFrame(np.zeros((12, 2)), columns=["a", "b"], index=make_index())
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
# partial names
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", None, None]),
)
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
# series
_maybe_remove(store, "s")
s = Series(np.zeros(12), index=make_index(["date", None, None]))
store.append("s", s)
xp = Series(np.zeros(12), index=make_index(["date", "level_1", "level_2"]))
tm.assert_series_equal(store.select("s"), xp)
# dup with column
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "a", "t"]),
)
with pytest.raises(ValueError):
store.append("df", df)
# dup within level
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "date", "date"]),
)
with pytest.raises(ValueError):
store.append("df", df)
# fully names
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "s", "t"]),
)
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
def test_select_columns_in_where(self, setup_path):
# GH 6169
# recreate multi-indexes when columns is passed
# in the `where` argument
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo_name", "bar_name"],
)
# With a DataFrame
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table")
expected = df[["A"]]
tm.assert_frame_equal(store.select("df", columns=["A"]), expected)
tm.assert_frame_equal(store.select("df", where="columns=['A']"), expected)
# With a Series
s = Series(np.random.randn(10), index=index, name="A")
with ensure_clean_store(setup_path) as store:
store.put("s", s, format="table")
tm.assert_series_equal(store.select("s", where="columns=['A']"), s)
def test_mi_data_columns(self, setup_path):
# GH 14435
idx = pd.MultiIndex.from_arrays(
[date_range("2000-01-01", periods=5), range(5)], names=["date", "id"]
)
df = pd.DataFrame({"a": [1.1, 1.2, 1.3, 1.4, 1.5]}, index=idx)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=True)
actual = store.select("df", where="id == 1")
expected = df.iloc[[1], :]
tm.assert_frame_equal(actual, expected)
def test_pass_spec_to_storer(self, setup_path):
df = tm.makeDataFrame()
with ensure_clean_store(setup_path) as store:
store.put("df", df)
with pytest.raises(TypeError):
store.select("df", columns=["A"])
with pytest.raises(TypeError):
store.select("df", where=[("columns=A")])
@td.xfail_non_writeable
def test_append_misc(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df, chunksize=1)
result = store.select("df")
tm.assert_frame_equal(result, df)
store.append("df1", df, expectedrows=10)
result = store.select("df1")
tm.assert_frame_equal(result, df)
# more chunksize in append tests
def check(obj, comparator):
for c in [10, 200, 1000]:
with ensure_clean_store(setup_path, mode="w") as store:
store.append("obj", obj, chunksize=c)
result = store.select("obj")
comparator(result, obj)
df = tm.makeDataFrame()
df["string"] = "foo"
df["float322"] = 1.0
df["float322"] = df["float322"].astype("float32")
df["bool"] = df["float322"] > 0
df["time1"] = Timestamp("20130101")
df["time2"] = Timestamp("20130102")
check(df, tm.assert_frame_equal)
# empty frame, GH4273
with ensure_clean_store(setup_path) as store:
# 0 len
df_empty = DataFrame(columns=list("ABC"))
store.append("df", df_empty)
with pytest.raises(KeyError, match="'No object named df in the file'"):
store.select("df")
# repeated append of 0/non-zero frames
df = DataFrame(np.random.rand(10, 3), columns=list("ABC"))
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
store.append("df", df_empty)
tm.assert_frame_equal(store.select("df"), df)
# store
df = DataFrame(columns=list("ABC"))
store.put("df2", df)
tm.assert_frame_equal(store.select("df2"), df)
def test_append_raise(self, setup_path):
with ensure_clean_store(setup_path) as store:
# test append with invalid input to get good error messages
# list in column
df = tm.makeDataFrame()
df["invalid"] = [["a"]] * len(df)
assert df.dtypes["invalid"] == np.object_
with pytest.raises(TypeError):
store.append("df", df)
# multiple invalid columns
df["invalid2"] = [["a"]] * len(df)
df["invalid3"] = [["a"]] * len(df)
with pytest.raises(TypeError):
store.append("df", df)
# datetime with embedded nans as object
df = tm.makeDataFrame()
s = Series(datetime.datetime(2001, 1, 2), index=df.index)
s = s.astype(object)
s[0:5] = np.nan
df["invalid"] = s
assert df.dtypes["invalid"] == np.object_
with pytest.raises(TypeError):
store.append("df", df)
# directly ndarray
with pytest.raises(TypeError):
store.append("df", np.arange(10))
# series directly
with pytest.raises(TypeError):
store.append("df", Series(np.arange(10)))
# appending an incompatible table
df = tm.makeDataFrame()
store.append("df", df)
df["foo"] = "foo"
with pytest.raises(ValueError):
store.append("df", df)
def test_table_index_incompatible_dtypes(self, setup_path):
df1 = DataFrame({"a": [1, 2, 3]})
df2 = DataFrame({"a": [4, 5, 6]}, index=date_range("1/1/2000", periods=3))
with ensure_clean_store(setup_path) as store:
store.put("frame", df1, format="table")
with pytest.raises(TypeError):
store.put("frame", df2, format="table", append=True)
def test_table_values_dtypes_roundtrip(self, setup_path):
with ensure_clean_store(setup_path) as store:
df1 = DataFrame({"a": [1, 2, 3]}, dtype="f8")
store.append("df_f8", df1)
tm.assert_series_equal(df1.dtypes, store["df_f8"].dtypes)
df2 = DataFrame({"a": [1, 2, 3]}, dtype="i8")
store.append("df_i8", df2)
tm.assert_series_equal(df2.dtypes, store["df_i8"].dtypes)
# incompatible dtype
with pytest.raises(ValueError):
store.append("df_i8", df1)
# check creation/storage/retrieval of float32 (a bit hacky to
# actually create them though)
df1 = DataFrame(np.array([[1], [2], [3]], dtype="f4"), columns=["A"])
store.append("df_f4", df1)
tm.assert_series_equal(df1.dtypes, store["df_f4"].dtypes)
assert df1.dtypes[0] == "float32"
# check with mixed dtypes
df1 = DataFrame(
{
c: Series(np.random.randint(5), dtype=c)
for c in ["float32", "float64", "int32", "int64", "int16", "int8"]
}
)
df1["string"] = "foo"
df1["float322"] = 1.0
df1["float322"] = df1["float322"].astype("float32")
df1["bool"] = df1["float32"] > 0
df1["time1"] = Timestamp("20130101")
df1["time2"] = Timestamp("20130102")
store.append("df_mixed_dtypes1", df1)
result = store.select("df_mixed_dtypes1").dtypes.value_counts()
result.index = [str(i) for i in result.index]
expected = Series(
{
"float32": 2,
"float64": 1,
"int32": 1,
"bool": 1,
"int16": 1,
"int8": 1,
"int64": 1,
"object": 1,
"datetime64[ns]": 2,
}
)
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
def test_table_mixed_dtypes(self, setup_path):
# frame
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
store.append("df1_mixed", df)
tm.assert_frame_equal(store.select("df1_mixed"), df)
def test_unimplemented_dtypes_table_columns(self, setup_path):
with ensure_clean_store(setup_path) as store:
dtypes = [("date", datetime.date(2001, 1, 2))]
# currently not supported dtypes ####
for n, f in dtypes:
df = tm.makeDataFrame()
"""Tests for the sdv.constraints.tabular module."""
import uuid
from datetime import datetime
from unittest.mock import Mock
import numpy as np
import pandas as pd
import pytest
from sdv.constraints.errors import MissingConstraintColumnError
from sdv.constraints.tabular import (
Between, ColumnFormula, CustomConstraint, GreaterThan, Negative, OneHotEncoding, Positive,
Rounding, Unique, UniqueCombinations)
def dummy_transform_table(table_data):
return table_data
def dummy_reverse_transform_table(table_data):
return table_data
def dummy_is_valid_table(table_data):
return [True] * len(table_data)
def dummy_transform_table_column(table_data, column):
return table_data
def dummy_reverse_transform_table_column(table_data, column):
return table_data
def dummy_is_valid_table_column(table_data, column):
return [True] * len(table_data[column])
def dummy_transform_column(column_data):
return column_data
def dummy_reverse_transform_column(column_data):
return column_data
def dummy_is_valid_column(column_data):
return [True] * len(column_data)
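# Illustrative wiring of the dummy callables above into a CustomConstraint. This
# instance is not used by any test; it is only a hedged sketch of how the three
# signature styles (table, table+column, column) plug into the class:
_EXAMPLE_CUSTOM_CONSTRAINT = CustomConstraint(
    columns='a',
    transform=dummy_transform_table_column,
    reverse_transform=dummy_reverse_transform_table_column,
    is_valid=dummy_is_valid_table_column,
)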
class TestCustomConstraint():
def test___init__(self):
"""Test the ``CustomConstraint.__init__`` method.
The ``transform``, ``reverse_transform`` and ``is_valid`` methods
should be replaced by the given ones, importing them if necessary.
Setup:
- Create dummy functions (created above this class).
Input:
- dummy transform and revert_transform + is_valid FQN
Output:
- Instance with all the methods replaced by the dummy versions.
"""
is_valid_fqn = __name__ + '.dummy_is_valid_table'
# Run
instance = CustomConstraint(
transform=dummy_transform_table,
reverse_transform=dummy_reverse_transform_table,
is_valid=is_valid_fqn
)
# Assert
assert instance._transform == dummy_transform_table
assert instance._reverse_transform == dummy_reverse_transform_table
assert instance._is_valid == dummy_is_valid_table
def test__run_transform_table(self):
"""Test the ``CustomConstraint._run`` method.
The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "table" based functions.
Setup:
- Pass dummy transform function with ``table_data`` argument.
Side Effects:
- Run transform function once with ``table_data`` as input.
Output:
- applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_transform_mock = Mock(side_effect=dummy_transform_table,
return_value=table_data)
# Run
instance = CustomConstraint(transform=dummy_transform_mock)
transformed = instance.transform(table_data)
# Asserts
called = dummy_transform_mock.call_args
dummy_transform_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
pd.testing.assert_frame_equal(transformed, dummy_transform_mock.return_value)
def test__run_reverse_transform_table(self):
"""Test the ``CustomConstraint._run`` method.
The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "table" based functions.
Setup:
- Pass dummy reverse transform function with ``table_data`` argument.
Side Effects:
- Run reverse transform function once with ``table_data`` as input.
Output:
- applied identity transformation "table_data = reverse_transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_reverse_transform_mock = Mock(side_effect=dummy_reverse_transform_table,
return_value=table_data)
# Run
instance = CustomConstraint(reverse_transform=dummy_reverse_transform_mock)
reverse_transformed = instance.reverse_transform(table_data)
# Asserts
called = dummy_reverse_transform_mock.call_args
dummy_reverse_transform_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
pd.testing.assert_frame_equal(
reverse_transformed, dummy_reverse_transform_mock.return_value)
def test__run_is_valid_table(self):
"""Test the ``CustomConstraint._run_is_valid`` method.
The ``_run_is_valid`` method executes ``is_valid`` based on
the signature of the functions. In this test, we evaluate
the execution of "table" based functions.
Setup:
- Pass dummy is valid function with ``table_data`` argument.
Side Effects:
- Run is valid function once with ``table_data`` as input.
Output:
- Return a list of [True] of length ``table_data``.
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_is_valid_mock = Mock(side_effect=dummy_is_valid_table)
# Run
instance = CustomConstraint(is_valid=dummy_is_valid_mock)
is_valid = instance.is_valid(table_data)
# Asserts
expected_out = [True] * len(table_data)
called = dummy_is_valid_mock.call_args
dummy_is_valid_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
np.testing.assert_array_equal(is_valid, expected_out)
def test__run_transform_table_column(self):
"""Test the ``CustomConstraint._run`` method.
The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "table" and "column" based functions.
Setup:
- Pass dummy transform function with ``table_data`` and ``column`` arguments.
Side Effects:
- Run transform function once with ``table_data`` and ``column`` as input.
Output:
- applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_transform_mock = Mock(side_effect=dummy_transform_table_column,
return_value=table_data)
# Run
instance = CustomConstraint(columns='a', transform=dummy_transform_mock)
transformed = instance.transform(table_data)
# Asserts
called = dummy_transform_mock.call_args
assert called[0][1] == 'a'
dummy_transform_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
pd.testing.assert_frame_equal(transformed, dummy_transform_mock.return_value)
def test__run_reverse_transform_table_column(self):
"""Test the ``CustomConstraint._run`` method.
The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "table" and "column" based functions.
Setup:
- Pass dummy reverse transform function with ``table_data`` and ``column`` arguments.
Side Effects:
- Run reverse transform function once with ``table_data`` and ``column`` as input.
Output:
- applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_reverse_transform_mock = Mock(side_effect=dummy_reverse_transform_table_column,
return_value=table_data)
# Run
instance = CustomConstraint(columns='a', reverse_transform=dummy_reverse_transform_mock)
reverse_transformed = instance.reverse_transform(table_data)
# Asserts
called = dummy_reverse_transform_mock.call_args
assert called[0][1] == 'a'
dummy_reverse_transform_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
pd.testing.assert_frame_equal(
reverse_transformed, dummy_reverse_transform_mock.return_value)
def test__run_is_valid_table_column(self):
"""Test the ``CustomConstraint._run_is_valid`` method.
The ``_run_is_valid`` method executes ``is_valid`` based on
the signature of the functions. In this test, we evaluate
the execution of "table" and "column" based functions.
Setup:
- Pass dummy is valid function with ``table_data`` and ``column`` argument.
Side Effects:
- Run is valid function once with ``table_data`` and ``column`` as input.
Output:
- Return a list of [True] of length ``table_data``.
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_is_valid_mock = Mock(side_effect=dummy_is_valid_table_column)
# Run
instance = CustomConstraint(columns='a', is_valid=dummy_is_valid_mock)
is_valid = instance.is_valid(table_data)
# Asserts
expected_out = [True] * len(table_data)
called = dummy_is_valid_mock.call_args
assert called[0][1] == 'a'
dummy_is_valid_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
np.testing.assert_array_equal(is_valid, expected_out)
def test__run_transform_column(self):
"""Test the ``CustomConstraint._run`` method.
The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "column" based functions.
Setup:
- Pass dummy transform function with ``column_data`` argument.
Side Effects:
- Run transform function twice: first attempting the ``table_data`` and
``column`` signature, then falling back to ``column_data`` as input.
Output:
- applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_transform_mock = Mock(side_effect=dummy_transform_column,
return_value=table_data)
# Run
instance = CustomConstraint(columns='a', transform=dummy_transform_mock)
transformed = instance.transform(table_data)
# Asserts
called = dummy_transform_mock.call_args_list
assert len(called) == 2
# call 1 (try)
assert called[0][0][1] == 'a'
pd.testing.assert_frame_equal(called[0][0][0], table_data)
# call 2 (catch TypeError)
pd.testing.assert_series_equal(called[1][0][0], table_data['a'])
pd.testing.assert_frame_equal(transformed, dummy_transform_mock.return_value)
def test__run_reverse_transform_column(self):
"""Test the ``CustomConstraint._run`` method.
The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "column" based functions.
Setup:
- Pass dummy reverse transform function with ``column_data`` argument.
Side Effects:
- Run reverse transform function twice: first attempting the ``table_data``
and ``column`` signature, then falling back to ``column_data`` as input.
Output:
- Applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_reverse_transform_mock = Mock(side_effect=dummy_reverse_transform_column,
return_value=table_data)
# Run
instance = CustomConstraint(columns='a', reverse_transform=dummy_reverse_transform_mock)
reverse_transformed = instance.reverse_transform(table_data)
# Asserts
called = dummy_reverse_transform_mock.call_args_list
assert len(called) == 2
# call 1 (try)
assert called[0][0][1] == 'a'
pd.testing.assert_frame_equal(called[0][0][0], table_data)
# call 2 (catch TypeError)
pd.testing.assert_series_equal(called[1][0][0], table_data['a'])
pd.testing.assert_frame_equal(
reverse_transformed, dummy_reverse_transform_mock.return_value)
def test__run_is_valid_column(self):
"""Test the ``CustomConstraint._run_is_valid`` method.
The ``_run_is_valid`` method executes ``is_valid`` based on
the signature of the functions. In this test, we evaluate
the execution of "column" based functions.
Setup:
- Pass dummy is valid function with ``column_data`` argument.
Side Effects:
- Run is valid function twice: first attempting the ``table_data`` and
``column`` signature, then falling back to ``column_data`` as input.
Output:
- Return a list of [True] of length ``table_data``.
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_is_valid_mock = Mock(side_effect=dummy_is_valid_column)
# Run
instance = CustomConstraint(columns='a', is_valid=dummy_is_valid_mock)
is_valid = instance.is_valid(table_data)
# Asserts
expected_out = [True] * len(table_data)
called = dummy_is_valid_mock.call_args_list
assert len(called) == 2
# call 1 (try)
assert called[0][0][1] == 'a'
pd.testing.assert_frame_equal(called[0][0][0], table_data)
# call 2 (catch TypeError)
pd.testing.assert_series_equal(called[1][0][0], table_data['a'])
np.testing.assert_array_equal(is_valid, expected_out)
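# Orientation for the next test class (descriptive sketch only, not an executed
# fixture): UniqueCombinations memorizes the value combinations seen at fit time,
# replaces the constrained columns with a single joint/UUID column on transform,
# and restores them on reverse_transform. A typical round trip, assuming the
# fit/transform API exercised below:
#
#     constraint = UniqueCombinations(columns=['b', 'c'])
#     constraint.fit(table)
#     restored = constraint.reverse_transform(constraint.transform(table))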
class TestUniqueCombinations():
def test___init__(self):
"""Test the ``UniqueCombinations.__init__`` method.
It is expected to create a new Constraint instance and receive the names of
the columns that need to produce unique combinations.
Side effects:
- instance._columns == columns
"""
# Setup
columns = ['b', 'c']
# Run
instance = UniqueCombinations(columns=columns)
# Assert
assert instance._columns == columns
def test___init__sets_rebuild_columns_if_not_reject_sampling(self):
"""Test the ``UniqueCombinations.__init__`` method.
The rebuild columns should only be set if the ``handling_strategy``
is not ``reject_sampling``.
Side effects:
- instance.rebuild_columns are set
"""
# Setup
columns = ['b', 'c']
# Run
instance = UniqueCombinations(columns=columns, handling_strategy='transform')
# Assert
assert instance.rebuild_columns == tuple(columns)
def test___init__does_not_set_rebuild_columns_reject_sampling(self):
"""Test the ``UniqueCombinations.__init__`` method.
The rebuild columns should not be set if the ``handling_strategy``
is ``reject_sampling``.
Side effects:
- instance.rebuild_columns are empty
"""
# Setup
columns = ['b', 'c']
# Run
instance = UniqueCombinations(columns=columns, handling_strategy='reject_sampling')
# Assert
assert instance.rebuild_columns == ()
def test___init__with_one_column(self):
"""Test the ``UniqueCombinations.__init__`` method with only one constraint column.
Expect a ``ValueError`` because UniqueCombinations requires at least two
constraint columns.
Side effects:
- A ValueError is raised
"""
# Setup
columns = ['c']
# Run and assert
with pytest.raises(ValueError):
UniqueCombinations(columns=columns)
def test_fit(self):
"""Test the ``UniqueCombinations.fit`` method.
The ``UniqueCombinations.fit`` method is expected to:
- Call ``UniqueCombinations._valid_separator``.
- Find a valid separator for the data and generate the joint column name.
Input:
- Table data (pandas.DataFrame)
"""
# Setup
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
# Run
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
instance.fit(table_data)
# Asserts
expected_combinations = pd.DataFrame({
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
assert instance._separator == '#'
assert instance._joint_column == 'b#c'
pd.testing.assert_frame_equal(instance._combinations, expected_combinations)
def test_is_valid_true(self):
"""Test the ``UniqueCombinations.is_valid`` method.
If the input data satisfies the constraint, result is a series of ``True`` values.
Input:
- Table data (pandas.DataFrame), satisfying the constraint.
Output:
- Series of ``True`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.is_valid(table_data)
expected_out = pd.Series([True, True, True], name='b#c')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_false(self):
"""Test the ``UniqueCombinations.is_valid`` method.
If the input data doesn't satisfy the constraint, result is a series of ``False`` values.
Input:
- Table data (pandas.DataFrame), which does not satisfy the constraint.
Output:
- Series of ``False`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
incorrect_table = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['D', 'E', 'F'],
'c': ['g', 'h', 'i']
})
out = instance.is_valid(incorrect_table)
# Assert
expected_out = pd.Series([False, False, False], name='b#c')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_non_string_true(self):
"""Test the ``UniqueCombinations.is_valid`` method with non string columns.
If the input data satisfies the constraint, result is a series of ``True`` values.
Input:
- Table data (pandas.DataFrame), satisfying the constraint.
Output:
- Series of ``True`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.is_valid(table_data)
expected_out = pd.Series([True, True, True], name='b#c#d')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_non_string_false(self):
"""Test the ``UniqueCombinations.is_valid`` method with non string columns.
If the input data doesn't satisfy the constraint, result is a series of ``False`` values.
Input:
- Table data (pandas.DataFrame), which does not satisfy the constraint.
Output:
- Series of ``False`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
incorrect_table = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [6, 7, 8],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
out = instance.is_valid(incorrect_table)
# Assert
expected_out = pd.Series([False, False, False], name='b#c#d')
pd.testing.assert_series_equal(expected_out, out)
def test_transform(self):
"""Test the ``UniqueCombinations.transform`` method.
It is expected to return a Table data with the columns concatenated by the separator.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data transformed, with the columns concatenated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.transform(table_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out_a = pd.Series(['a', 'b', 'c'], name='a')
pd.testing.assert_series_equal(expected_out_a, out['a'])
try:
[uuid.UUID(u) for c, u in out['b#c'].items()]
except ValueError:
assert False
def test_transform_non_string(self):
"""Test the ``UniqueCombinations.transform`` method with non strings.
It is expected to return a Table data with the columns concatenated by the separator.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data transformed, with the columns as UUIDs.
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.transform(table_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out_a = pd.Series(['a', 'b', 'c'], name='a')
pd.testing.assert_series_equal(expected_out_a, out['a'])
try:
[uuid.UUID(u) for c, u in out['b#c#d'].items()]
except ValueError:
assert False
def test_transform_not_all_columns_provided(self):
"""Test the ``UniqueCombinations.transform`` method.
If some of the columns needed for the transform are missing, and
``fit_columns_model`` is False, it will raise a ``MissingConstraintColumnError``.
Input:
- Table data (pandas.DataFrame)
Output:
- Raises ``MissingConstraintColumnError``.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns, fit_columns_model=False)
instance.fit(table_data)
# Run/Assert
with pytest.raises(MissingConstraintColumnError):
instance.transform(pd.DataFrame({'a': ['a', 'b', 'c']}))
def test_reverse_transform(self):
"""Test the ``UniqueCombinations.reverse_transform`` method.
It is expected to return the original data separating the concatenated columns.
Input:
- Table data transformed (pandas.DataFrame)
Output:
- Original table data, with the concatenated columns separated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
transformed_data = instance.transform(table_data)
out = instance.reverse_transform(transformed_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_non_string(self):
"""Test the ``UniqueCombinations.reverse_transform`` method with a non string column.
It is expected to return the original data separating the concatenated columns.
Input:
- Table data transformed (pandas.DataFrame)
Output:
- Original table data, with the concatenated columns separated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
transformed_data = instance.transform(table_data)
out = instance.reverse_transform(transformed_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
pd.testing.assert_frame_equal(expected_out, out)
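# Orientation for the next test class (descriptive sketch only): GreaterThan
# enforces high > low (or >= when strict is False). ``low``/``high`` name columns
# unless ``scalar`` marks one side as a constant, and ``drop`` picks the side that
# is dropped on transform and rebuilt on reverse_transform. For example, a
# column-vs-scalar constraint such as
#
#     GreaterThan(low=0, high='age', scalar='low', strict=True)
#
# would require every value of the (assumed) column 'age' to be strictly positive.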
class TestGreaterThan():
def test__validate_scalar(self):
"""Test the ``_validate_scalar`` method.
This method validates the inputs and transforms them into
the correct format.
Input:
- scalar_column = 0
- column_names = 'b'
Output:
- column_names == ['b']
"""
# Setup
scalar_column = 0
column_names = 'b'
scalar = 'high'
# Run
out = GreaterThan._validate_scalar(scalar_column, column_names, scalar)
# Assert
assert out == ['b']
def test__validate_scalar_list(self):
"""Test the ``_validate_scalar`` method.
This method validates the inputs and transforms them into
the correct format.
Input:
- scalar_column = 0
- column_names = ['b']
Output:
- column_names == ['b']
"""
# Setup
scalar_column = 0
column_names = ['b']
scalar = 'low'
# Run
out = GreaterThan._validate_scalar(scalar_column, column_names, scalar)
# Assert
assert out == ['b']
def test__validate_scalar_error(self):
"""Test the ``_validate_scalar`` method.
This method raises an error when the scalar column is a list.
Input:
- scalar_column = 0
- column_names = 'b'
Side effect:
- Raise error since the scalar is a list
"""
# Setup
scalar_column = [0]
column_names = 'b'
scalar = 'high'
# Run / Assert
with pytest.raises(TypeError):
GreaterThan._validate_scalar(scalar_column, column_names, scalar)
def test__validate_inputs_high_is_scalar(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = 'a'
- high = 3
- scalar = 'high'
Output:
- low == ['a']
- high == 3
- constraint_columns = ('a')
"""
# Setup / Run
low, high, constraint_columns = GreaterThan._validate_inputs(
low='a', high=3, scalar='high', drop=None)
# Assert
assert low == ['a']
assert high == 3
assert constraint_columns == ('a',)
def test__validate_inputs_low_is_scalar(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = 3
- high = 'b'
- scalar = 'low'
- drop = None
Output:
- low == 3
- high == ['b']
- constraint_columns = ('b')
"""
# Setup / Run
low, high, constraint_columns = GreaterThan._validate_inputs(
low=3, high='b', scalar='low', drop=None)
# Assert
assert low == 3
assert high == ['b']
assert constraint_columns == ('b',)
def test__validate_inputs_scalar_none(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = 'a'
- high = 3 # where 3 is a column name
- scalar = None
- drop = None
Output:
- low == ['a']
- high == [3]
- constraint_columns = ('a', 3)
"""
# Setup / Run
low, high, constraint_columns = GreaterThan._validate_inputs(
low='a', high=3, scalar=None, drop=None)
# Assert
assert low == ['a']
assert high == [3]
assert constraint_columns == ('a', 3)
def test__validate_inputs_scalar_none_lists(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = ['a']
- high = ['b', 'c']
- scalar = None
- drop = None
Output:
- low == ['a']
- high == ['b', 'c']
- constraint_columns = ('a', 'b', 'c')
"""
# Setup / Run
low, high, constraint_columns = GreaterThan._validate_inputs(
low=['a'], high=['b', 'c'], scalar=None, drop=None)
# Assert
assert low == ['a']
assert high == ['b', 'c']
assert constraint_columns == ('a', 'b', 'c')
def test__validate_inputs_scalar_none_two_lists(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = ['a', 0]
- high = ['b', 'c']
- scalar = None
- drop = None
Side effect:
- Raise error because both high and low are more than one column
"""
# Run / Assert
with pytest.raises(ValueError):
GreaterThan._validate_inputs(low=['a', 0], high=['b', 'c'], scalar=None, drop=None)
def test__validate_inputs_scalar_unknown(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = 'a'
- high = 'b'
- scalar = 'unknown'
- drop = None
Side effect:
- Raise error because scalar is unknown
"""
# Run / Assert
with pytest.raises(ValueError):
GreaterThan._validate_inputs(low='a', high='b', scalar='unknown', drop=None)
def test__validate_inputs_drop_error_low(self):
"""Test the ``_validate_inputs`` method.
Make sure the method raises an error if ``drop``==``scalar``
when ``scalar`` is not ``None``.
Input:
- low = 2
- high = 'b'
- scalar = 'low'
- drop = 'low'
Side effect:
- Raise error because ``drop`` is the same as ``scalar``
"""
# Run / Assert
with pytest.raises(ValueError):
GreaterThan._validate_inputs(low=2, high='b', scalar='low', drop='low')
def test__validate_inputs_drop_error_high(self):
"""Test the ``_validate_inputs`` method.
Make sure the method raises an error if ``drop``==``scalar``
when ``scalar`` is not ``None``.
Input:
- low = 'a'
- high = 3
- scalar = 'high'
- drop = 'high'
Side effect:
- Raise error because ``drop`` is the same as ``scalar``
"""
# Run / Assert
with pytest.raises(ValueError):
GreaterThan._validate_inputs(low='a', high=3, scalar='high', drop='high')
def test__validate_inputs_drop_success(self):
"""Test the ``_validate_inputs`` method.
Make sure the method succeeds when ``drop`` differs from ``scalar``
and ``scalar`` is not ``None``.
Input:
- low = 'a'
- high = 0
- scalar = 'high'
- drop = 'low'
Output:
- low = ['a']
- high = 0
- constraint_columns == ('a')
"""
# Run / Assert
low, high, constraint_columns = GreaterThan._validate_inputs(
low='a', high=0, scalar='high', drop='low')
assert low == ['a']
assert high == 0
assert constraint_columns == ('a',)
def test___init___(self):
"""Test the ``GreaterThan.__init__`` method.
The passed arguments should be stored as attributes.
Input:
- low = 'a'
- high = 'b'
Side effects:
- instance._low == 'a'
- instance._high == 'b'
- instance._strict == False
"""
# Run
instance = GreaterThan(low='a', high='b')
# Asserts
assert instance._low == ['a']
assert instance._high == ['b']
assert instance._strict is False
assert instance._scalar is None
assert instance._drop is None
assert instance.constraint_columns == ('a', 'b')
def test___init__sets_rebuild_columns_if_not_reject_sampling(self):
"""Test the ``GreaterThan.__init__`` method.
The rebuild columns should only be set if the ``handling_strategy``
is not ``reject_sampling``.
Side effects:
- instance.rebuild_columns are set
"""
# Run
instance = GreaterThan(low='a', high='b', handling_strategy='transform')
# Assert
assert instance.rebuild_columns == ['b']
def test___init__does_not_set_rebuild_columns_reject_sampling(self):
"""Test the ``GreaterThan.__init__`` method.
The rebuild columns should not be set if the ``handling_strategy``
is ``reject_sampling``.
Side effects:
- instance.rebuild_columns are empty
"""
# Run
instance = GreaterThan(low='a', high='b', handling_strategy='reject_sampling')
# Assert
assert instance.rebuild_columns == ()
def test___init___high_is_scalar(self):
"""Test the ``GreaterThan.__init__`` method.
The passed arguments should be stored as attributes. Make sure ``scalar``
is set to ``'high'``.
Input:
- low = 'a'
- high = 0
- strict = True
- drop = 'low'
- scalar = 'high'
Side effects:
- instance._low == 'a'
- instance._high == 0
- instance._strict == True
- instance._drop = 'low'
- instance._scalar == 'high'
"""
# Run
instance = GreaterThan(low='a', high=0, strict=True, drop='low', scalar='high')
# Asserts
assert instance._low == ['a']
assert instance._high == 0
assert instance._strict is True
assert instance._scalar == 'high'
assert instance._drop == 'low'
assert instance.constraint_columns == ('a',)
def test___init___low_is_scalar(self):
"""Test the ``GreaterThan.__init__`` method.
The passed arguments should be stored as attributes. Make sure ``scalar``
is set to ``'low'``.
Input:
- low = 0
- high = 'a'
- strict = True
- drop = 'high'
- scalar = 'low'
Side effects:
- instance._low == 0
- instance._high == 'a'
- instance._strict == True
- instance._drop = 'high'
- instance._scalar == 'low'
"""
# Run
instance = GreaterThan(low=0, high='a', strict=True, drop='high', scalar='low')
# Asserts
assert instance._low == 0
assert instance._high == ['a']
assert instance._strict is True
assert instance._scalar == 'low'
assert instance._drop == 'high'
assert instance.constraint_columns == ('a',)
def test___init___strict_is_false(self):
"""Test the ``GreaterThan.__init__`` method.
Ensure that ``operator`` is set to ``np.greater_equal``
when ``strict`` is set to ``False``.
Input:
- low = 'a'
- high = 'b'
- strict = False
"""
# Run
instance = GreaterThan(low='a', high='b', strict=False)
# Assert
assert instance.operator == np.greater_equal
def test___init___strict_is_true(self):
"""Test the ``GreaterThan.__init__`` method.
Ensure that ``operator`` is set to ``np.greater``
when ``strict`` is set to ``True``.
Input:
- low = 'a'
- high = 'b'
- strict = True
"""
# Run
instance = GreaterThan(low='a', high='b', strict=True)
# Assert
assert instance.operator == np.greater
def test__init__get_columns_to_reconstruct_default(self):
"""Test the ``GreaterThan._get_columns_to_reconstruct`` method.
This method returns:
- ``_high`` if drop is "high"
- ``_low`` if drop is "low"
- ``_low`` if scalar is "high"
- ``_high`` otherwise
Setup:
- low = 'a'
- high = 'b'
Side effects:
- self._columns_to_reconstruct == ['b']
"""
# Setup
instance = GreaterThan(low='a', high='b')
assert instance._columns_to_reconstruct == ['b']
def test__init__get_columns_to_reconstruct_drop_high(self):
"""Test the ``GreaterThan._get_columns_to_reconstruct`` method.
This method returns:
- ``_high`` if drop is "high"
- ``_low`` if drop is "low"
- ``_low`` if scalar is "high"
- ``_high`` otherwise
Setup:
- low = 'a'
- high = 'b'
- drop = 'high'
Side effects:
- self._columns_to_reconstruct == ['b']
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='high')
assert instance._columns_to_reconstruct == ['b']
def test__init__get_columns_to_reconstruct_drop_low(self):
"""Test the ``GreaterThan._get_columns_to_reconstruct`` method.
This method returns:
- ``_high`` if drop is "high"
- ``_low`` if drop is "low"
- ``_low`` if scalar is "high"
- ``_high`` otherwise
Setup:
- low = 'a'
- high = 'b'
- drop = 'low'
Side effects:
- self._columns_to_reconstruct == ['a']
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='low')
assert instance._columns_to_reconstruct == ['a']
def test__init__get_columns_to_reconstruct_scalar_high(self):
"""Test the ``GreaterThan._get_columns_to_reconstruct`` method.
This method returns:
- ``_high`` if drop is "high"
- ``_low`` if drop is "low"
- ``_low`` if scalar is "high"
- ``_high`` otherwise
Setup:
- low = 'a'
- high = 0
- scalar = 'high'
Side effects:
- self._columns_to_reconstruct == ['a']
"""
# Setup
instance = GreaterThan(low='a', high=0, scalar='high')
assert instance._columns_to_reconstruct == ['a']
def test__get_value_column_list(self):
"""Test the ``GreaterThan._get_value`` method.
This method returns a scalar or a ndarray of values
depending on the type of the ``field``.
Input:
- Table with given data.
- field = 'low'
"""
# Setup
instance = GreaterThan(low='a', high='b')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
out = instance._get_value(table_data, 'low')
# Assert
expected = table_data[['a']].values
np.testing.assert_array_equal(out, expected)
def test__get_value_scalar(self):
"""Test the ``GreaterThan._get_value`` method.
This method returns a scalar or a ndarray of values
depending on the type of the ``field``.
Input:
- Table with given data.
- field = 'low'
- scalar = 'low'
"""
# Setup
instance = GreaterThan(low=3, high='b', scalar='low')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
out = instance._get_value(table_data, 'low')
# Assert
expected = 3
assert out == expected
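# The _get_diff_columns_name tests below exercise the naming of the synthetic
# "difference" columns: a '#' token is appended to the constrained column name(s),
# and for column-vs-column constraints the two names are joined around that token.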
def test__get_diff_columns_name_low_is_scalar(self):
"""Test the ``GreaterThan._get_diff_columns_name`` method.
The returned names should be the given column names with a '#' token appended.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low=0, high=['a', 'b#'], scalar='low')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b#': [4, 5, 6]
})
out = instance._get_diff_columns_name(table_data)
# Assert
expected = ['a#', 'b##']
assert out == expected
def test__get_diff_columns_name_high_is_scalar(self):
"""Test the ``GreaterThan._get_diff_columns_name`` method.
The returned names should be the given column names with a '#' token appended.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=0, scalar='high')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6]
})
out = instance._get_diff_columns_name(table_data)
# Assert
expected = ['a#', 'b#']
assert out == expected
def test__get_diff_columns_name_scalar_is_none(self):
"""Test the ``GreaterThan._get_diff_columns_name`` method.
Each returned name should combine the two constrained column names
with a '#' token between them.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low='a', high='b#', scalar=None)
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b#': [4, 5, 6]
})
out = instance._get_diff_columns_name(table_data)
# Assert
expected = ['b##a']
assert out == expected
def test__get_diff_columns_name_scalar_is_none_multi_column_low(self):
"""Test the ``GreaterThan._get_diff_columns_name`` method.
Each returned name should combine the two constrained column names
with a '#' token between them.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low=['a#', 'c'], high='b', scalar=None)
table_data = pd.DataFrame({
'a#': [1, 2, 4],
'b': [4, 5, 6],
'c#': [7, 8, 9]
})
out = instance._get_diff_columns_name(table_data)
# Assert
expected = ['a##b', 'c#b']
assert out == expected
def test__get_diff_columns_name_scalar_is_none_multi_column_high(self):
"""Test the ``GreaterThan._get_diff_columns_name`` method.
Each returned name should combine the two constrained column names
with a '#' token between them.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low=0, high=['b', 'c'], scalar=None)
table_data = pd.DataFrame({
0: [1, 2, 4],
'b': [4, 5, 6],
'c#': [7, 8, 9]
})
out = instance._get_diff_columns_name(table_data)
# Assert
expected = ['b#0', 'c#0']
assert out == expected
def test__check_columns_exist_success(self):
"""Test the ``GreaterThan._check_columns_exist`` method.
This method raises an error if the specified columns in
``low`` or ``high`` do not exist.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run / Assert
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6]
})
instance._check_columns_exist(table_data, 'low')
instance._check_columns_exist(table_data, 'high')
def test__check_columns_exist_error(self):
"""Test the ``GreaterThan._check_columns_exist`` method.
This method raises an error if the specified columns in
``low`` or ``high`` do not exist.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low='a', high='c')
# Run / Assert
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6]
})
instance._check_columns_exist(table_data, 'low')
with pytest.raises(KeyError):
instance._check_columns_exist(table_data, 'high')
def test__fit_only_one_datetime_arg(self):
"""Test the ``Between._fit`` method by passing in only one arg as datetime.
If only one of the high / low args is a datetime type, expect a ValueError.
Input:
- low is an int column
- high is a datetime
Output:
- n/a
Side Effects:
- ValueError
"""
# Setup
instance = GreaterThan(low='a', high=pd.to_datetime('2021-01-01'), scalar='high')
# Run and assert
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
with pytest.raises(ValueError):
instance._fit(table_data)
def test__fit__low_is_not_found_and_scalar_is_none(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should raise an error if
the ``low`` is set to a value not seen in ``table_data``.
Input:
- Table without ``low`` in columns.
Side Effect:
- KeyError.
"""
# Setup
instance = GreaterThan(low=3, high='b')
# Run / Assert
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
with pytest.raises(KeyError):
instance._fit(table_data)
def test__fit__high_is_not_found_and_scalar_is_none(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should raise an error if
the ``high`` is set to a value not seen in ``table_data``.
Input:
- Table without ``high`` in columns.
Side Effect:
- KeyError.
"""
# Setup
instance = GreaterThan(low='a', high=3)
# Run / Assert
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
with pytest.raises(KeyError):
instance._fit(table_data)
def test__fit__low_is_not_found_scalar_is_high(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should raise an error if
the ``low`` is set to a value not seen in ``table_data``.
Input:
- Table without ``low`` in columns.
Side Effect:
- KeyError.
"""
# Setup
instance = GreaterThan(low='c', high=3, scalar='high')
# Run / Assert
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
with pytest.raises(KeyError):
instance._fit(table_data)
def test__fit__high_is_not_found_scalar_is_high(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should raise an error if
the ``high`` is set to a value not seen in ``table_data``.
Input:
- Table without ``high`` in columns.
Side Effect:
- KeyError.
"""
# Setup
instance = GreaterThan(low=3, high='c', scalar='low')
# Run / Assert
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
with pytest.raises(KeyError):
instance._fit(table_data)
def test__fit__columns_to_reconstruct_drop_high(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_columns_to_reconstruct``
to ``instance._high`` if ``instance_drop`` is `high`.
Input:
- Table with two columns.
Side Effect:
- ``_columns_to_reconstruct`` is ``instance._high``
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='high')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._columns_to_reconstruct == ['b']
def test__fit__columns_to_reconstruct_drop_low(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_columns_to_reconstruct``
to ``instance._low`` if ``instance_drop`` is `low`.
Input:
- Table with two columns.
Side Effect:
- ``_columns_to_reconstruct`` is ``instance._low``
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='low')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._columns_to_reconstruct == ['a']
def test__fit__columns_to_reconstruct_default(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_columns_to_reconstruct``
to `high` by default.
Input:
- Table with two columns.
Side Effect:
- ``_columns_to_reconstruct`` is ``instance._high``
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._columns_to_reconstruct == ['b']
def test__fit__columns_to_reconstruct_high_is_scalar(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_columns_to_reconstruct``
to `low` if ``instance._scalar`` is ``'high'``.
Input:
- Table with two columns.
Side Effect:
- ``_columns_to_reconstruct`` is ``instance._low``
"""
# Setup
instance = GreaterThan(low='a', high='b', scalar='high')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._columns_to_reconstruct == ['a']
def test__fit__columns_to_reconstruct_low_is_scalar(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_columns_to_reconstruct``
to `high` if ``instance._scalar`` is ``'low'``.
Input:
- Table with two columns.
Side Effect:
- ``_columns_to_reconstruct`` is ``instance._high``
"""
# Setup
instance = GreaterThan(low='a', high='b', scalar='low')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._columns_to_reconstruct == ['b']
def test__fit__diff_columns_one_column(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_diff_columns``
to the one column in ``instance.constraint_columns`` plus a
token if there is only one column in that set.
Input:
- Table with one column.
Side Effect:
        - ``_diff_columns`` is the single constraint column name plus a token.
"""
# Setup
instance = GreaterThan(low='a', high=3, scalar='high')
# Run
table_data = | pd.DataFrame({'a': [1, 2, 3]}) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Fri May 10 13:29:26 2013
@author: <NAME> (<EMAIL>)
@version: 0.5.1
"""
from __future__ import division
import os, sys
import numpy as np
import pandas as pd
import codecs
import collections
import copy
import re
def read_label(fname, ftype, codec=None, tiers=None, addcols=[], stop_on_error=True, ignore_index=True):
'''Read one or more label files and extract specified tiers as a list of
dataframes, one dataframe per tier.
Each tier dataframe will contain a row for every label found in the specified
tier from each of the label files. Each row has a column indicating the
start time of the label interval ('t1') and the end time ('t2'). The label
text is in another column, named 'label' by default. If the tiers parameter
is used and running under Python 3, the label column will be named with the
tier name, if provided as a string and the string is a valid column name
(e.g. doesn't contain spaces). A fourth column ('fname') is added to the
tier dataframe that indicates which label file was the source of the label row.
The label files are required to be similar--same type, same codec, and with
identical tier names.
Required parameters:
fname = list of label file filenames; for a single file this parameter can be
provided as a string
ftype = the label file type; must be one of 'praat', 'praat_short',
'praat_long', 'esps', 'wavesurfer'
Optional parameters:
codec = codec used to read the label files (e.g. 'utf-8', 'ascii');
If codec is None, then Praat textgrids will default to 'utf-8' unless a
file-specific encoding is detected. [default None]
tiers = int, str, list-like; tier name(s) or tier index(es) to read into
DataFrames. DataFrames are returned in the same order as the list. If
this parameter is not used or None, then all tiers will be returned as
DataFrames. Every input label file must be compatible with the tier list.
addcols = list of additional DataFrame columns names to process and include
in output DataFrames. Possible column names and values provided are:
'barename': the label file's barename, with no path info or extension
'dirname': the user-provided path to the label file without the filename
'ext': the label file's extension
'fidx': the idx of the label file in fname
[default []]
stop_on_error = boolean; when True an error in processing an input file will
immediately reraise the error and no dataframes are returned; when False
the error message is sent to STDERR and file processing continues, if
possible. [default True]
ignore_index = boolean; value is passed to pd.concat()'s ignore_index
parameter when DataFrames from a label file are concatenated to the
existing tier DataFrames. When True, each tier DataFrame will have
an index with range (0:N). When False, the index resets to 0 at the
first row for each label file. [default True]
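    Example (illustrative sketch; the file names and tier names below are
    hypothetical):
        # Read the 'word' and 'phone' tiers from two Praat textgrids into
        # two DataFrames, keeping track of each source file's barename.
        words, phones = read_label(
            ['spkr1.TextGrid', 'spkr2.TextGrid'],
            ftype='praat',
            tiers=['word', 'phone'],
            addcols=['barename']
        )
        # Each DataFrame has columns t1, t2, the label text ('word'/'phone'),
        # 'fname', and 'barename'.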
'''
# Coerce to list if fname is a string.
try:
assert isinstance(fname, basestring) # Python 2
fname = [fname]
except AssertionError:
pass
except NameError:
try:
assert isinstance(fname, str) # Python 3
fname = [fname]
except AssertionError:
pass
if tiers is not None:
# Coerce to list if tiers is a string or int.
if isinstance(tiers, int):
tiers = [tiers]
else:
try:
assert isinstance(tiers, basestring) # Python 2
tiers = [tiers]
except AssertionError:
pass
except NameError:
try:
assert isinstance(tiers, str) # Python 3
tiers = [tiers]
except AssertionError:
pass
dflist = None
for fidx, f in enumerate(fname):
dirname, basename = os.path.split(f)
barename, ext = os.path.splitext(basename)
assigndict = {'fname': f} # Container for extra columns to assign()
poss_add = { # Possible columns to add.
'barename': barename, 'fname': f, 'dirname': dirname,
'fidx': fidx, 'ext': ext
}
for k, v in poss_add.items():
if k in addcols:
assigndict.update({k: v})
try:
lm = LabelManager(from_file=f, from_type=ftype, codec=codec)
tlist = lm.as_df(tiers=tiers, includes=[])
if dflist is None:
dflist = [[] for t in tlist] # list of lists
except Exception as e:
if stop_on_error is True:
raise e
else:
sys.stderr.write(e.msg())
continue
for idx, tr in enumerate(tlist):
tr = tr.assign(**assigndict)
dflist[idx].append(tr)
# Make a list of tier DataFrames.
dfs = [pd.concat(lst, ignore_index=ignore_index) for lst in dflist]
# Rename column containing label text content.
# If the tier parameter was not used, do not attempt to determine
# the tier names in case the input list of label files has an
# inconsistent number of tiers or tier names.
if tiers is None:
for df in dfs:
df.rename(columns={'text': 'label'}, inplace=True)
else:
for df, tier in zip(dfs, tiers):
tname = 'label'
try:
assert tier.isidentifier()
tname = tier
except (AssertionError, AttributeError):
pass
df.rename(columns={'text': tname}, inplace=True)
# Cast some columns to type Categorical.
catset = set(('barename', 'fname', 'dirname', 'ext'))
for df in dfs:
for c in list(catset & set(df.columns)): # intersection with catset
df[c] = df[c].astype('category')
return dfs
# Some convenience functions to be used in the classes.
# Strip white space at edges, remove surrounding quotes, and unescape quotes.
def _clean_praat_string(s):
return re.sub('""', '"', re.sub('^"|"$', '', s.strip()))
class LabelError(Exception):
"""Base class for errors in this module."""
def __init__(self, msg):
self.msg = msg
def __str__(self):
return repr(self.msg)
class LabelTimeValueError(LabelError):
"""Exceptions raised for incorrect time values."""
pass
class LabelManagerError(LabelError):
"""Exceptions raised for missing annotation manager objects."""
pass
class LabelManagerReadError(LabelError):
"""Exceptions raised for missing annotation manager objects."""
pass
class LabelManagerMissingValueError(LabelError):
"""Exceptions raised for missing annotation objects."""
pass
class LabelManagerParseError(LabelError):
"""Exceptions raised for missing annotation objects."""
pass
# TODO: make content unicode-capable
class Label(object):
"""An individual annotation."""
def __init__(self, text='', t1=None, t2=None, appdata=None, metadata=None,
codec='utf-8', *args, **kwargs):
super(Label, self).__init__()
if t1 == None:
raise LabelTimeValueError('Missing t1 argument in __init__().')
try:
self._t1 = float(t1) # Cast from string to be friendly.
except TypeError: # t1 == None
self._t1 = None
try:
self._t2 = float(t2)
except TypeError: # t2 == None
self._t2 = None
self.text = text
self.codec = codec
self.appdata = appdata # Container for app-specific data not used
# by this class.
def __repr__(self):
if self._t2 == None:
t2str = ''
else:
t2str = "t2={t2:0.4f}, ".format(t2=self._t2)
text = self.text
try:
text = self.text.encode(self.codec)
except NameError:
pass
return "Label( t1={t1:0.4f}, {t2}text='{text}' )".format(
t1=self._t1,
t2=t2str,
text=text
)
def _repr_html_(self):
"""Output for ipython notebook."""
if self._t2 == None:
t2str = ''
else:
t2str = "<b>t2</b>={t2:0.4f}, ".format(t2=self._t2)
return "<b>Label</b>( <b>t1</b>={t1:0.4f}, {t2}<b>text</b>='{text}' )".format(t1=self._t1,t2=t2str,text=self.text)
def _scale_by(self, factor):
self._t1 *= factor
if self._t2 != None: self._t2 *= factor
def _shift_by(self, t):
self._t1 += t
if self._t2 != None: self._t2 += t
@property
def t1(self):
"""Return the first (possibly only) timepoint of the Label."""
return self._t1
@property
def t2(self):
"""Return the second timepoint of the Label."""
return self._t2
@property
def duration(self):
"""Return the duration of the label, or np.nan if the label represents a point
in time."""
dur = np.nan
if self._t2 != None:
dur = self._t2 - self._t1
return dur
@property
def center(self):
"""Return the time centerpoint of the label. If the label represents
a point in time, return the point."""
ctr = self._t1
if self._t2 != None:
ctr = (self._t1 + self._t2) / 2.0
return ctr
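# Illustrative sketch (not part of the original module): constructing a
# standalone Label and reading its derived properties; the times are
# arbitrary example values.
#
#   lab = Label(text='ah', t1=0.25, t2=0.40)
#   lab.duration   # 0.15  (t2 - t1)
#   lab.center     # 0.325 ((t1 + t2) / 2)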
class _LabelTier(collections.MutableSet):
"""A manager of (point) Label objects"""
def __init__(self, start=0.0, end=float('inf'), name='', numlabels=None):
super(_LabelTier, self).__init__()
self.name = name
self.start = float(start)
self.end = float(end)
self.extra_data = {} # Container for additional file-specific data.
self._list = [] # Container for Label objects.
# Array of starting (t1) timepoints used for calculations.
if numlabels == None:
self._time = np.array([])
else: # Preallocate array.
self._time = np.empty(int(numlabels))
self._time[:] = np.nan
def __repr__(self):
s = "[" + ",".join(repr(l) for l in self._list) + "]"
return s
def _repr_html_(self):
"""Output for ipython notebook."""
s = "<ul>[<li>"
if len(self._list) > 10:
s += "</li><li>".join(self._list[n]._repr_html_() for n in range(5))
s += "</li><li>...</li><li>"
s += "</li><li>".join(self._list[n]._repr_html_() for n in range(-5,0))
else:
s += "</li><li>".join(l._repr_html_() for l in self._list)
s += "</li>]</ul>"
return s
#### Methods required by abstract base class ####
def __contains__(self, x):
return self._list.__contains__(x)
def __iter__(self):
return iter(self._list)
def add(self, label):
"""Add an annotation object."""
idx = np.searchsorted(self._time, label.t1)
self._list.insert(idx, label)
if len(self._time) > idx and np.isnan(self._time[idx]):
self._time[idx] = label.t1
else:
self._time = np.hstack([self._time[:idx], label.t1, self._time[idx:]])
def discard(self, label):
"""Remove a Label object."""
idx = self._list.index(label)
self._list.remove(label)
self._time = np.hstack([self._time[:idx], self._time[idx+1:]])
def __len__(self):
return len(self._list)
def _from_iterable(self, iterable):
"""The default constructor does not allow construction from an iterable,
which causes mixins inherited from Set to fail. This method handles construction
from an iterable."""
# FIXME: not implemented. also need to implement in derived classes for casts from other derived classes. Or implement equivalent functionality in LabelManager?
pass
#### End of methods required by abstract base class ####
def __getitem__(self, key):
'''Allow indexing of tier like a list.'''
return self._list[key]
def prev(self, label, skip=0):
"""Return the label preceding label. Use the skip parameter to return an earlier label, e.g. skip=1 returns the second preceding label."""
idx = self._list.index(label) - skip - 1
try:
label = self._list[idx]
except IndexError:
label = None
return label
def next(self, label, skip=0):
"""Return the label following label. Use the skip parameter to return a later label, e.g. skip=1 returns the second label after label."""
idx = self._list.index(label) + skip + 1
try:
label = self._list[idx]
except IndexError:
label = None
return label
def label_at(self, time, method='closest'):
"""Return the label occurring at a particular time."""
label = None
if method == 'closest':
idx = abs(self._time - time).argmin()
label = self._list[idx]
return label
def search(self, pattern, return_match=False, **kwargs):
"""Return the ordered list of Label objects that contain pattern. If
the return_match is True, return an ordered list of tuples that
contain the matching labels and corresponding match objects."""
try: # Python 2
if isinstance(pattern, basestring):
pattern = re.compile(pattern)
except NameError: # Python 3
if isinstance(pattern, str):
pattern = re.compile(pattern)
if len(kwargs) == 0:
labels = self._list
else:
labels = self.tslice(**kwargs)
if return_match:
return [(l,m) \
for l in labels \
# TODO: verify that *not* encoding is the correct thing to do
# for m in [pattern.search(l.text.encode(l.codec))] \
for m in [pattern.search(l.text)] \
if m]
else:
return [l \
for l in labels \
# TODO: verify that *not* encoding is the correct thing to do
# if pattern.search(l.text.encode(l.codec))]
if pattern.search(l.text)]
def tslice(self, t1, t2=None, tol=0.0, ltol=0.0, rtol=0.0, lincl=True, \
rincl=True):
"""Return a time slice, an ordered list of Labels in the given time
range."""
# tol: symmetrical tolerance for exact match (extended by ltol/rtol)
# ltol/rtol: tolerances on left/right side of time range (in addition to tol).
# lincl/rincl: whether to include exact match on left/right of time range.
left = t1 - tol - ltol
right = None
sl = []
if t2 == None: # Looking for a single match.
right = t1 + tol + rtol
else:
right = t2 + tol + rtol
if lincl and rincl:
sl = [l for l in self._list if (l.t1 >= left and l.t1 <= right) ]
elif lincl:
sl = [l for l in self._list if (l.t1 >= left and l.t1 < right) ]
elif rincl:
sl = [l for l in self._list if (l.t1 > left and l.t1 <= right) ]
else:
sl = [l for l in self._list if (l.t1 > left and l.t1 < right) ]
if t2 == None:
if len(sl) > 1:
raise IndexError(
"Found {:d} Labels while looking for one".format(len(sl))
)
elif len(sl) == 1:
sl = sl[0]
return sl
def scale_by(self, factor):
"""Multiply all annotation times by a factor."""
for item in self:
item._scale_by(factor)
self._time *= factor
def shift_by(self, t):
"""Add a constant to all annotation times."""
for item in self:
item._shift_by(t)
self._time += t
# TODO: come up with a good name and calling convention, then make
# this a normal (non-hidden) method; change in subclasses too.
def _as_string(self, fmt=None):
sys.stderr.write('''WARNING: _as_string() is deprecated and will be removed in a later version.
Please use as_string() instead.
'''
)
return self.as_string(fmt=fmt)
def as_string(self, fmt=None):
"""Return the tier as a string of label file type fmt. To be implemented in a subclass."""
pass
def as_df(self, includes=['duration', 'center']):
"""Return the tier as a Pandas DataFrame. To be implemented in a subclass."""
pass
class PointTier(_LabelTier):
"""A manager of (point) Label objects"""
def __init__(self, start=0.0, end=float('inf'), name='', numlabels=None, *args, **kwargs):
super(PointTier, self).__init__(start, end, name, numlabels, *args, **kwargs)
def __repr__(self):
s = "PointTier("
s += super(PointTier, self).__repr__()
return s + ")\n"
def _repr_html_(self):
"""Output for ipython notebook."""
s = "<p><b>PointTier</b>( "
s += super(PointTier, self)._repr_html_()
return s + " )</p>"
def _as_string(self, fmt=None):
sys.stderr.write('''WARNING: _as_string() is deprecated and will be removed in a later version.
Please use as_string() instead.
'''
)
return self.as_string(fmt=fmt)
def as_string(self, fmt=None):
"""Return the tier as a string of type fmt."""
if fmt == 'praat_long':
labels = [
' class = "TextTier"',
' name = "{:s}"'.format(self.name),
" xmin = {:0.12f}".format(self.start),
" xmax = {:0.12f}".format(self.end),
" points: size = {:d}".format(len(self))
]
for idx,lab in enumerate(self._list):
lab = '\n'.join((
" points [{:d}]:".format(idx+1),
" number = {:1.20f}".format(lab.t1),
' mark = "{:s}"'.format(
lab.text.replace('"', '""')
)
))
labels.append(lab)
return '\n'.join(labels)
elif fmt == 'praat_short':
labels = [
'"TextTier"',
'"{:s}"'.format(self.name),
"{:0.12f}".format(self.start),
"{:0.12f}".format(self.end),
"{:d}".format(len(self))
]
for lab in self._list:
lab = '\n'.join((
"{:1.20f}".format(lab.t1),
'"{:s}"'.format(lab.text.replace('"', '""'))
))
labels.append(lab)
return '\n'.join(labels)
elif fmt == 'esps':
# TODO: implement
pass
elif fmt == 'wavesurfer':
pass
# TODO: implement
def as_df(self):
"""Return the tier as a Pandas DataFrame."""
t1 = pd.Series(self._time)
text = pd.Series([None] * len(t1))
for idx, label in enumerate(self):
text[idx] = label.text
df = pd.concat([t1, text], axis=1)
df.columns = ['t1', 'text']
return df
def add(self, label):
"""Add an annotation object."""
super(PointTier, self).add(label)
if self.end == np.Inf or label.t1 > self.end:
self.end = label.t1
# TODO: add discard() and adjust self.end?
class IntervalTier(_LabelTier):
"""A manager of interval Label objects"""
def __init__(self, start=0.0, end=float('inf'), name='', numlabels=None, *args, **kwargs):
super(IntervalTier, self).__init__(start, end, name, numlabels, *args, **kwargs)
# Get/set start time of list of point annotations.
def __repr__(self):
s = "IntervalTier("
s += super(IntervalTier, self).__repr__()
return s + ")\n"
def _repr_html_(self):
"""Output for ipython notebook."""
s = "<p><b>IntervalTier</b>( "
s += super(IntervalTier, self)._repr_html_()
return s + " )</p>"
def _as_string(self, fmt=None):
sys.stderr.write('''WARNING: _as_string() is deprecated and will be removed in a later version.
Please use as_string() instead.
'''
)
return self.as_string(fmt=fmt)
def as_string(self, fmt=None):
"""Return the tier as a string of type fmt."""
if fmt == 'praat_long':
labels = [
' class = "IntervalTier"',
' name = "{:s}"'.format(self.name),
" xmin = {:0.12f}".format(self.start),
" xmax = {:0.12f}".format(self.end),
" intervals: size = {:d}".format(len(self))
]
for idx,lab in enumerate(self._list):
lab = '\n'.join((
" intervals [{:d}]:".format(idx+1),
" xmin = {:1.20f}".format(lab.t1),
" xmax = {:1.20f}".format(lab.t2),
' text = "{:s}"'.format(
lab.text.replace('"', '""')
)
))
labels.append(lab)
return '\n'.join(labels)
elif fmt == 'praat_short':
labels = [
'"IntervalTier"',
'"{:s}"'.format(self.name),
"{:0.12f}".format(self.start),
"{:0.12f}".format(self.end),
"{:d}".format(len(self))
]
for lab in self._list:
lab = '\n'.join((
"{:1.20f}".format(lab.t1),
"{:1.20f}".format(lab.t2),
'"{:s}"'.format(lab.text.replace('"', '""'))
))
labels.append(lab)
return '\n'.join(labels)
elif fmt == 'esps':
# TODO: implement
pass
elif fmt == 'wavesurfer':
pass
# TODO: implement
def as_df(self, includes=['duration', 'center']):
"""Return the tier as a Pandas DataFrame.
The includes parameter is a list of strings that identify precalculated
columns to be included as a convenience. By default these columns are
'duration' (t2 - t1) and 'center' ((t2 + t1) / 2). Since the information
in these columns can be calculated from t1 and t2 you can reduce the
memory usage of the DataFrame by excluding one or both of these strings
from the includes list."""
t1 = pd.Series(self._time)
t2 = pd.Series([np.nan] * len(t1))
text = pd.Series([None] * len(t1))
if 'duration' in includes:
dur = pd.Series([None] * len(t1), dtype=np.float64)
if 'center' in includes:
ctr = pd.Series([None] * len(t1), dtype=np.float64)
for idx, label in enumerate(self):
t2[idx] = label.t2
text[idx] = label.text
if 'duration' in includes:
dur[idx] = label.duration
if 'center' in includes:
ctr[idx] = label.center
cols = ['t1', 't2', 'text']
df = pd.concat([t1, t2, text], axis=1)
if 'duration' in includes:
cols.extend(['duration'])
df = pd.concat([df, dur], axis=1)
if 'center' in includes:
cols.extend(['center'])
df = | pd.concat([df, ctr], axis=1) | pandas.concat |
import requests
import pandas as pd
import numpy as np
import configparser
from datetime import timedelta, datetime
from dateutil import relativedelta, parser, rrule
from dateutil.rrule import WEEKLY
class whoop_login:
'''A class object to allow a user to login and store their authorization code,
then perform pulls using the code in order to access different types of data'''
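    # Typical usage (illustrative sketch; 'user.ini' is an assumed credentials
    # file path -- see get_authorization for the expected layout):
    #
    #   client = whoop_login()
    #   client.get_authorization('user.ini')
    #   daily = client.get_keydata_all()        # one row per day of membership
    #   workouts = client.get_activities_all()  # one row per logged activity
    #   sleeps = client.get_sleep_all()         # one row per night of sleep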
    def __init__(self, auth_code=None, whoop_id=None, current_datetime=None):
        self.auth_code=auth_code
        self.whoop_id=whoop_id
        # evaluate the default at call time rather than once at import time
        self.current_datetime=current_datetime if current_datetime is not None else datetime.utcnow()
self.start_datetime=None
self.all_data=None
self.all_activities=None
self.sport_dict=None
self.all_sleep=None
self.all_sleep_events=None
def pull_api(self, url,df=False):
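        '''Perform an authenticated GET against the given WHOOP API url.
        Returns the parsed JSON (or a flattened DataFrame when df=True), or
        the string "no response" when the call fails or returns no content.'''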
auth_code=self.auth_code
headers={'authorization':auth_code}
pull=requests.get(url,headers=headers)
if pull.status_code==200 and len(pull.content)>1:
if df:
d=pd.json_normalize(pull.json())
return d
else:
return pull.json()
else:
return "no response"
def pull_sleep_main(self,sleep_id):
athlete_id=self.whoop_id
sleep=self.pull_api('https://api-7.whoop.com/users/{}/sleeps/{}'.format(athlete_id,sleep_id))
main_df=pd.json_normalize(sleep)
return main_df
def pull_sleep_events(self,sleep_id):
athlete_id=self.whoop_id
sleep=self.pull_api('https://api-7.whoop.com/users/{}/sleeps/{}'.format(athlete_id,sleep_id))
events_df=pd.json_normalize(sleep['events'])
events_df['id']=sleep_id
return events_df
def get_authorization(self,user_ini):
'''
Function to get the authorization token and user id.
This must be completed before a user can query the api
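        Expected layout of the ini file passed as user_ini (section and key
        names follow the config lookups below; the values are placeholders):
            [whoop]
            username = your_email@example.com
            password = your_password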
'''
config=configparser.ConfigParser()
config.read(user_ini)
username=config['whoop']['username']
password=config['whoop']['password']
headers={
"username": username,
"password": password,
"grant_type": "password",
"issueRefresh": False}
auth = requests.post("https://api-7.whoop.com/oauth/token", json=headers)
if auth.status_code==200:
content=auth.json()
user_id=content['user']['id']
token=content['access_token']
start_time=content['user']['profile']['createdAt']
self.whoop_id=user_id
self.auth_code='bearer ' + token
self.start_datetime=start_time
print("Authentication successful")
else:
print("Authentication failed - please double check your credentials")
def get_keydata_all(self):
'''
This function returns a dataframe of WHOOP metrics for each day of WHOOP membership.
In the resulting dataframe, each day is a row and contains strain, recovery, and sleep information
'''
if self.start_datetime:
if self.all_data is not None:
## All data already pulled
return self.all_data
else:
start_date=parser.isoparse(self.start_datetime).replace(tzinfo=None)
end_time='T23:59:59.999Z'
start_time='T00:00:00.000Z'
intervals=rrule.rrule(freq=WEEKLY,interval=1,until=self.current_datetime, dtstart=start_date)
date_range=[[d.strftime('%Y-%m-%d') + start_time,
(d+relativedelta.relativedelta(weeks=1)).strftime('%Y-%m-%d') + end_time] for d in intervals]
all_data=pd.DataFrame()
for dates in date_range:
cycle_url='https://api-7.whoop.com/users/{}/cycles?end={}&start={}'.format(self.whoop_id,
dates[1],
dates[0])
data=self.pull_api(cycle_url,df=True)
all_data=pd.concat([all_data,data])
all_data.reset_index(drop=True,inplace=True)
## fixing the day column so it's not a list
all_data['days']=all_data['days'].map(lambda d: d[0])
all_data.rename(columns={"days":'day'},inplace=True)
## Putting all time into minutes instead of milliseconds
sleep_cols=['qualityDuration','needBreakdown.baseline','needBreakdown.debt','needBreakdown.naps',
'needBreakdown.strain','needBreakdown.total']
for sleep_col in sleep_cols:
all_data['sleep.' + sleep_col]=all_data['sleep.' + sleep_col].astype(float).apply(lambda x: np.nan if np.isnan(x) else x/60000)
## Making nap variable
all_data['nap_duration']=all_data['sleep.naps'].apply(lambda x: x[0]['qualityDuration']/60000 if len(x)==1 else(
sum([y['qualityDuration'] for y in x if y['qualityDuration'] is not None])/60000 if len(x)>1 else 0))
all_data.drop(['sleep.naps'],axis=1,inplace=True)
## dropping duplicates subsetting because of list columns
all_data.drop_duplicates(subset=['day','sleep.id'],inplace=True)
self.all_data=all_data
return all_data
else:
print("Please run the authorization function first")
def get_activities_all(self):
'''
Activity data is pulled through the get_keydata functions so if the data pull is present, this function
just transforms the activity column into a dataframe of activities, where each activity is a row.
If it has not been pulled, this function runs the key data function then returns the activity dataframe'''
if self.sport_dict:
sport_dict=self.sport_dict
else:
sports=self.pull_api('https://api-7.whoop.com/sports')
sport_dict={sport['id']:sport['name'] for sport in sports}
            self.sport_dict=sport_dict
if self.start_datetime:
## process activity data
if self.all_data is not None:
## use existing
data=self.all_data
else:
## pull all data to process activities
data=self.get_keydata_all()
## now process activities data
act_data=pd.json_normalize(data[data['strain.workouts'].apply(len)>0]['strain.workouts'].apply(lambda x: x[0]))
act_data[['during.upper','during.lower']]=act_data[['during.upper','during.lower']].apply(pd.to_datetime)
act_data['total_minutes']=act_data.apply(lambda x: (x['during.upper']-x['during.lower']).total_seconds()/60.0,axis=1)
for z in range(0,6):
act_data['zone{}_minutes'.format(z+1)]=act_data['zones'].apply(lambda x: x[z]/60000.)
act_data['sport_name']=act_data.sportId.apply(lambda x: sport_dict[x])
act_data['day']=act_data['during.lower'].dt.strftime('%Y-%m-%d')
act_data.drop(['zones','during.bounds'],axis=1,inplace=True)
act_data.drop_duplicates(inplace=True)
self.all_activities=act_data
return act_data
else:
print("Please run the authorization function first")
def get_sleep_all(self):
'''
This function returns all sleep metrics in a data frame, for the duration of user's WHOOP membership.
Each row in the data frame represents one night of sleep
'''
if self.auth_code:
if self.all_data is not None:
## use existing
data=self.all_data
else:
## pull timeframe data
data=self.get_keydata_all()
## getting all the sleep ids
if self.all_sleep is not None:
## All sleep data already pulled
return self.all_sleep
else:
sleep_ids=data['sleep.id'].values.tolist()
sleep_list=[int(x) for x in sleep_ids if pd.isna(x)==False]
all_sleep=pd.DataFrame()
for s in sleep_list:
m=self.pull_sleep_main(s)
all_sleep=pd.concat([all_sleep,m])
## Cleaning sleep data
sleep_update=['qualityDuration','latency','debtPre','debtPost','needFromStrain','sleepNeed',
'habitualSleepNeed','timeInBed','lightSleepDuration','slowWaveSleepDuration',
'remSleepDuration','wakeDuration','arousalTime','noDataDuration','creditFromNaps',
'projectedSleep']
for col in sleep_update:
all_sleep[col]=all_sleep[col].astype(float).apply(lambda x: np.nan if np.isnan(x) else x/60000)
all_sleep.drop(['during.bounds'],axis=1,inplace=True)
self.all_sleep=all_sleep.copy(deep=True)
all_sleep.drop(['events'],axis=1,inplace=True)
return all_sleep
else:
print("Please run the authorization function first")
def get_sleep_events_all(self):
'''
This function returns all sleep events in a data frame, for the duration of user's WHOOP membership.
Each row in the data frame represents an individual sleep event within an individual night of sleep.
Sleep events can be joined against the sleep or main datasets by sleep id.
All sleep times are returned in minutes.
'''
if self.auth_code:
if self.all_data is not None:
## use existing
data=self.all_data
else:
## pull timeframe data
                data=self.get_keydata_all()
## getting all the sleep ids
if self.all_sleep_events is not None:
## All sleep data already pulled
return self.all_sleep_events
else:
if self.all_sleep is not None:
sleep_events=self.all_sleep[['activityId','events']]
all_sleep_events=pd.concat([pd.concat([pd.json_normalize(events),
pd.DataFrame({'id':len(events)*[sleep]})],axis=1) for events, sleep in zip(sleep_events['events'],sleep_events['activityId'])])
else:
sleep_ids=data['sleep.id'].values.tolist()
sleep_list=[int(x) for x in sleep_ids if | pd.isna(x) | pandas.isna |
import os
import sys
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, compat
from pandas.util import testing as tm
class TestToCSV:
@pytest.mark.xfail((3, 6, 5) > sys.version_info >= (3, 5),
reason=("Python csv library bug "
"(see https://bugs.python.org/issue32255)"))
def test_to_csv_with_single_column(self):
# see gh-18676, https://bugs.python.org/issue32255
#
# Python's CSV library adds an extraneous '""'
# before the newline when the NaN-value is in
# the first row. Otherwise, only the newline
# character is added. This behavior is inconsistent
# and was patched in https://bugs.python.org/pull_request4672.
df1 = DataFrame([None, 1])
expected1 = """\
""
1.0
"""
with | tm.ensure_clean('test.csv') | pandas.util.testing.ensure_clean |
import pandas as pd
import numpy as np
import gspread_dataframe as gd
import gspread as gs
import setup
import queries
# csv export of historical sales
sales_master = pd.read_csv('Inventory Manager/historical_sales.csv')
# dropping na values, filtering out samples
sales_master = sales_master.dropna()
sales_master = sales_master[sales_master['Sample'] == 'N']
# adding in datetime fields for segmentation
sales_master['Delivery Date'] = pd.to_datetime(sales_master['Delivery Date'])
sales_master['Month'] = sales_master['Delivery Date'].dt.month
sales_master['Year'] = sales_master['Delivery Date'].dt.year
sales_master['Week'] = sales_master['Delivery Date'].dt.isocalendar().week
# limiting data to only directly purchased and managed inventory
sales_master_no_dsw = sales_master[sales_master['Warehouse'] != 'DSW']
# global monthly sales
ind = ['Item Description: Product Family', 'Item Description: Size']
cols = ['Year', 'Month']
monthly_sales_global = pd.pivot_table(sales_master_no_dsw, values='Cases Sold', index=ind, columns=cols, aggfunc=np.sum).reset_index()
monthly_sales_global = monthly_sales_global.fillna(0)
# monthly sales by warehouse
warehouses = ['SBC1', 'CAW1', 'ILL1', 'VAW1']
ind = ['Item Description: Product Family', 'Item Description: Size', 'Warehouse']
cols = ['Year', 'Month']
monthly_sales_wh = pd.pivot_table(sales_master_no_dsw, values='Cases Sold', index=ind, columns=cols, aggfunc=np.sum).reset_index()
monthly_sales_sbc1 = monthly_sales_wh[monthly_sales_wh['Warehouse'] == warehouses[0]].fillna(0)
monthly_sales_caw1 = monthly_sales_wh[monthly_sales_wh['Warehouse'] == warehouses[1]].fillna(0)
monthly_sales_ill1 = monthly_sales_wh[monthly_sales_wh['Warehouse'] == warehouses[2]].fillna(0)
monthly_sales_vaw1 = monthly_sales_wh[monthly_sales_wh['Warehouse'] == warehouses[3]].fillna(0)
# import dfs from queries sheet
tx_global = queries.tx_global
tx_wh_all = queries.tx_wh_all
base_table = queries.base_table
# create list of t-x dataframes for each warehouse, callable based off position in warehouse list
tx_whs = [tx_wh_all[tx_wh_all.index.get_level_values(1) == wh] for wh in warehouses]
# creation of all base templates specifc to each depletion report style
global_base = base_table[['Product Family', 'Description', 'Current Vintage', 'Country', 'Size',
'Bottles/Case', 'Item Cost NJ', 'Total Cases OH', 'NJ Cases OH', 'CA Cases OH',
'Total Cases Committed', 'Total Inv Value', 'NJ Cases on Order', 'Cases on Next Drop', 'Next Drop Date']]
sbc1_base = base_table[['Product Family', 'Description', 'Current Vintage', 'Country', 'Size',
'Bottles/Case', 'Item Cost NJ', 'NJ Cases OH', 'NJ Cases Committed',
'NJ Cases Available', 'NJ Inv Value', 'NJ Cases on Order', 'Cases on Next Drop',
'Next Drop Date']]
caw1_base = base_table[['Product Family', 'Description', 'Current Vintage', 'Country', 'Size',
'Bottles/Case', 'Item Cost CA', 'CA Cases OH', 'CA Cases Committed',
'CA Cases Available', 'CA Inv Value', 'CA Cases on Order']]
ill1_base = base_table[['Product Family', 'Description', 'Current Vintage', 'Country', 'Size',
'Bottles/Case', 'Item Cost IL', 'IL Cases OH', 'IL Cases Comitted',
'IL Cases Available', 'IL Inv Value']]
vaw1_base = base_table[['Product Family', 'Description', 'Current Vintage', 'Country', 'Size',
'Bottles/Case', 'Item Cost VA', 'VA Cases OH', 'VA Cases Committed',
'VA Cases Available', 'VA Inv Value']]
# joining t-x sales data to respective base template
global_report = (global_base.join(tx_global)
.drop('Item_Name__c', axis=1)
.sort_values('Description'))
global_report.iloc[:, -5:] = global_report.iloc[:, -5:].fillna(0)
sbc1 = (sbc1_base.join(tx_whs[0].reset_index(level=1))
.drop(['Warehouse__c', 'Item_Name__c'], axis=1)
.sort_values('Description'))
sbc1 = sbc1[(sbc1['NJ Cases OH'] > 0) | (sbc1['NJ Cases on Order'] > 0)]
sbc1.iloc[:, -5:] = sbc1.iloc[:, -5:].fillna(0)
caw1 = (caw1_base.join(tx_whs[1].reset_index(level=1))
.drop(['Warehouse__c', 'Item_Name__c'], axis=1)
.sort_values('Description'))
caw1 = caw1[(caw1['CA Cases OH'] > 0) | (caw1['CA Cases on Order'] > 0)]
caw1.iloc[:, -5:] = caw1.iloc[:, -5:].fillna(0)
ill1 = (ill1_base.join(tx_whs[2].reset_index(level=1))
.drop(['Warehouse__c', 'Item_Name__c'], axis=1)
.sort_values('Description'))
ill1 = ill1[ill1['IL Cases OH'] > 0]
ill1.iloc[:, -5:] = ill1.iloc[:, -5:].fillna(0)
vaw1 = (vaw1_base.join(tx_whs[3].reset_index(level=1))
.drop(['Warehouse__c', 'Item_Name__c'], axis=1)
.sort_values('Description'))
vaw1 = vaw1[vaw1['VA Cases OH'] > 0]
vaw1.iloc[:, -5:] = vaw1.iloc[:, -5:].fillna(0)
inv_reports = [global_report, sbc1, caw1, ill1, vaw1]
global_report['Months Inv OH'] = ((global_report['Total Cases OH']
- global_report['Total Cases Committed'])
/ global_report['Cases Sold: T-30'])
sbc1['Months Inv OH'] = (sbc1['NJ Cases Available'] / sbc1['Cases Sold: T-30']).round(1)
caw1['Months Inv OH'] = (caw1['CA Cases Available'] / caw1['Cases Sold: T-30']).round(1)
ill1['Months Inv OH'] = (ill1['IL Cases Available'] / ill1['Cases Sold: T-30']).round(1)
vaw1['Months Inv OH'] = (vaw1['VA Cases Available'] / vaw1['Cases Sold: T-30']).round(1)
for df in inv_reports:
df['Months Inv OH'] = df['Months Inv OH'].replace([np.inf, -np.inf], np.nan).round(1)
df.reset_index(inplace=True)
# joining all historical monthly sales data to reports
## global master
global_joined = global_report.merge(monthly_sales_global, how='left', left_on=['Product Family', 'Size'],
right_on=['Item Description: Product Family', 'Item Description: Size'])
global_master = global_joined.drop([('Item Description: Product Family', ''), ('Item Description: Size', '')], axis=1)
## sbc1 master
sbc1_joined = sbc1.merge(monthly_sales_sbc1, how='left', left_on=['Product Family', 'Size'],
right_on=['Item Description: Product Family', 'Item Description: Size'])
sbc1_master = sbc1_joined.drop([('Item Description: Product Family', ''), ('Item Description: Size', ''),
('Warehouse', '')], axis=1)
## caw1 master
caw1_joined = caw1.merge(monthly_sales_caw1, how='left', left_on=['Product Family', 'Size'],
right_on=['Item Description: Product Family', 'Item Description: Size'])
caw1_master = caw1_joined.drop([('Item Description: Product Family', ''), ('Item Description: Size', ''),
('Warehouse', '')], axis=1)
## ill1 master
ill1_joined = ill1.merge(monthly_sales_ill1, how='left', left_on=['Product Family', 'Size'],
right_on=['Item Description: Product Family', 'Item Description: Size'])
ill1_master = ill1_joined.drop([('Item Description: Product Family', ''), ('Item Description: Size', ''),
('Warehouse', '')], axis=1)
## vaw1 master
vaw1_joined = vaw1.merge(monthly_sales_vaw1, how='left', left_on=['Product Family', 'Size'],
right_on=['Item Description: Product Family', 'Item Description: Size'])
vaw1_master = vaw1_joined.drop([('Item Description: Product Family', ''), ('Item Description: Size', ''),
('Warehouse', '')], axis=1)
# list of master inventory reports to perform final modifications on
master_dfs = [global_master, sbc1_master, caw1_master, ill1_master, vaw1_master]
# function list to modify final reports
## function to subtract X amount of months from current date, returns tuple of (year, month)
def month_sbtrkt(months_from_today):
year = (pd.Timestamp.today() + pd.tseries.offsets.DateOffset(months=months_from_today)).year
month = (pd.Timestamp.today() + pd.tseries.offsets.DateOffset(months=months_from_today)).month
return (year, month)
## function to print month name and year
def month_namer(months_from_today):
year = (pd.Timestamp.today() + pd.tseries.offsets.DateOffset(months=months_from_today)).year
month = (pd.Timestamp.today() + pd.tseries.offsets.DateOffset(months=months_from_today)).month_name()
return month, year
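## illustrative example (actual output depends on the current date): run in
## November 2018, month_sbtrkt(-1) returns (2018, 10) and month_namer(-1)
## returns ('October', 2018)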
## function to predict next X months of sales
def depletion_estimator_6mons(df):
last_3_mons_YoY = df['Trailing 3 Months YoY Trend']
t30 = df['Cases Sold: T-30']
estimates = {}
for i in range(1,7):
x = np.where(last_3_mons_YoY.isnull() == False,
((last_3_mons_YoY * df[month_sbtrkt(-12 + i)]) + df[month_sbtrkt(-12 + i)]).round(2),
t30)
estimates['forecast:', month_namer(i)] = x
return pd.DataFrame(estimates)
## function to provide predicted starting inventory level for next X months, warehouses that intake product
def cases_oh_estimator_mains(df, csohkey, dropdatekey, dropqtykey):
setup = pd.DataFrame()
oh = pd.DataFrame()
cases_left_this_month = df['Current Month Forecast'] - df['Cases Sold This Month']
setup['cases_left_this_month'] = cases_left_this_month
for i in range (1,7):
y = np.where(df[dropdatekey].dt.month == (pd.Timestamp.today() + pd.tseries.offsets.DateOffset(months=(i-1))).month,
df[dropqtykey] - df['forecast:', month_namer(i)], 0 - df['forecast:', month_namer(i)])
setup['delta:', month_namer(i)] = y
oh['Estimated Cases OH', month_namer(1)] = np.where(df[dropdatekey].dt.month == pd.Timestamp.today().month,
df[csohkey] + df[dropqtykey] - setup['cases_left_this_month'],
df[csohkey] - setup['cases_left_this_month'])
for i in range(2,7):
oh['Estimated Cases OH', month_namer(i)] = oh['Estimated Cases OH', month_namer(i-1)] + setup['delta:', month_namer(i-1)]
return oh
## function to provide predicted starting inventory for next X months, warehouses that have inventory transferred
def cases_oh_estimator_secondary(df, csohkey):
setup = pd.DataFrame()
oh = pd.DataFrame()
cases_left_this_month = df['Current Month Forecast'] - df['Cases Sold This Month']
setup['cases_left_this_month'] = cases_left_this_month
oh['Estimated Cases OH', month_namer(1)] = df[csohkey] - setup['cases_left_this_month']
for i in range(2,7):
oh['Estimated Cases OH', month_namer(i)] = oh['Estimated Cases OH', month_namer(i-1)] - df['forecast:', month_namer(i-1)]
return oh
for df in master_dfs:
# segment out cases sold into 30 day intervals
df['Cases Sold: T-120:90'] = df['cases_sold_t120'] - df['cases_sold_t90']
df['Cases Sold: T-90:60'] = df['cases_sold_t90'] - df['cases_sold_t60']
df['Cases Sold: T-60:30'] = df['cases_sold_t60'] - df['Cases Sold: T-30']
# add 30 day trend
df['30 Day Trend'] = (df['Cases Sold: T-30'] - df['Cases Sold: T-60:30']) / df['Cases Sold: T-60:30']
df['30 Day Trend'] = df['30 Day Trend'].replace([np.inf, -np.inf], np.nan).round(1)
# add 7 day trend
df['7 Day Trend'] = (df['Cases Sold: T-7'] - (df['Cases Sold: T-30']*(7/30))) / (df['Cases Sold: T-30']*(7/30))
df['7 Day Trend'] = df['7 Day Trend'].replace([np.inf, -np.inf], np.nan).round(1)
# add last 3 month YoY trend
df['Trailing 3 Months YoY Trend'] = np.clip((((df[month_sbtrkt(-1)] +
df[month_sbtrkt(-2)] +
df[month_sbtrkt(-3)]) -
(df[month_sbtrkt(-13)] +
df[month_sbtrkt(-14)] +
df[month_sbtrkt(-15)])) /
(df[month_sbtrkt(-13)] +
df[month_sbtrkt(-14)] +
df[month_sbtrkt(-15)])).replace([np.inf, -np.inf], np.nan).round(2), -1, 1)
# add estimator for current month total sales
df['Current Month Forecast'] = (df['Cases Sold This Month'] /
(pd.Timestamp.today().day / | pd.Timestamp.today() | pandas.Timestamp.today |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
#import seaborn as sns
import glob
from parse import parse
#defaultdict to use nested dictionaries
from collections import defaultdict
#quantiles calculation
from scipy.stats.mstats import mquantiles
#datetime conversion
from dateutil import parser
#statistical tools
from statsmodels import robust
import statsmodels.api as sm
#dates
import matplotlib.dates as mdates
#patches for legend
import matplotlib.patches as mpatches
from matplotlib.patches import Patch
#for legend to avoid repeating duplicates labels
from collections import OrderedDict
import seaborn as sns
#calculate area under curve for ROC curve:
from sklearn.metrics import auc
#find local peaks of a 2d function
from scipy.signal import find_peaks
#decide color series
import itertools
def dictionary(fileformat='std', pattern="/home/ciccuz/hydro/forecasts/cosmoe_prevah/cosmoe_{simul_time}/cosmoe_{something}_{Name}/{otherstuff}",
folders_pattern = '/home/ciccuz/hydro/forecasts/cosmoe_prevah/cosmoe_*'):
"""
    Open every simulation run for different starting simulation times and create a dictionary of dataframes nested in
    this way: based on the simulation time, choosing the realization (e.g. 'rm00_pin01') you have a dataframe of different
    parameters and a number of rows given by the hourly time points
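    Example of accessing the result (illustrative; the simulation date and the
    realization name 'rm00_pin01' are hypothetical):
        runs = dictionary(fileformat='q')
        df = runs['2018-10-27 12:00:00']['rm00_pin01']
        df[['date', 'RTOT']].head()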
"""
#create a nested dictionary with two 'levels' to contain a list of dataframes for every simulation time
#and every ensemble member
nested_dict = lambda: defaultdict(nested_dict)
nested_df_collection = nested_dict()
    #pattern to rename every dataframe (with a different 'filepath') of the collection by the name of the simulation
#pattern = "/home/ciccuz/hydro/forecasts/cosmoe_prevah/cosmoe_{simul_time}/cosmoe_{something}_{Name}/{otherstuff}"
#sim_dates: string array to store the renamed 'filetime' variables with the initialization time of the simulation
sim_dates = ["" for filetime in sorted(glob.iglob(folders_pattern))]
i = 0
#conditions on fileformat given in input to write the dataframes in the dictionary:
if fileformat == 'q':
skiprows = [1]
usecols = range(12)
columns = ['year', 'month', 'day', 'hour', 'RTOT', 'RTOT (l s-1 )', 'R0', 'R1', 'R2', 'RG1', 'RG2', 'RG3']
if fileformat == 'std':
skiprows = [0,1]
usecols = range(20)
columns = ['year', 'month', 'day', 'hour', 'NRTFL', 'P-uk', 'P-kor', 'P-SNO', 'EPOT', 'EREA', 'RO', 'R1', 'R2', 'RGES', 'S-SNO', 'SI', 'SSM', 'SUZ', 'SLZ', '??1', '??2', '??3', '??4']
#for loop for every simulation made at different times
for filetime in sorted(glob.iglob(folders_pattern)):
#for loop to read every *.std/*.q file in every subdirectory present, sorted by name, and to create an array of
#dataframes
#(all data in files *.q except RTOT (l s-1) are dimensioned in mm/h)
#before that, if condition for distinguish different patterns considering the forecasts or the prec obs
if folders_pattern == '/home/ciccuz/hydro/forecasts/cosmoe_prevah/cosmoe_*':
subfold = '/*/*.'
elif folders_pattern == '/home/ciccuz/hydro/PrecObs/cosmo1_*':
subfold = '/*.'
for filepath in sorted(glob.iglob(filetime + subfold + fileformat)):
nested_df_collection[filetime][filepath] = pd.DataFrame(pd.read_csv(filepath, skiprows=skiprows,
delim_whitespace=True, header=None,
names=columns,
usecols=usecols))
if fileformat == 'q':
nested_df_collection[filetime][filepath].columns = columns
#add complete date column to every dataframe
nested_df_collection[filetime][filepath]['date'] = pd.to_datetime(nested_df_collection[filetime]
[filepath][['year', 'month', 'day',
'hour']])
# If considering ensemble members: change name of every dataframe ('filepath') of the dictionary by its
# simulation name (depending on ensemble member and parameter set used)
if folders_pattern == '/home/ciccuz/hydro/forecasts/cosmoe_prevah/cosmoe_*':
newname_filepath = parse(pattern + fileformat, filepath)
nested_df_collection[filetime][newname_filepath['Name']] = nested_df_collection[filetime].pop(filepath)
elif folders_pattern == '/home/ciccuz/hydro/PrecObs/cosmo1_*':
newname_filepath = parse(pattern + fileformat, filepath)
nested_df_collection[filetime][newname_filepath['otherstuff']] = nested_df_collection[filetime].pop(filepath)
#change name of every simulation time ('filetime') substituting it with the date of the simulation
#locate characters for year, month, day, hour in filetime strings
#if condition to account for cosmoe data or cosmo1 (for prec obs):
if folders_pattern == '/home/ciccuz/hydro/forecasts/cosmoe_prevah/cosmoe_*' :
sim_year = filetime[50:54] #[70:74] second ones used for longer file patterns i.e. located in deeper subfolders
sim_month = filetime[54:56] #[74:76]
sim_day = filetime[56:58] #[76:78]
sim_hour = filetime[58:60] #[78:80]
#condition on hour: 00 or 12 UTC simulation start
if sim_hour[0] == '0':
sim_hour = '00'
else:
sim_hour = '12'
elif folders_pattern == "/home/ciccuz/hydro/PrecObs/cosmo1_*":
sim_year = filetime[34:38]
sim_month = filetime[38:40]
sim_day = filetime[40:42]
sim_hour = filetime[42:44]
if sim_hour[0] == '0':
sim_hour = '00'
sim_dates[i] = (sim_year+'-'+sim_month+'-'+sim_day+' '+sim_hour+':00:00')
nested_df_collection[sim_dates[i]] = nested_df_collection.pop(filetime)
i = i+1
return nested_df_collection
def prec_obs_series():
'''
Read all the precipitation data obtained by a combination of COSMO1 and pluviometer data to obtain a precipitation series
to be used as observation series.
WARNING: for the day 2-11-2018 the data at 12:00 is missing!
'''
# Create a dictionary of all precipitation datasets (obtained with COSMO1) present at different sim_start
prec_obs_df = dictionary(pattern="/home/ciccuz/hydro/PrecObs/cosmo1_{simul_time}/{otherstuff}",
folders_pattern = '/home/ciccuz/hydro/PrecObs/cosmo1_*')
# Create a dataframe that will contain the "observed" precipitation series obtained by the different simulations/pluviometer
# data interpolated of precipitation by taking the first 12 hours for every series in prec_obs_df and concatenate all of them
obs_prec = pd.DataFrame(columns = ['year', 'month', 'day', 'hour', 'P-uk', 'P-kor', 'date'])
#array of dates to consider every simulation start at 12 utc from 23-10 to 9-11 2018
sim_starts = ['2018-10-23 12:00:00', '2018-10-24 12:00:00', '2018-10-25 12:00:00', '2018-10-26 12:00:00',
'2018-10-27 12:00:00', '2018-10-28 12:00:00', '2018-10-29 12:00:00', '2018-10-30 12:00:00',
'2018-10-31 12:00:00', '2018-11-01 12:00:00', '2018-11-02 13:00:00', '2018-11-03 12:00:00',
'2018-11-04 12:00:00', '2018-11-05 12:00:00', '2018-11-06 12:00:00', '2018-11-07 12:00:00',
'2018-11-08 12:00:00', '2018-11-09 12:00:00']
i=0
for sim_start in sim_starts:
prec_set = prec_obs_df[sim_start]['Ver500.']
#Compute the subset to consider just the 24 h above the initialization time:
#to do so we need to do some if conditions because on the 2-11 the simulation starting at 12 is not present!
if sim_start == '2018-11-01 12:00:00' :
prec_subset = prec_set.loc[(prec_set.date >= sim_start) & (prec_set.index <= 443)].drop(['NRTFL', 'P-SNO', 'EPOT', 'EREA',
'RO', 'R1', 'R2', 'RGES', 'S-SNO', 'SI', 'SSM', 'SUZ', 'SLZ', '??1'], axis=1)
prec_subset.index = range(i*24,i*24+24+1)
        elif sim_start == '2018-11-02 13:00:00':
prec_subset = prec_set.loc[(prec_set.date >= sim_start) & (prec_set.index <= 442)].drop(['NRTFL', 'P-SNO', 'EPOT', 'EREA',
'RO', 'R1', 'R2', 'RGES', 'S-SNO', 'SI', 'SSM', 'SUZ', 'SLZ', '??1'], axis=1)
prec_subset.index = range(i*24+1,i*24+24)
else:
prec_subset = prec_set.loc[(prec_set.date >= sim_start) & (prec_set.index <= 442)].drop(['NRTFL', 'P-SNO', 'EPOT', 'EREA',
'RO', 'R1', 'R2', 'RGES', 'S-SNO', 'SI', 'SSM', 'SUZ', 'SLZ', '??1'], axis=1)
prec_subset.index = range(i*24,i*24+24)
obs_prec = | pd.concat([obs_prec, prec_subset]) | pandas.concat |
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import AutoMinorLocator
from utils.utils import CONSTANTS
from utils.utils import publication_plot_pred_act, publication_plot_residuals
from use_crabnet import predict_crabnet
from use_densenet import predict_densenet
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error
# %%
plt.rcParams.update({'font.size': 16})
cons = CONSTANTS()
mat_props_units = cons.mp_units_dict
mat_props = cons.mps
mat_props_names = cons.mp_names
pretty_mp_names = cons.mp_names_dict
# %%
def plot_compare_lcs(times,
maes,
mat_prop,
classic_results=None,
ax=None):
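    """Plot CrabNet vs. DenseNet learning curves (validation MAE over training
    time) for one material property, optionally marking the best classic model.
    times and maes are (CrabNet, DenseNet) pairs; classic_results is an
    optional (fit_time, mae) tuple. Draws on ax when given, otherwise creates
    and returns a new figure."""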
mp_sym_dict = cons.mp_sym_dict
mp_units_dict = cons.mp_units_dict
fig = None
if classic_results is not None:
classic_time = classic_results[0]
classic_mae = classic_results[1]
crab_time, dense_time = times
crab_mae, dense_mae = maes
x_crab = np.arange(len(crab_mae))
x_dense = np.arange(len(dense_mae))
x_crab = np.linspace(0, crab_time, len(crab_mae))
x_dense = np.linspace(0, dense_time, len(dense_mae))
# Plot training curve
if ax is None:
fig, ax = plt.subplots(figsize=(6, 6))
ax.plot(x_crab, crab_mae,
'-', color=cons.crab_red, marker='o', ms=0, alpha=1,
label='CrabNet')
ax.plot(x_dense, dense_mae,
'-', color=cons.dense_blue, marker='s', ms=0, alpha=1,
label='DenseNet')
ax.axhline(np.min(dense_mae), color=cons.dense_blue, linestyle='--',
alpha=1)
ax.set_xlabel('Training time [s]')
ax.plot([crab_time, dense_time], [crab_mae.iloc[-5:].mean(),
dense_mae.iloc[-5:].mean()],
'kX', ms=14, mfc='gold', label='1000 epochs')
ymax = 1.5*np.mean(dense_mae)
if classic_results is not None:
classic_x = classic_time
classic_y = 1.5*np.mean(dense_mae)
if classic_time > 1.2 * np.max(crab_time):
classic_x = np.max(crab_time)
ax.plot([classic_x*(14/20), classic_x], [classic_mae, classic_mae],
'g-', linewidth=5)
ax.plot(classic_x, classic_mae, '>', mec='green', ms=12,
mfc='white', mew=3, label='Best classic')
ax.text(classic_x, classic_mae, f'({classic_time:0.0f} s) \n',
horizontalalignment='right', verticalalignment='center')
elif classic_mae > ymax:
classic_mae = ymax * 0.97
ax.plot([classic_x, classic_x], [classic_mae*(16.5/20), classic_mae],
'g-', linewidth=5)
ax.plot(classic_x, classic_mae, '^', mec='green', ms=12,
mfc='white', mew=3, label='Best classic')
txt = f'\n\n({classic_mae:0.2f} {mp_units_dict[mat_prop]}) '
ax.text(classic_x, classic_mae*(16.5/20), txt,
horizontalalignment='center', verticalalignment='center')
else:
ax.plot(classic_x, classic_mae, 'o', mec='green', ms=12,
mfc='white', mew=4, label='Best classic')
ax.set_ylabel(f'MAE of {mp_sym_dict[mat_prop]} '
f'[{mp_units_dict[mat_prop]}]')
ax.set_ylim(np.min(crab_mae)/1.5, ymax)
ax.tick_params(left=True, top=True, right=True, direction='in', length=7)
ax.tick_params(which='minor', left=True, top=True, right=True,
direction='in', length=4)
minor_locator_x = AutoMinorLocator(2)
minor_locator_y = AutoMinorLocator(2)
ax.xaxis.set_minor_locator(minor_locator_x)
ax.yaxis.set_minor_locator(minor_locator_y)
# Get all plot labels for legend and label legend
lines, labels = ax.get_legend_handles_labels()
ax.legend(lines,
labels,
loc='best',
prop={'size': 12})
if fig is not None:
return fig
def multi_plots_lcs(nn_dir, classics_dir):
files = os.listdir(classics_dir)
classics_results_csv = classics_dir + [file for file in files
if 'test_scores.csv' in file][0]
df_classics = pd.read_csv(classics_results_csv)
files = os.listdir(nn_dir)
# print(files)
nn_results_csv = nn_dir + [file for file in files
if 'all_results' in file
if '.csv' in file][0]
df_nn = pd.read_csv(nn_results_csv)
mat_props = df_nn['mat_prop'].unique()
seeds = df_nn['rng_seed'].unique()
seed_values = {seed: 0 for seed in seeds}
df_crabnet = df_nn[df_nn['model_type'] == 'CrabNet']
for mp in mat_props:
df_mp = df_crabnet
mp_bools = df_mp['mat_prop'] == mp
best_mae = np.min(df_mp[mp_bools]['mae_val'])
pc_mae = (df_mp[mp_bools]['mae_val'] - best_mae) / best_mae
imp_col = pd.Series(pc_mae, name='improvement')
df_mp = pd.concat([df_mp, imp_col], axis=1)
df_mp = df_mp[df_mp['mat_prop'] == mp].sort_values(by='improvement')
df_mp_seeds = df_mp['rng_seed']
for i, seed in enumerate(df_mp_seeds):
seed_values[seed] += (df_mp.iloc[i]['improvement'])
ranked_seeds = pd.Series(seed_values).sort_values()
seed = ranked_seeds.index[0]
df_nn = df_nn[df_nn['rng_seed'] == seed]
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(12, 12))
mats = ['energy_atom', 'Egap', 'agl_thermal_conductivity_300K',
'ael_debye_temperature']
for mp, ax in zip(mats, axes.ravel()):
run_ids = df_nn[df_nn['mat_prop'] == mp]
crab_id = run_ids[run_ids['model_type'] == 'CrabNet']['id'].values[0]
dense_id = run_ids[run_ids['model_type'] == 'DenseNet']['id'].values[0]
crab_df = pd.read_csv(f'{nn_dir}/{crab_id}/progress.csv')
dense_df = pd.read_csv(f'{nn_dir}/{dense_id}/progress.csv')
crab_maes = crab_df['mae_val']
dense_maes = dense_df['mae_val']
crab_bools = run_ids['model_type'] == 'CrabNet'
dense_bools = run_ids['model_type'] == 'DenseNet'
crab_time = run_ids[crab_bools]['fit_time'].values[0]
dense_time = run_ids[dense_bools]['fit_time'].values[0]
df_classic = df_classics[df_classics['mat_prop'] == mp]
classic_mae = df_classic['mae_test'].values[0]
classic_time = df_classic['fit_time'].values[0]
plot_compare_lcs((crab_time, dense_time),
(crab_maes, dense_maes),
mp,
(classic_time, classic_mae),
ax=ax)
plt.subplots_adjust(wspace=0.22)
out_dir = r'figures/learning_curves/'
os.makedirs(out_dir, exist_ok=True)
fig_file = os.path.join(out_dir, f'four_panel_learning_curve.png')
if fig is not None:
fig.savefig(fig_file,
dpi=300,
bbox_inches='tight')
# %%
def plot_dense_crab_preds(mp, ax):
test_file = f'test_files/{mp}_test.csv'
fig = None
if ax is None:
fig, ax = plt.subplots(1, 2, figsize=(12, 6))
y_act_dense, y_pred_dense = predict_densenet(mp, test_file)
fig_dense = publication_plot_pred_act(y_act_dense,
y_pred_dense,
mat_prop=mp,
model='DenseNet',
ax=ax[0])
y_act_crab, y_pred_crab = predict_crabnet(mp, test_file)
fig_crab = publication_plot_pred_act(y_act_crab,
y_pred_crab,
mat_prop=mp,
model='CrabNet',
ax=ax[1])
if fig is not None:
return fig
def multi_plots_preds():
mat_props = ['energy_atom', 'Egap']
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(12, 12))
for i, mp in enumerate(mat_props):
ax = axes[i, :]
ax = plot_dense_crab_preds(mp, ax)
plt.subplots_adjust(wspace=0.22)
out_dir = r'figures/pred_vs_act/'
os.makedirs(out_dir, exist_ok=True)
fig_file = os.path.join(out_dir, f'four_panel_pred_vs_act.png')
if fig is not None:
fig.savefig(fig_file,
dpi=300,
bbox_inches='tight')
# %%
def plot_dense_crab_residuals(mp, ax):
test_file = f'test_files/{mp}_test.csv'
fig = None
if ax is None:
fig, ax = plt.subplots(1, 2, figsize=(12, 6))
y_act_dense, y_pred_dense = predict_densenet(mp, test_file)
fig_dense = publication_plot_residuals(y_act_dense,
y_pred_dense,
mat_prop=mp,
model='DenseNet',
ax=ax[0])
y_act_crab, y_pred_crab = predict_crabnet(mp, test_file)
fig_crab = publication_plot_residuals(y_act_crab,
y_pred_crab,
mat_prop=mp,
model='CrabNet',
ax=ax[1])
y0_min, y0_max = ax[0].get_ylim()
y1_min, y1_max = ax[1].get_ylim()
y_min_min = np.min([y0_min, y1_min])
y_max_max = np.max([y0_max, y1_max])
ax[0].set_ylim(y_min_min, y_max_max)
ax[1].set_ylim(y_min_min, y_max_max)
if fig is not None:
return fig
def multi_plots_residuals():
mat_props = ['energy_atom', 'Egap']
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(12, 12))
for i, mp in enumerate(mat_props):
ax = axes[i, :]
ax = plot_dense_crab_residuals(mp, ax)
plt.subplots_adjust(wspace=0.22)
out_dir = r'figures/residuals/'
os.makedirs(out_dir, exist_ok=True)
fig_file = os.path.join(out_dir, f'four_panel_residuals.png')
if fig is not None:
fig.savefig(fig_file,
dpi=300,
bbox_inches='tight')
# %%
def get_figures(nn_dir, classics_dir):
files = os.listdir(classics_dir)
classics_results_csv = classics_dir + [file for file in files if
'test_scores.csv' in file][0]
df_classics = pd.read_csv(classics_results_csv)
files = os.listdir(nn_dir)
# print(files)
nn_results_csv = nn_dir + [file for file in files
if 'all_results' in file
if '.csv' in file][0]
df_nn = | pd.read_csv(nn_results_csv) | pandas.read_csv |
'''
[[https://eightsleep.com][EightSleep]] data
'''
REQUIRES = [
'git+https://github.com/hpi/eight-sleep',
]
from dataclasses import dataclass
from pathlib import Path
from typing import Sequence, Iterable
from my.core import Paths, get_files
from my.config import eightsleep as user_config
@dataclass
class eightsleep(user_config):
# paths[s]/glob to the exported JSON data
export_path: Paths
def inputs() -> Sequence[Path]:
return get_files(eightsleep.export_path)
import eightsleep.dal as dal
def sessions():
_dal = dal.DAL(inputs())
yield from _dal.sessions()
from my.core.pandas import check_dataframe, DataFrameT
import pandas as pd
@check_dataframe
def dataframe(defensive: bool=True) -> DataFrameT:
def it():
for s in sessions():
try:
d = {
'ts' : s['ts'],
'score' : s['score'],
'stages' : pd.DataFrame(s['stages']),
'tossAndTurns' : pd.DataFrame(s['tossAndTurns']),
'tempRoomC' : pd.DataFrame(s['tempRoomC']),
'tempBedC' : pd.DataFrame(s['tempBedC']),
'respiratoryRate' : | pd.DataFrame(s['respiratoryRate']) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import csv
import json
import re
import pandas as pd
import scrapy
def write(path, df):
try:
base = | pd.read_csv(path, index_col=0) | pandas.read_csv |
# Licensed to Elasticsearch B.V under one or more agreements.
# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information
import warnings
import numpy as np
import pandas as pd
from pandas.core.dtypes.common import (
is_float_dtype,
is_bool_dtype,
is_integer_dtype,
is_datetime_or_timedelta_dtype,
is_string_dtype,
)
from pandas.core.dtypes.inference import is_list_like
from typing import NamedTuple, Optional
class Field(NamedTuple):
"""Holds all information on a particular field in the mapping"""
index: str
es_field_name: str
is_source: bool
es_dtype: str
es_date_format: Optional[str]
pd_dtype: type
is_searchable: bool
is_aggregatable: bool
is_scripted: bool
aggregatable_es_field_name: str
@property
def is_numeric(self) -> bool:
return is_integer_dtype(self.pd_dtype) or is_float_dtype(self.pd_dtype)
@property
def is_timestamp(self) -> bool:
return | is_datetime_or_timedelta_dtype(self.pd_dtype) | pandas.core.dtypes.common.is_datetime_or_timedelta_dtype |
# Copyright (c) 2009-2020 <NAME> <<EMAIL>>
#
# This module is free software. You can redistribute it and/or modify it under
# the terms of the MIT License, see the file COPYING included with this
# distribution.
""" Data preprocessing to create GimmeMotifs input. """
# Python imports
import os
import sys
import logging
import multiprocessing
from tempfile import NamedTemporaryFile
# External imports
import genomepy
import numpy as np
import pysam
from fluff.fluffio import load_heatmap_data
import pandas as pd
from pybedtools import BedTool
from sklearn.preprocessing import scale
import qnorm
from tqdm.auto import tqdm
# gimme imports
from gimmemotifs.utils import determine_file_type
logger = logging.getLogger("gimme.preprocessing")
def coverage_table(
peakfile,
datafiles,
window,
log_transform=True,
normalization="none",
top=0,
topmethod="var",
rmdup=True,
rmrepeats=True,
ncpus=12,
):
for x in datafiles:
if not os.path.isfile(x):
print("ERROR: Data file '{0}' does not exist".format(x))
sys.exit(1)
for x in datafiles:
if ".bam" in x and not os.path.isfile("{0}.bai".format(x)):
print(
"Data file '{0}' does not have an index file."
" Creating an index file for {0}.".format(x)
)
pysam.index(x)
logger.info("Loading data")
data = {}
try:
# Load data in parallel
pool = multiprocessing.Pool(processes=ncpus)
jobs = []
for datafile in datafiles:
jobs.append(
pool.apply_async(
load_heatmap_data,
args=(
peakfile,
datafile,
1,
window // 2,
window // 2,
rmdup,
False,
rmrepeats,
None,
False,
None,
),
)
)
for job in tqdm(jobs):
track, regions, profile, guard = job.get()
data[os.path.splitext(track)[0]] = profile[:, 0]
except Exception as e:
sys.stderr.write("Error loading data in parallel, trying serial\n")
sys.stderr.write("Error: {}\n".format(e))
for datafile in tqdm(datafiles):
track, regions, profile, guard = load_heatmap_data(
peakfile,
datafile,
1,
window // 2,
window // 2,
rmdup,
False,
rmrepeats,
None,
False,
None,
)
data[os.path.splitext(track)[0]] = profile[:, 0]
# Create DataFrame with regions as index
regions = ["{}:{}-{}".format(*region[:3]) for region in regions]
df = | pd.DataFrame(data, index=regions) | pandas.DataFrame |
import os
import re
import json
import time
import numpy as np
import pandas as pd
from plotnine import *
# Config
PATH = os.getcwd()
path_n = re.split(pattern=r"/|\\", string=PATH)[1:]
if os.name == "posix":
path_n = "/" + os.path.join(*path_n)
else:
drive = PATH[0:3]
path_n = drive + os.path.join(*path_n)
RUNS = 100
def infer_column_cats(dir: "Path to working directory.") -> tuple:
"""Helper function to identify dataset sizes based on file names."""
files = os.listdir(os.path.join(dir, "data"))
cats = set([re.match(pattern=".*_(.*).csv$", string=file).group(1) for file in files])
cols = set([re.match(pattern=".*_(.*)_.*.csv$", string=file).group(1) for file in files])
return cats, cols
def time_function(func: "Function call to be evaluated as str.") -> float:
"""Helper function to time data access."""
start = time.time()
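    # Note: the statement string is exec'd against this module's global namespace,
    # which is why the benchmark loop below keeps `temp` as a module-level name that
    # the generated statements can reference.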
exec(func)
return time.time() - start
def create_stats(measures: "List of function timings.",
col: "Current Column.", row: "Current Row",
scenario: "Current Scenario.") -> dict:
"""Helper function to create result dataset."""
return {"scenario": scenario,
"no_column": col,
"data_length": row,
"min": np.min(measures),
"max": np.max(measures),
"avg": np.mean(measures),
"q50": np.median(measures)}
scenarios = json.load(open(os.path.join(path_n, "output", "mutate.JSON")))
nrows, ncols = infer_column_cats(path_n)
timings, results = [], []
for col in ncols:
print(f"-Column: {col}--")
for row in nrows:
print(f"--Row: {row}")
data = pd.read_csv(os.path.join(path_n, "data", f"sim_data_{col}_{row}.csv"))
for i, scenario in enumerate(scenarios[col]["mutate"]):
print(f"---Scenario {i+1}: {scenario}---")
sel = re.search(pattern=r'([A-Z]{3})', string=scenario).group(1)
print(sel)
if sel == "INT":
func = f"temp['result'] = temp['{scenario}'] + 1"
elif sel == "DBL":
func = f"temp['result'] = temp['{scenario}'] * 2"
elif sel == "STR":
func = f"temp['result'] = temp['{scenario}'] + 'a'"
elif sel == "LGL":
func = f"temp['result'] = ~temp['{scenario}']"
for j in range(RUNS):
                temp = data.copy()  # copy so the timed mutation does not leak into the source frame
timings.append(time_function(func=func))
temp = None
results.append(create_stats(measures=timings, col=col, row=row, scenario=sel))
print(results[-1])
timings = []
results_df = | pd.DataFrame(results) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# Author:
# <NAME>
# Emotional Sentiment on Twitter
# A coronavirus vaccine online firestorm
# In this python script you will find examples of some of the most common
# NLP (Natural Language Processing) techniques used to uncover patterns of
# sentiment and emotion on social media microblogging platforms like Twitter.
# It is organized as follows:
# - Step 1: Exploratory analysis
# - Step 2: Text processing
# - Step 3: Sentiment analysis
# - Step 4: Word frequency
# - Step 5: LDA topics extraction
# - Step 6: Emotion analysis
#
# ## Step 1: EXPLORATORY ANALYSIS
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from collections import defaultdict
from datetime import date
import re # for regular expressions
import string
# Importing the data
tweets = pd.read_csv('input/tweets.csv')
# getting the date column ready for datetime operations
tweets['datetime']= | pd.to_datetime(tweets['datetime']) | pandas.to_datetime |
import subprocess
import argparse
from pathlib import Path
import pandas as pd
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def parse_TMscore(result):
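    # Rough sketch of the TMscore output lines this parser relies on (the exact spacing
    # is an assumption inferred from the indexing below, not taken from the tool's docs):
    #   "TM-score    = 0.7523  (d0=...)"    -> value taken from line_split[2]
    #   "GDT-TS-score= 0.6871 %(d<1)=..."   -> value taken from line_split[1]
    #   "GDT-HA-score= 0.5312 %(d<0.5)=..." -> value taken from line_split[1]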
lines = result.split('\n')
for line in lines:
line_split = line.split()
if len(line_split) == 0:
continue
elif line_split[0] == 'TM-score':
tmscore = float(line_split[2])
elif line_split[0] == 'GDT-TS-score=':
gdtts = line_split[1]
elif line_split[0] == 'GDT-HA-score=':
gdtha = line_split[1]
return tmscore, gdtts, gdtha
def run_TMscore(native_pdb, model_pdb):
cmd = ['TMscore', model_pdb, native_pdb, '-outfmt', '-1']
result = subprocess.check_output(cmd)
return result.decode('utf-8')
def get_gdt(native_pdb, model_pdb):
result = run_TMscore(native_pdb, model_pdb)
tmscore, gdtts, gdtha = parse_TMscore(result)
return tmscore, gdtts, gdtha
def get_gdt_for_target(native_pdb_path, model_pdb_dir, blast_xml_csv_path, out_gdt_path):
model_array = []
tmscore_array = []
gdtts_array = []
gdtha_array = []
for model in model_pdb_dir.iterdir():
model_array.append(model.stem)
tmscore, gdtts, gdtha = get_gdt(native_pdb_path, model)
tmscore_array.append(tmscore)
gdtts_array.append(gdtts)
gdtha_array.append(gdtha)
df = pd.DataFrame({'TMscore': tmscore_array, 'GDT_TS': gdtts_array, 'GDT_HA': gdtha_array}, index=model_array)
df = df.astype('float')
df = df.sort_index()
df['target'] = [index.rsplit('_', 4)[0] for index in df.index]
df['template'] = [index.split('_', 2)[2].rsplit('_', 1)[0] for index in df.index]
df_template = | pd.read_csv(blast_xml_csv_path, index_col=0) | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 12 10:27:09 2018
@author: MichaelK
"""
import os
import pickle
import pandas as pd
import xarray as xr
import requests
from time import sleep
from lxml import etree
import itertools
from multiprocessing.pool import ThreadPool
#from pydap.client import open_url
from pydap.cas.urs import setup_session
from nasadap.util import parse_nasa_catalog, mission_product_dict, master_datasets
#from util import parse_nasa_catalog, mission_product_dict, master_datasets
#######################################
### Parameters
file_index_name = 'file_index.pickle'
#######################################
def download_files(url, path, session, master_dataset_list, dataset_types, min_lat, max_lat, min_lon, max_lon):
# print('Downloading and saving to...')
print(path)
# print(url)
counter = 4
while counter > 0:
try:
store = xr.backends.PydapDataStore.open(url, session=session)
ds = xr.open_dataset(store, decode_cf=False)
if 'nlon' in ds:
ds = ds.rename({'nlon': 'lon', 'nlat': 'lat'})
ds2 = ds[master_dataset_list].sel(lat=slice(min_lat, max_lat), lon=slice(min_lon, max_lon))
lat = ds2.lat.values
lon = ds2.lon.values
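            # The 'FileHeader' attribute is assumed to be a ';\n'-separated list of
            # 'Key=Value' pairs (e.g. "StopGranuleDateTime=2015-01-01T02:59:59.999Z;"),
            # which the next lines split into a dict to recover the granule timestamp.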
ds_date1 = ds.attrs['FileHeader'].split(';\n')
ds_date2 = dict([t.split('=') for t in ds_date1 if t != ''])
ds_date = | pd.to_datetime([ds_date2['StopGranuleDateTime']]) | pandas.to_datetime |
from surprise.model_selection import train_test_split
from surprise.model_selection import LeaveOneOut
from surprise import KNNBaseline
from surprise import Dataset, KNNBasic
from surprise import Reader
import heapq
from movies_analyzer.Movies import Movies, RATINGS, LINKS, MOVIES
from movies_recommender.utils import get_popularity_ranking
import pandas as pd
from operator import itemgetter
from surprise.similarities import cosine
class RecommendationDataSet:
def __init__(self, movies: Movies):
# train_test_split(dataset, test_size=test_size, random_state=1)
self.movies = movies
self.dataset_df = pd.read_csv(movies.movielens_path / RATINGS)
reader = Reader(line_format='user item rating timestamp', sep=',', skip_lines=1)
"""
line_format - list of columns
sep - separator for csv file
skip_lines - start from the second line
"""
self.dataset = Dataset.load_from_file(self.movies.movielens_path / RATINGS, reader=reader)
self.full_dataset = self.dataset.build_full_trainset()
# ranking
self.ratings, self.rankings = get_popularity_ranking(self.full_dataset)
# TRAINING
self.train_set, self.test_set = None, None
self.anti_test_set = None
self.leave_one_out_train_set = None
self.leave_one_out_test_set = None
self.leave_one_out_anti_test_set = None
self.similarity_algorithm = None
def clear_training(self):
self.train_set, self.test_set = None, None
self.anti_test_set = None
self.leave_one_out_train_set = None
self.leave_one_out_test_set = None
self.leave_one_out_anti_test_set = None
self.similarity_algorithm = None
def get_dataset_with_extended_user(self, watched):
"""
Create new dataset with new user, based only on the score of current movies.
        :param watched: dict mapping a movie identifier to the new user's rating
"""
df = | pd.DataFrame.from_dict(watched, orient='index', columns=['rating']) | pandas.DataFrame.from_dict |
'''
pyrainflow
Rainflow counting of time-history data in Python.
Based on the method described here:
https://community.sw.siemens.com/s/article/rainflow-counting
'''
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
def round_nearest(value, lst):
'''
Round ``value`` to the nearest number in list ``lst``. In cases where a
list value is equidistant the min of the two is returned.
Source: https://www.geeksforgeeks.org/python-find-closest-number-to-k-in-given-list/
'''
return lst[min(range(len(lst)), key=lambda i: abs(lst[i]-value))]
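# A quick illustration of the helper above (the inputs are chosen here for demonstration):
# round_nearest(2.6, [1, 2, 3]) returns 3, while round_nearest(1.5, [1, 2, 3]) returns 1
# because equidistant candidates resolve to the earlier list entry.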
def hysteresis(signal, ratio=0.02, gate=None):
'''
Remove small fluctuations in a signal.
:param Series signal: The input data as a Pandas Series with the index as
the timeseries
:param num ratio: The fraction of the signals max range to use as the
gate, defaults to 2%
:param num gate: The small fluctuation cutoff as a fixed value, if
specified used in place of ``ratio``.
'''
gate_ = gate if gate else ratio*(max(signal) - min(signal))
drop = []
for i,v in enumerate(signal.iloc[:-3]):
v0 = v
v1 = signal.iat[i+1]
v2 = signal.iat[i+2]
if abs(v1-v0) > gate_:
continue
if v1 < v0 and v2 > v1:
if v0 - v1 < gate_:
drop.append(i+1)
elif v1 > v0 and v2 < v1:
if v1 - v0 < gate_:
drop.append(i+1)
idx = signal.iloc[drop].index
return signal.drop(idx)
def peak_valley(signal):
'''Return only the peaks and valleys'''
drop = []
for i,v in enumerate(signal.iloc[:-3]):
v0 = v
v1 = signal.iat[i+1]
v2 = signal.iat[i+2]
if v2 > v1 > v0 or v2 < v1 < v0:
drop.append(i+1)
idx = signal.iloc[drop].index
return signal.drop(idx)
def discretize(signal, bins=128):
'''
Discretize the signal into discrete bins.
:param Series signal: The input ``Series`` data
:param int bins: The number of bins to divide the data into, defaults
to 128.
'''
bins_list = np.linspace(min(signal), max(signal), bins).round(4)
discretized = []
    for i,v in signal.items():
discretized.append(round_nearest(v,bins_list))
return pd.Series(data=discretized, index=signal.index), bins_list
def merge_plateus(signal, tratio=0.0005):
'''
Average any flat plateaus in the signal.
``peak_valley()`` is called at the end because there may be a single
point (the merged point) remaining between a peak and valley.
:param float tratio: Tolerance ratio for what is deemed "flat". Is a
multiplier to (max()-min()). Defaults to 0.05%.
'''
idx_list = signal.index.tolist()
tol = tratio*(max(signal)-min(signal))
drop = []
for i,v in enumerate(signal.iloc[:-1]):
v0 = v
v1 = signal.iat[i+1]
if abs(v0-v1) <= tol:
idx_list[i] = (idx_list[i] + idx_list[i+1])/2
drop.append(i+1)
signal2 = signal.copy()
signal2.index = idx_list
idx = signal2.iloc[drop].index
return peak_valley(signal2.drop(idx))
def count4pt(signal, bins):
'''
Count cycles by the Four-Point Counting Method.
'''
rm = pd.DataFrame(0, index=bins, columns=bins)
sgnl = signal.copy()
i = 0
while True:
try:
s1 = sgnl.iloc[i + 0]
s2 = sgnl.iloc[i + 1]
s3 = sgnl.iloc[i + 2]
s4 = sgnl.iloc[i + 3]
except IndexError:
break
sr_i = abs(s2 - s3)
sr_o = abs(s1 - s4)
if sr_i <= sr_o and s4 >= max(s2,s3) and s1 <= min(s2,s3):
rm.at[s2,s3] += 1
s2_idx = sgnl.index[i + 1]
s3_idx = sgnl.index[i + 2]
sgnl.drop([s2_idx, s3_idx], inplace=True)
i = 0
else:
i += 1
return rm, sgnl
def plot_rm(rm, bins):
'''
Plot a rainflow cycles matrix as a 2D grid of values mapping the "From"
stress to the "To" stress.
'''
fig, ax = plt.subplots()
im = ax.pcolormesh(bins, bins, rm, shading='nearest', cmap='binary')
fig.colorbar(im)
ax.set_xlabel('To Stress')
ax.set_ylabel('From Stress')
ax.set_title('Cycles')
ax.set_aspect('equal')
ax.grid()
plt.show()
def table(rm):
'''
Return a DataFrame of stress ranges and their cycles.
Table data is of the form:
Cycles Range Mean
'''
idx_lst = rm.index
data = []
for fromstr, row in rm.iterrows():
for col,cycles in enumerate(row):
if cycles:
tostr = idx_lst[col]
                stress_range = abs(fromstr - tostr)
                mean = (fromstr + tostr)/2
                data.append([cycles, stress_range, mean])
df = | pd.DataFrame(data, columns=['Cycles','Range','Mean']) | pandas.DataFrame |
"""
The MIT License (MIT)
Copyright (c) 2018 Zuse Institute Berlin, www.zib.de
Permissions are granted as stated in the license file you have obtained
with this software. If you find the library useful for your purpose,
please refer to README.md for how to cite IPET.
@author: <NAME>
"""
import pandas as pd
from toposort import toposort
from ipet.evaluation import Aggregation
import xml.etree.ElementTree as ElementTree
from .IPETFilter import IPETFilterGroup, IPETFilter
import numpy
from ipet.concepts.IPETNode import IpetNode, IpetNodeAttributeError
from ipet.misc import misc
import logging
from ipet import Experiment
from ipet import Key
from pandas.core.frame import DataFrame
from numpy import isnan
from ipet.evaluation.IPETFilter import IPETValue
from ipet.misc.misc import meanOrConcat
from ipet.validation import Validation
import sys
logger = logging.getLogger(__name__)
class IPETEvaluationColumn(IpetNode):
DEFAULT_REDUCTION = "meanOrConcat"
nodetag = "Column"
editableAttributes = ["name", "origcolname", "formatstr", "transformfunc", "reduction", "constant",
"alternative", "minval", "maxval", "comp", "regex", "reductionindex"]
possibletransformations = {None:(0, 0),
"abs":(1, 1),
"getGap":(2, 2),
"getCplexGap":(2, 2),
"getVariabilityScore":(1, -1),
"prod":(1, -1),
"sum":(1, -1),
"subtract":(2, 2),
"divide":(2, 2),
"log10":(1, 1),
"log":(1, 1),
"mean":(1, -1),
"shmean":(1, -1),
"median":(1, -1),
"std":(1, -1),
"min":(1, -1),
"max":(1, -1),
"getBestStatus" : (1, -1),
"getWorstStatus" : (1, -1),
"convertTimeStamp" : (1, 1),
"iqr" : (1, -1),
"lQuart" : (1, -1),
"uQuart" : (1, -1),
"strConcat" : (1, -1),
"meanOrConcat" : (1, -1)}
possiblereductions = [None] + \
[k for k, v in possibletransformations.items() if v == (1, -1)] + \
["shmean shift. by %d" % shift for shift in (1, 5, 10, 100, 1000)]
possiblecomparisons = [None, "quot", "difference"] + ["quot shift. by %d" % shift for shift in (1, 5, 10, 100, 1000)]
requiredOptions = {"comp":possiblecomparisons,
"origcolname":"datakey",
"transformfunc":list(possibletransformations.keys()),
"reduction" : possiblereductions}
deprecatedattrdir = {"nanrep" : "has been replaced by 'alternative'",
"translevel" : "use a suitable reduction index instead"}
def __init__(self, origcolname = None, name = None, formatstr = None, transformfunc = None, constant = None,
alternative = None, minval = None, maxval = None, comp = None, regex = None,
active = True, reduction = DEFAULT_REDUCTION, reductionindex = None, **kw):
"""
constructor of a column for the IPET evaluation
Parameters
----------
origcolname : column name in the original data frame
name : column name that will be displayed for this column
formatstr : a format string to define how the column gets printed, if no format
transformfunc : a transformation function, that should be applied to all children
columns of this column recursively. See also the 'translevel' attribute
constant : should this column represent a constant value?
alternative : conditional alternative constant or column name (also used to replace nans)
minval : a minimum value for all elements in this column
maxval : a maximum value for all elements in this column
comp : should a comparison for this column with the default group be made? This will append one column per group with this column
name and an appropriate suffix. Any nonexistent comp will raise a ValueError
regex : use for selecting a set of columns at once by including regular expression wildcards such as '*+?' etc.
active : True or "True" if this column should be active, False otherwise
reduction : aggregation function that is applied to reduce multiple occurrences of index
reductionindex : integer or string tuple (space separated)
"""
super(IPETEvaluationColumn, self).__init__(active, **kw)
self.origcolname = origcolname
self.name = name
self.formatstr = formatstr
self.transformfunc = transformfunc
self.constant = constant
self.alternative = alternative
self.minval = minval
self.maxval = maxval
self.set_comp(comp)
self.regex = regex
self.set_reduction(reduction)
self.set_reductionindex(reductionindex)
self.aggregations = []
self.filters = []
self.children = []
def checkAttributes(self):
if self.origcolname is None and self.regex is None and self.transformfunc is None and self.constant is None:
raise IpetNodeAttributeError("origcolname", "No origcolname, regex, constant, or transformfunction specified")
if self.transformfunc is not None:
if self.transformfunc not in self.possibletransformations:
raise IpetNodeAttributeError("transformfunc", "Unknown transformation <%s>" % (self.transformfunc))
minval, maxval = self.possibletransformations[self.transformfunc]
if len(self.children) < minval or maxval != -1 and len(self.children) > maxval:
raise IpetNodeAttributeError("transformfunc", "wrong number of children for transformation <%s>" % (self.transformfunc))
if self.reduction is not None:
if self.reduction not in self.possiblereductions:
raise IpetNodeAttributeError("Attribute 'reduction' has illegal value '%s'" % self.reduction)
return True
def isRegex(self) -> bool:
"""Is this a regular expression column
Returns
-------
bool
True if this column will search the data keys with a regular expression
"""
return (self.regex is not None)
def addChild(self, child):
if not self.acceptsAsChild(child):
raise ValueError("Cannot accept child %s as child of a column node" % child)
if child.__class__ is IPETEvaluationColumn:
self.children.append(child)
elif child.__class__ is Aggregation:
self.aggregations.append(child)
elif child.__class__ is IPETFilter:
self.filters.append(child)
def getChildren(self):
return self.children + self.aggregations + self.filters
def acceptsAsChild(self, child):
return child.__class__ in (IPETEvaluationColumn, Aggregation, IPETFilter)
def removeChild(self, child):
if child.__class__ is IPETEvaluationColumn:
self.children.remove(child)
elif child.__class__ is Aggregation:
self.aggregations.remove(child)
elif child.__class__ is IPETFilter:
self.filters.remove(child)
@staticmethod
def getNodeTag():
return IPETEvaluationColumn.nodetag
def getEditableAttributes(self):
return self.editableAttributes + super(IPETEvaluationColumn, self).getEditableAttributes()
def getRequiredOptionsByAttribute(self, attr):
return self.requiredOptions.get(attr, super(IPETEvaluationColumn, self).getRequiredOptionsByAttribute(attr))
def getName(self):
"""
infer the name for this column
if this column was constructed with a column name, the name is used
else if this column represents an original column of the data frame,
the original column name is used, otherwise, we construct an
artificial name that represents how this column is constructed
"""
if self.name is not None:
return self.name
elif self.origcolname is not None:
return self.origcolname
elif self.regex is not None:
return self.regex
elif self.constant is not None:
return "Const_%s" % self.constant
else:
prefix = self.transformfunc
if prefix is None:
prefix = ""
return prefix + ','.join((child.getName() for child in self.children))
def parseValue(self, val, df = None):
"""
parse a value into an integer (prioritized) or float
"""
if val in [None, ""]:
return None
for conversion in [int, float]:
try:
return conversion(val)
except:
pass
if df is not None and val in df.columns:
return df[val]
return val
def parseConstant(self):
"""
parse the constant attribute, which is a string, into an integer (prioritized) or float
"""
return self.parseValue(self.constant)
def getActiveFilters(self):
return [f for f in self.filters if f.isActive()]
def addAggregation(self, agg):
self.aggregations.append(agg)
def addFilter(self, filter_):
self.filters.append(filter_)
def getFormatString(self):
return self.formatstr
def set_comp(self, newvalue):
self.comp = None
if not newvalue:
self.comp = newvalue
elif newvalue == "quot" or newvalue == "difference":
self.comp = newvalue
elif newvalue.startswith("quot shift"):
try:
_ = float(newvalue[newvalue.rindex(" "):])
self.comp = newvalue
except ValueError:
raise ValueError("Trying to set an unknown comparison method '%s' for column '%s', should be in\n %s" % (newvalue, self.getName(), ", ".join((repr(c) for c in self.possiblecomparisons))))
def set_reduction(self, reduction):
"""Set the reduction function
"""
self.reduction = reduction
def set_reductionindex(self, reductionindex):
"""Set the reduction index of this column
        If None is passed, the reduction is applied over the entire evaluation index.
        Otherwise, a custom reduction index controls where this column's reduction function is applied:
            An integer n can be used to cause the reduction after the n'th
index column (starting at zero) of the corresponding evaluation.
If a negative integer -n is passed, this column creates its reduction index
from the evaluation index before the element indexed by '-n'.
If the desired reduction index is not a subset of the corresponding evaluation
index, a string tuple can be passed to uniquely define the columns by which
the reduced column should be indexed.
Example: The parent evaluation uses a three-level index 'A', 'B', 'C'. The column
should be reduced along the dimension 'B', meaning that the reduction yields
a unique index consisting of all combinations of 'A' X 'C'. This reduction can
be achieved by using "A C" as reduction index for this column.
Note that the reduction index must be a subset of the parent evaluation index.
Parameters
----------
reductionindex
            integer or string (space separated) reduction index for this column. None to use the entire index of the parent evaluation.
"""
if reductionindex is None or type(reductionindex) is int or isinstance(reductionindex, StrList):
self._reductionindex = reductionindex
elif type(reductionindex) is str:
try:
self._reductionindex = int(reductionindex)
except ValueError:
self._reductionindex = StrList(reductionindex)
self.reductionindex = reductionindex
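    # Sketch of how the reduction index is interpreted (column names are hypothetical):
    #   col.set_reductionindex(1)      -> reduce over only the first evaluation index column
    #   col.set_reductionindex("A C")  -> reduce to unique combinations of 'A' and 'C',
    #                                     as in the A/B/C example in the docstring above
    #   col.set_reductionindex(None)   -> keep the full evaluation index (no extra reduction)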
def getCompareColName(self):
return self.getName() + self.getCompareSuffix()
def getCompareMethod(self):
if self.comp is None or self.comp == "":
return None
else:
if self.comp == "quot":
return numpy.true_divide
elif self.comp == "difference":
return numpy.subtract
else:
try:
shift = float(self.comp[self.comp.rindex(" "):])
return lambda x, y:numpy.true_divide(x + shift, y + shift)
except ValueError:
return None
def getCompareSuffix(self):
if self.getCompareMethod() is not None:
if self.comp == "quot":
return "Q"
elif self.comp == 'difference':
return "D"
else:
return "Q+" + (self.comp[self.comp.rindex(" ") + 1:])
return ""
def attributesToStringDict(self):
return {k:str(v) for k, v in self.attributesToDict().items() if v is not None}
def toXMLElem(self):
"""
convert this Column into an XML node
"""
me = ElementTree.Element(IPETEvaluationColumn.getNodeTag(), self.attributesToStringDict())
# iterate through children and aggregations and convert them to xml nodes
for child in self.children:
me.append(child.toXMLElem())
for agg in self.aggregations:
me.append(agg.toXMLElem())
for filter_ in self.filters:
me.append(filter_.toXMLElem())
return me
@staticmethod
def processXMLElem(elem):
if elem.tag == IPETEvaluationColumn.getNodeTag():
column = IPETEvaluationColumn(**elem.attrib)
for child in elem:
if child.tag == 'Aggregation':
column.addAggregation(Aggregation.processXMLElem(child))
elif child.tag == IPETFilter.getNodeTag():
column.addFilter(IPETFilter.processXMLElem(child))
elif child.tag == IPETEvaluationColumn.getNodeTag():
column.addChild(IPETEvaluationColumn.processXMLElem(child))
return column
@staticmethod
def getMethodByStr(funcname : str = DEFAULT_REDUCTION , modules = [numpy, misc]):
"""
Find a method via name.
Parameters
----------
funcname
string containing the name of the function
modules
list of modules to search for the function
Return
------
the requested function if it was found. Else an IpetNodeAttributeError is thrown.
"""
for module in modules:
try:
return getattr(module, funcname)
except AttributeError:
pass
raise IpetNodeAttributeError(funcname, "Unknown function %s" % funcname)
def getTransformationFunction(self):
"""
tries to find the transformation function from the numpy, misc, or Experiment modules
"""
# Do we also have to search in module Key (for getWorstStatus etc)?
return IPETEvaluationColumn.getMethodByStr(self.transformfunc, [numpy, misc, Experiment])
def getReductionFunction(self):
"""
tries to find the reduction function from the numpy, misc, or Experiment modules
"""
if self.reduction is not None and self.reduction.startswith("shmean shift. by"):
return lambda x:misc.shmean(x, shiftby = float(self.reduction.split()[-1]))
else:
return IPETEvaluationColumn.getMethodByStr(self.reduction, [numpy, misc, Experiment, Key.ProblemStatusCodes])
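    # For example (values are illustrative): reduction="shmean shift. by 10" yields a
    # shifted geometric mean with shiftby=10.0, while reduction="max" is resolved to
    # numpy.max via getMethodByStr().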
def getReductionIndex(self, evalindexcols : list) -> list:
"""Return this columns reduction index, which is a subset of the evaluation index columns
Parameters
----------
evalindexcols
list of evaluation index columns, may only contain a single element
Returns
-------
list
            a list representing the (sub)set of columns representing this column's individual
reduction index
"""
if self._reductionindex is None:
return list(evalindexcols)
if type(self._reductionindex) is int:
reductionindex = min(self._reductionindex, len(evalindexcols))
# negative indices are also allowed
reductionindex = max(reductionindex, -len(evalindexcols))
return list(evalindexcols[:reductionindex])
else: # reduction index is a string tuple
for c in self._reductionindex.getList():
if c not in evalindexcols:
raise IpetNodeAttributeError(self.reduction, "reduction index column {} is not contained in evaluation index columns {}".format(c, evalindexcols))
return self._reductionindex.getList()
def getColumnData(self, df_long : DataFrame, df_target : DataFrame, evalindexcols : list) -> tuple:
"""
Retrieve the data associated with this column
Parameters
----------
df_long
DataFrame that contains original, raw data and already evaluated columns
df_target
DataFrame that has already been grouped to only been reduced to the target index columns
        Returns
        -------
tuple
(df_long, df_target, result)
- df_long and df_target to which columns may have been appended
- result is the column (or data frame) view in df_long
"""
# if no children are associated with this column, it is either
# a column represented in the data frame by an 'origcolname',
# or a constant
if len(self.children) == 0:
if self.origcolname is not None:
try:
result = df_long[self.origcolname]
except KeyError as e:
                    # log a warning and fall back to a series of NaNs
logger.warning("Could not retrieve data %s" % self.origcolname)
result = pd.Series(numpy.nan, index = df_long.index)
#
# filter for columns that match the regular expression
#
elif self.regex is not None:
result = df_long.filter(regex = self.regex)
#
# store scalar constant
#
elif self.constant is not None:
df_long[self.getName()] = self.parseConstant()
result = df_long[self.getName()]
else:
# try to apply an element-wise transformation function to the children of this column
# gettattr is equivalent to numpy.__dict__[self.transformfunc]
transformfunc = self.getTransformationFunction()
# concatenate the children data into a new data frame object
childframes = []
for child in self.children:
df_long, df_target, childresult = child.getColumnData(df_long, df_target, evalindexcols)
childframes.append(childresult)
# argdf = df_long[[child.getName() for child in self.children if child.isActive()]]
argdf = pd.concat(childframes, axis = 1)
applydict = dict(axis = 1)
try:
# try to directly apply the transformation function, this might fail for
# some transformations, e.g., the 'divide'-function of numpy because it
# requires two arguments instead of the series associated with each row
result = argdf.apply(transformfunc, **applydict)
except (TypeError, ValueError):
# try to wrap things up in a temporary wrapper function that unpacks
# the series argument into its single values
# e.g., wrap transformfunc((x,y)) as transformfunc(x,y)
def tmpwrapper(*args):
return transformfunc(*(args[0].values))
# apply the wrapper function instead
result = argdf.apply(tmpwrapper, **applydict)
if self.alternative is not None:
alternative = self.parseValue(self.alternative, df_long)
if alternative is not None:
booleanseries = pd.isnull(result)
for f in self.getActiveFilters():
booleanseries = numpy.logical_or(booleanseries, f.applyFilter(df_long).iloc[:, 0])
result = result.where(~booleanseries, alternative)
if self.minval is not None:
minval = self.parseValue(self.minval, df_long)
if minval is not None:
if type(minval) in [int, float]:
result = numpy.maximum(result, minval)
else:
comp = minval.astype(result.dtype)
try:
result = numpy.maximum(result, comp)
except:
logger.warning("When filling in the minimum, an error occurred for the column '{}':\n{}".format(self.getName(), self.attributesToStringDict()))
result = pd.concat([result, comp], axis = 1).max(axis = 1)
if self.maxval is not None:
maxval = self.parseValue(self.maxval, df_long)
if maxval is not None:
if type(maxval) in [int, float]:
result = numpy.minimum(result, maxval)
else:
comp = maxval.astype(result.dtype)
try:
result = numpy.minimum(result, comp)
except:
logger.warning("When filling in the maximum, an error occurred for the column '{}':\n{}".format(self.getName(), self.attributesToStringDict()))
result = pd.concat([result, comp], axis = 1).min(axis = 1)
reductionindex = self.getReductionIndex(evalindexcols)
#
# do not append frames with more than column. (They will be transformed at a higher level)
#
if len(result.shape) > 1:
if result.shape[1] > 1:
return df_long, df_target, result
else:
# a dataframe with only one column: select that one to get a series to work with in further code
result = result[result.columns[0]]
if len(reductionindex) > 0:
# apply reduction and save the result by joining it into both data frames
nrows_long = df_long.shape[0]
nrows_target = df_target.shape[0]
df_long[self.getName()] = result
# the number of rows in the long and the target data frame is equal
# computations are faster in this case because the target result
# is simply a permutation of the result
#
if nrows_long == nrows_target and reductionindex == evalindexcols:
# ## set index for the join operation
targetresult = result.copy()
targetresult.index = df_long.set_index(evalindexcols).index
targetresult = targetresult.rename(self.getName())
else:
#
# reduction index is smaller than the evalindex, perform a reduction
# based on the defined reduction index
targetresult = df_long.groupby(by = reductionindex)[self.getName()].apply(self.getReductionFunction())
#
# The join operations resamples the possibly reduced result based on the reduction index
#
df_long = df_long.join(targetresult, on = reductionindex, lsuffix = "_old")
#
# this column should usually not appear in the target, yet. A join operation is necessary
# because the index of this frame is a permuted version of the original data frame
#
if not self.getName() in df_target:
df_target = df_target.join(targetresult, on = reductionindex, lsuffix = "_old")
else:
#
# add scalar to both data frames
#
scalar = self.getReductionFunction()(result)
df_long[self.getName()] = scalar
if not self.getName() in df_target:
df_target[self.getName()] = scalar
return df_long, df_target, result
def getStatsTests(self):
return [agg.getStatsTest() for agg in self.aggregations if agg.getStatsTest() is not None]
def addDependency(self, dependencies, dep):
if dep not in dependencies[self.getName()]:
dependencies[self.getName()].add(dep)
def getDependencies(self):
"""Return a list of data frame column names that this column requires
"""
dependencies = {self.getName() : set()}
if self.origcolname is not None:
self.addDependency(dependencies, self.origcolname)
for c in [self.minval, self.alternative, self.maxval]:
if c is not None and self.parseValue(c) is None:
self.addDependency(dependencies, c)
for i in self.children:
self.addDependency(dependencies, i.getName())
dependencies.update(i.getDependencies())
for i in self.filters:
for j in [1, 2]:
dep = i.getDependency(j)
if dep is not None:
self.addDependency(dependencies, dep)
return dependencies
def getStatsColName(self, statstest):
"""Return the column name of the statstest"""
return '_'.join((self.getName(), statstest.__name__))
def getAllStatsColNames(self, statstest):
"""Return a list of the column names of all statstests"""
return [self.getStatsColName(statstest) for statstest in self.getStatsTests()]
def getAggColName(self, aggr):
"""Return the column name of the aggrecation"""
return '_'.join((self.getName(), aggr.getName()))
def getAllAggColNames(self):
"""Return a list of the column names of all aggregations"""
return [self.getAggColName(aggr) for aggr in self.aggregations]
def hasCompareColumn(self, title):
if title == "":
return False
if title == self.getCompareColName():
# title is a compare column of the long table
return True
if title[-1] in ["Q", "p"]:
if title[:-1] in self.getAllAggColNames():
# title is a compare column of aggregated table or statistical test column
return True
return False
def hasDataColumn(self, title):
if title == "":
return False
if title == self.getName():
# title is a data column of the long table
return True
if title in self.getAllAggColNames():
# title is a compare column of aggregated table or statistical test column
return True
return False
class FormatFunc:
def __init__(self, formatstr):
self.formatstr = formatstr[:]
def beautify(self, x):
return (self.formatstr % x)
class StrList:
"""
Represents an easier readible and parsable list of strings
"""
def __init__(self, strList, splitChar = " "):
self.list = StrList.splitStringList(strList, splitChar)
self.splitChar = splitChar
@staticmethod
def splitStringList(strList, splitChar = " "):
"""Split a string that represents list elements separated by optional split-character
"""
if strList is None or strList == "":
return None
elif type(strList) is str:
if splitChar == " ":
return strList.split()
else:
return strList.split(splitChar)
else:
return list(strList)
def getList(self):
if self.list is None:
return []
return self.list
def __str__(self):
if self.list is None:
return ""
return self.splitChar.join(self.list)
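# Small illustration of StrList behaviour (inputs chosen here for demonstration):
# StrList("A B C").getList() == ["A", "B", "C"], str(StrList("A B C")) == "A B C",
# and StrList("A,B", splitChar=",").getList() == ["A", "B"].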
class IPETEvaluation(IpetNode):
"""
evaluates a comparator with given group keys, columns, and filter groups
An evaluation transfers raw, collected data from a collection of testruns
into tables based on selected columns, filter groups, and aggregations.
An evaluation and its friends come with an easy-to-modify XML language
for modification.
By defining multiple evaluations,
it is therefore possible to view the same raw data through multiple angles
"""
nodetag = "Evaluation"
# todo put tex, csv etc. here as other possible streams for filter group output
possiblestreams = ['stdout', 'tex', 'txt', 'csv']
# DEFAULT_GROUPKEY = "Settings"
DEFAULT_GROUPKEY = Key.ProblemStatus
DEFAULT_COMPARECOLFORMAT = "%.3f"
DEFAULT_INDEX = " ".join([Key.ProblemName, Key.LogFileName])
DEFAULT_INDEXSPLIT = -1
ALLTOGETHER = "_alltogether_"
editableAttributes = ["defaultgroup", "sortlevel", "comparecolformat", "grouptags", "index", "indexsplit", "validate", "suppressions", "fillin", "integral"]
attributes2Options = {
"grouptags" : [True, False],
"fillin" : [True, False]
}
deprecatedattrdir = {"groupkey" : "groupkey is specified using 'index' and 'indexsplit'",
"evaluateoptauto" : "Optimal auto settings are no longer available, use reductions instead"}
def __init__(self, defaultgroup = None,
sortlevel = None, comparecolformat = DEFAULT_COMPARECOLFORMAT,
index = DEFAULT_INDEX, indexsplit = DEFAULT_INDEXSPLIT,
validate = None, suppressions = None, grouptags = None, fillin = None, integral = None, **kw):
"""
constructs an Ipet-Evaluation
Parameters
----------
defaultgroup : the values of the default group to be compared with, if left empty a defaultgroup is generated
sortlevel : int or None, level on which to base column sorting, default: None (for no sorting)
comparecolformat : format string for comparison columns
index : (string or list or None) single or multiple column names that serve as (row) and column index levels, if 'auto' an index is generated.
indexsplit : (int) position to split index into row and column levels, negative to count from the end.
validate : (string) for the relative or absolute location of a solu file for validation.
suppressions : (string) column names that should be excluded from output (as comma separated list of simple strings or regular expressions).
grouptags : (bool) True if group tags should be displayed in long table, otherwise False
fillin : (bool) True if missing data should be filled in, otherwise False
integral : a string that specifies how to compute primal and dual integrals. Examples are 'unscaled 600 None' or 'scaled 0.5 1.0'.
Specifying an integral leads to a recomputation of integrals by the experiment.
"""
# construct super class first, Evaluation is currently always active
super(IPETEvaluation, self).__init__(True, **kw)
self.filtergroups = []
self.comparecolformat = comparecolformat[:]
self.columns = []
self.evaluated = False
self.feastol = None
self.gaptol = None
self.defaultgroup = defaultgroup
self.set_index(index)
self.set_indexsplit(indexsplit)
self.set_sortlevel(sortlevel)
self.set_grouptags(grouptags)
self.set_fillin(fillin)
self.integral = integral
self.set_validate(validate)
self.suppressions = suppressions
def getName(self):
return self.nodetag
def set_grouptags(self, grouptags):
if grouptags not in [None, "", False, "False"]:
self.grouptags = True
else:
self.grouptags = False
def set_fillin(self, fillin):
if fillin not in [None, "", False, "False"]:
self.fillin = True
else:
self.fillin = False
def isEvaluated(self):
"""
returns whether this evaluation has been evaluated since its columns or filter groups have been modified
"""
return self.evaluated
def setEvaluated(self, evaluated):
"""
change the flag if this evaluation has been evaluated since its last modification
"""
self.evaluated = evaluated
def set_sortlevel(self, sortlevel):
self.sortlevel = int(sortlevel) if sortlevel is not None else None
if self.sortlevel is not None and self.getColIndex() != []:
ncols = len(self.getColIndex()) + 1
if self.sortlevel >= ncols:
logger.warning("Sortlevel too large: Value ({}) needs to be in [0, {}].".format(self.sortlevel, ncols - 1))
def setCompareColFormat(self, comparecolformat):
self.comparecolformat = comparecolformat[:]
def attributesToStringDict(self):
return {k:str(v) for k, v in self.attributesToDict().items() if v is not None and str(v) != ""}
@staticmethod
def getNodeTag():
return IPETEvaluation.nodetag
def getEditableAttributes(self):
return self.editableAttributes
def getChildren(self):
return self.columns + self.filtergroups
def acceptsAsChild(self, child):
return child.__class__ in (IPETEvaluationColumn, IPETFilterGroup)
def addChild(self, child):
if not self.acceptsAsChild(child):
raise ValueError("Cannot accept child %s as child of an evaluation node" % child)
if child.__class__ is IPETEvaluationColumn:
self.columns.append(child)
elif child.__class__ is IPETFilterGroup:
self.filtergroups.append(child)
self.setEvaluated(False)
def removeChild(self, child):
if child.__class__ is IPETEvaluationColumn:
self.columns.remove(child)
elif child.__class__ is IPETFilterGroup:
self.filtergroups.remove(child)
self.setEvaluated(False)
def getRequiredOptionsByAttribute(self, attr):
return self.attributes2Options.get(attr, super(IPETEvaluation, self).getRequiredOptionsByAttribute(attr))
def addFilterGroup(self, fg):
# check if a filter group of the same name already exists
if fg.getName() in (fgroup.getName() for fgroup in self.filtergroups):
raise ValueError("Error: Filter group of name <%s> already existing in current evaluation!" % fg.getName())
self.filtergroups.append(fg)
self.setEvaluated(False)
def removeFilterGroup(self, fg):
self.filtergroups.remove(fg)
self.setEvaluated(False)
def set_index(self, index : list):
"""Set index identifier list
"""
self.autoIndex = False
self.index = index
self._index = StrList(index)
logger.debug("Set index to '{}'".format(index))
if index == "auto":
self.autoIndex = True
return
def getRowIndex(self) -> list:
"""Return (list of) keys to create row index
"""
return self.getIndex()[:self.indexsplit]
def getColIndex(self) -> list:
"""Return (list of) keys to create column index
"""
return self.getIndex()[self.indexsplit:]
def getIndex(self) -> list:
"""Return all index columns as a list
"""
return self._index.getList()
def getDefaultgroup(self, data):
"""Return tuple representation of defaultgroup
        Parameters
        ----------
        data
            data frame object with columns that match the specified column index
"""
# split the default group on colons
dg = StrList.splitStringList(self.defaultgroup, ":")
if dg is None:
x = None
else:
# try casting as many to float as possible
x = list(dg)
for i in range(len(x)):
try:
x[i] = float(x[i])
except:
pass
defaultgroup = None
# try to match the length of x to the length of the specified column index
if x is not None:
if len(x) > len(self.getColIndex()):
x = x[:len(self.getColIndex())]
if len(x) == 1:
defaultgroup = x[0]
else:
defaultgroup = tuple(x)
#
# check if this group is contained
#
if self.defaultgroupIsContained(defaultgroup, data):
return defaultgroup
#
# the default group is None or not contained
# -> use first element in the data frame
#
if len(self.getColIndex()) == 1:
return data[self.getColIndex()].iloc[0, :].values[0]
else:
return tuple(data.iloc[0][self.getColIndex()])
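    # Sketch of the defaultgroup resolution with hypothetical values: given a column index
    # ["Settings", "Seed"], the string "default:0" is parsed into the tuple ("default", 0.0);
    # if that group does not occur in the data, the first group found in the frame is used.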
def set_defaultgroup(self, dg : str):
"""Set defaultgroup
Parameters
----------
dg
string representation of the defaultgroup in format "val1:val2:val3", or None
"""
self.defaultgroup = dg
logger.debug("Set defaultgroup to {}".format(self.defaultgroup))
self.setEvaluated(False)
def set_indexsplit(self, indexsplit):
self.indexsplit = int(indexsplit)
# make sure that we have at least one col as rowindex
if self.index is not None and not self.autoIndex:
self.indexsplit = min(len(self.index), self.indexsplit)
def addColumn(self, col):
self.columns.append(col)
self.setEvaluated(False)
def removeColumn(self, col):
self.columns.remove(col)
self.setEvaluated(False)
def set_suppressions(self, suppressions: str):
"""Sets suppressions attribute to the argument of this function
"""
self.suppressions = suppressions
def addComparisonColumns(self, df: DataFrame) -> DataFrame:
""" Add the comparison columns.
Add the specified comparison columns to df, returns extended df in the same format
Parameters
----------
df
DataFrame containing only relevant data.
df has ids as index. The indexkeys are columns.
Returns
-------
DataFrame
The original DataFrame with the extra columns appended.
"""
if self.getColIndex() == []:
return df
usercolumns = []
dg = self.getDefaultgroup(df)
for col in self.toposortColumns(self.getActiveColumns()):
# look if a comparison with the default group should be made
if col.getCompareMethod() is not None:
df_bar = df.set_index(self.getRowIndex(), drop = True)
grouped = df_bar.groupby(by = self.getColIndex())[col.getName()]
compcol = dict(list(grouped))[dg]
comparecolname = col.getCompareColName()
# apply the correct comparison method to the original and the temporary column
compmethod = col.getCompareMethod()
method = lambda x:compmethod(*x)
df[comparecolname] = 0
df.set_index(self.getIndex(), inplace = True)
for name, group in grouped:
tmpgroup = DataFrame(group)
tmpgroup["_tmpcol_"] = compcol
tmpgroup[comparecolname] = tmpgroup[[col.getName(), "_tmpcol_"]].apply(method, axis = 1) # .set_index(group.index)
#
colindex = self.getColIndex()
if len(colindex) > 1:
for n, i in zip(name, colindex):
tmpgroup[i] = n
else:
tmpgroup[colindex[0]] = name
tmpgroup.reset_index(drop = False, inplace = True)
tmpgroup.set_index(self.getIndex(), inplace = True)
newvals = tmpgroup[comparecolname]
df[comparecolname].update(newvals)
df.reset_index(drop = False, inplace = True)
usercolumns.append(comparecolname)
# TODO Sort usercolumns?
self.usercolumns = self.usercolumns + usercolumns
return df
def reduceToColumns(self, df_long : DataFrame, df_target : DataFrame) -> tuple:
""" Reduce the huge number of columns
The data frame is reduced to the columns of the evaluation.
(concatenate usercolumns, neededcolumns and additionalfiltercolumns from df_long)
Parameters
----------
df_long
DataFrame returned by Experiment with preprocessed columns '_count_', '_solved_', etc..
Dataframe to evaluate, mostly joined data from an experiment,
that contains the necessary columns required by this evaluation.
For example: A dataframe containing the parsed data from one or
multiple .trn files created by ipet-parse.
df_target
DataFrame with preprocessed columns that contain the index column
Returns
-------
tuple
df_long, df_target after processing the user columns
"""
# We are only interested in the columns that are currently active
usercolumns = [c.getName() for c in self.getActiveColumns()]
evalindexcols = self.getIndex()
#
# loop over a topological sorting of the active columns to compute
#
for col in self.toposortColumns(self.getActiveColumns()):
try:
df_long, df_target, _ = col.getColumnData(df_long, df_target, evalindexcols)
except Exception as e:
logger.warning("An error occurred for the column '{}':\n{}".format(col.getName(), col.attributesToStringDict()))
raise e
logger.debug("Target data frame : \n{}\n".format(df_target))
newcols = [Key.ProblemStatus, Key.SolvingTime, Key.TimeLimit, Key.ProblemName]
self.usercolumns = usercolumns
return df_target
def fillMissingData(self, data):
"""
Fill in missing elements to ensure that all possible index combinations are available.
Parameters
----------
data
the dataframe
Returns
-------
newdata
dataframe filled with missing data
"""
list_list = [list(set(data[i])) for i in self.getIndex()]
data["_miss_"] = False
newind = pd.MultiIndex.from_product(list_list, names=self.getIndex())
if len(newind) < len(data):
raise AttributeError("Index not unique, cannot fill in data. Exiting.")
newdata = data.set_index(self.getIndex()).reindex(newind).reset_index()
newdata["_miss_"].fillna(value=True, inplace=True)
newdata[Key.ProblemStatus].fillna(Key.ProblemStatusCodes.Missing, inplace=True)
return newdata
def toposortColumns(self, columns : list) -> list:
""" Compute a topological ordering respecting the data dependencies of the specified column list.
Parameters
----------
columns
A list of the column-objects to be sorted.
Returns
-------
list
A list of topologically sorted column objects.
"""
adj = self.getDependencies(columns)
toposorted = list(toposort(adj))
logger.debug("TOPOSORT:\nDependency List: {},\nTopological Ordering: {}".format(adj, toposorted))
def getIndex(name, toposorted):
for idx, topo in enumerate(toposorted):
if name in topo: return idx
return -1
indices = {col.getName() : getIndex(col.getName(), toposorted) for col in columns}
return sorted(columns, key = lambda x: indices.get(x.getName(), -1))
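    # For instance (column names are hypothetical): if column "Ratio" depends on "TimeA"
    # and "TimeB", getDependencies() yields {"Ratio": {"TimeA", "TimeB"}, ...} and
    # toposort() places "TimeA"/"TimeB" in an earlier set than "Ratio", so those columns
    # are computed first.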
def getDependencies(self, columns : list) -> dict:
""" Recursively collect the dependencies of a list of columns.
Parameters
----------
columns
A list of columns
Returns
-------
A dictionary containing the names and dependencies of the columns.
"""
adj = {}
for col in columns:
newdeps = col.getDependencies()
for key, val in newdeps.items():
adj.setdefault(key, set()).update(val)
return adj
def getValidate(self):
"""this evaluations validation attribute
Returns
-------
string
either the current validation file as a string, or None if unspecified.
"""
return self.validate
def set_validate(self, validate):
"""sets this evaluation's validation attribute
Parameters
----------
validate : str or None
new value for the source of validation information for this evaluation
"""
self.validate = validate
def set_gaptol(self, gaptol):
"""sets this evaluation's gaptol attribute
Parameters
----------
gaptol : str or float
new value for the gaptol for this evaluation
"""
self.gaptol = gaptol
def set_feastol(self, feastol):
"""sets this evaluation's feastol attribute
Parameters
----------
feastol : str or float
new value for the feastol for this evaluation
"""
self.feastol = feastol
def validateData(self, df : DataFrame) -> DataFrame:
"""validate data based on external solution information
"""
if not self.validate:
logger.info("No validation information specified")
file_exists = False
else:
try:
f = open(self.validate, "r")
f.close()
file_exists = True
logger.info("Validation information provided: '{}'".format(self.validate))
except:
file_exists = False
logger.warning("Could not open validation file '{}'".format(self.validate))
if file_exists:
v = Validation(self.validate)
else:
v = Validation(None)
if self.feastol:
try:
v.set_feastol(float(self.feastol))
except:
pass
if self.gaptol:
try:
v.set_tol(float(self.gaptol))
except:
pass
result = v.validate(df)
logger.info("Validation resulted in the following status codes: [{}]".format(
"|".join([" {}: {} ".format(k, v) for k, v in result.value_counts().items()])))
df[Key.ProblemStatus] = result
return df
def calculateNeededData(self, df : DataFrame) -> DataFrame:
""" Add the status columns.
Calculate and append needed data about statuses
Parameters
----------
df
DataFrame containing only relevant data.
df has ids as index. The indexkeys are columns.
Returns
-------
DataFrame
The original DataFrame with the extra columns appended.
"""
df['_time_'] = (df[Key.ProblemStatus].isin((Key.ProblemStatusCodes.Better, Key.ProblemStatusCodes.TimeLimit)))
# df['_time_'] = (df[Key.ProblemStatus] == Key.ProblemStatusCodes.TimeLimit)
df['_limit_'] = ((df['_time_']) | df[Key.ProblemStatus].isin([Key.ProblemStatusCodes.NodeLimit,
Key.ProblemStatusCodes.MemoryLimit,
Key.ProblemStatusCodes.Interrupted
]))
df['_primfail_'] = df[Key.ProblemStatus].isin([
Key.ProblemStatusCodes.FailObjectiveValue,
Key.ProblemStatusCodes.FailSolInfeasible,
Key.ProblemStatusCodes.FailSolOnInfeasibleInstance,
])
df['_dualfail_'] = df[Key.ProblemStatus].isin([Key.ProblemStatusCodes.FailDualBound])
df['_fail_'] = df['_primfail_'] | \
df['_dualfail_'] | \
df[Key.ProblemStatus].isin([Key.ProblemStatusCodes.FailReaderror,
Key.ProblemStatusCodes.FailInconsistent,
Key.ProblemStatusCodes.Fail])
df['_abort_'] = (df[Key.ProblemStatus] == Key.ProblemStatusCodes.FailAbort)
df['_solved_'] = (~df['_limit_']) & (~df['_fail_']) & (~df['_abort_']) & (~df['_miss_'])
df['_count_'] = 1
df['_unkn_'] = (df[Key.ProblemStatus] == Key.ProblemStatusCodes.Unknown)
self.countercolumns = ['_time_', '_limit_', '_primfail_', '_dualfail_', '_fail_', '_abort_', '_solved_', '_unkn_', '_count_', '_miss_']
return df
def toXMLElem(self):
me = ElementTree.Element(IPETEvaluation.getNodeTag(), self.attributesToStringDict())
for col in self.columns:
me.append(col.toXMLElem())
for fg in self.filtergroups:
fgelem = fg.toXMLElem()
me.append(fgelem)
return me
@staticmethod
def fromXML(xmlstring):
tree = ElementTree.fromstring(xmlstring)
return IPETEvaluation.processXMLElem(tree)
@staticmethod
def fromXMLFile(xmlfilename):
tree = ElementTree.parse(xmlfilename)
return IPETEvaluation.processXMLElem(tree.getroot())
@staticmethod
def processXMLElem(elem):
if elem.tag == IPETEvaluation.getNodeTag():
logger.debug("Construct IPET Evaluation with attributes : \n{}".format(elem.attrib))
ev = IPETEvaluation(**elem.attrib)
for child in elem:
if child.tag == IPETFilterGroup.getNodeTag():
# add the filter group to the list of filter groups
fg = IPETFilterGroup.processXMLElem(child)
ev.addFilterGroup(fg)
elif child.tag == IPETEvaluationColumn.getNodeTag():
ev.addColumn(IPETEvaluationColumn.processXMLElem(child))
return ev
def reduceByIndex(self, df : DataFrame) -> DataFrame:
""" Reduce data to have a unique index given by indexkeys.
        Each column is reduced by its reduction function such that indexkeys yield a unique hierarchical index.
Parameters
----------
df
DataFrame containing data to be reduced.
df has ids as index. The indexkeys are columns.
Returns
-------
DataFrame
The reduced DataFrame.
"""
grouped = df.groupby(by = self.getIndex())
newcols = []
reductionMap = {'_solved_' : numpy.all, '_count_' : numpy.max}
for col in self.countercolumns:
newcols.append(grouped[col].apply(reductionMap.get(col, numpy.any)))
#
# compute additional, requested columns for filters
#
activecolumns = [c.getName() for c in self.getActiveColumns()]
additionalfiltercolumns = []
for fg in self.getActiveFilterGroups():
additionalfiltercolumns += fg.getNeededColumns(df)
additionalfiltercolumns = list(set(additionalfiltercolumns))
additionalfiltercolumns = [afc for afc in additionalfiltercolumns if afc not in set(activecolumns + self.countercolumns + self.getIndex())]
for col in additionalfiltercolumns:
newcols.append(grouped[col].apply(meanOrConcat))
reduceddf = pd.concat(newcols, axis = 1)
ind = self.getIndex()
index_uniq = [i for i in ind if i not in reduceddf.columns]
index_dupl = [i for i in ind if i in reduceddf.columns]
reduceddf = reduceddf.reset_index(index_uniq)
reduceddf = reduceddf.reset_index(index_dupl, drop = True)
#
# search for duplicate column names to avoid cryptic error messages later
#
        if reduceddf.columns.duplicated().any():
            raise ValueError("Duplicate columns {} in reduced data frame, aborting".format(list(reduceddf.columns[reduceddf.columns.duplicated()])))
return reduceddf
def convertToHorizontalFormat(self, df : DataFrame) -> DataFrame:
""" Convert data to have an index given by indexkeys.
Indexkeys are defined by "index" and "columnindex", these yield a unique index.
indexkeys[0] is taken as (hierarchical) row index,
indexkeys[1] is taken as (hierarchical) column index.
Parameters
----------
df
DataFrame containing data to be converted.
df has ids as index. The indexkeys are columns.
Returns
-------
DataFrame
The converted DataFrame.
"""
#
# restrict the columns to those that should appear in
# the final table, but make sure that no columns
# appear twice. Respect also the order of the columns
#
columns = []
colset = set()
for c in self.usercolumns + self.getIndex() + ["groupTags"]:
if c not in colset and c not in self.countercolumns and c in df.columns:
columns.append(c)
colset.add(c)
df = df[columns].set_index(self.getIndex()).sort_index(level = 0)
df = df.unstack(self.getColIndex())
if len(self.getColIndex()) > 0 :
df = df.swaplevel(0, len(self.getColIndex()), axis = 1)
return df
def checkStreamType(self, streamtype):
if streamtype not in self.possiblestreams:
return False
else:
return True
def getActiveFilterGroups(self):
return [fg for fg in self.filtergroups if fg.isActive()]
def getActiveColumns(self):
return [col for col in self.columns if col.isActive()]
def getColumnFormatters(self, df):
"""
returns a formatter dictionary for all columns of this data frame
expects a Multiindex column data frame df
"""
all_colnames = df.columns
if len(all_colnames) == 0:
return {}
formatters = {}
l = 0
if not isinstance(all_colnames[0], str):
l = len(all_colnames[0]) - 1
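            # l is the position of the original column name inside each MultiIndex column
            # tuple; it stays 0 when the columns are flat (plain string) labels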
comptuples = []
# loop over columns
for col in self.getActiveColumns():
colname = col.getName()
# determine all comparison columns and append them to the list
if col.getCompareMethod() is not None:
if l == 0:
comptuples += [dfcol for dfcol in all_colnames if col.hasCompareColumn(dfcol)]
else:
comptuples += [dfcol for dfcol in all_colnames if col.hasCompareColumn(dfcol[l])]
# if the column has no formatstr attribute, continue
colformatstr = col.getFormatString()
if not colformatstr:
continue
            # for long table: retrieve all columns as tuples that contain the column name, i.e. for column 'Time' and
            # settings 'default' and 'heuroff', the result should be [('default', 'Time'), ('heuroff', 'Time')]
if l == 0:
tuples = [dfcol for dfcol in all_colnames if col.hasDataColumn(dfcol)]
else:
tuples = [dfcol for dfcol in all_colnames if col.hasDataColumn(dfcol[l])]
# add new formatting function to the map of formatting functions
for thetuple in tuples:
formatters.update({thetuple:FormatFunc(colformatstr).beautify})
# display countercolumns as integer
counting_columns = [dfcol for dfcol in all_colnames if dfcol[l].startswith("_") and dfcol[l].endswith("_")]
for cctup in counting_columns:
formatters.update({cctup:FormatFunc("%.0f").beautify})
for comptuple in comptuples:
formatters.update({comptuple:FormatFunc(self.comparecolformat).beautify})
return formatters
def sortDataFrame(self, df):
if self.sortlevel is not None:
return df.sort_index(level = self.sortlevel, axis = 1, inplace = False)
else:
return df
def suppressColumns(self, df : DataFrame) -> DataFrame:
"""Returns a new data frame with all columns removed that match the suppressions attribute
"""
if not self.suppressions: # None or empty string
return df
suppressions = self.suppressions.split()
df_reduced = df.copy()
for suppression in suppressions:
df_reduced.drop(list(df_reduced.filter(regex = suppression)), axis = 1, inplace = True)
return df_reduced
def streamDataFrame(self, df, filebasename, streamtype):
df = self.sortDataFrame(df)
df = self.suppressColumns(df)
if not self.checkStreamType(streamtype):
raise ValueError("Stream error: Unknown stream type %s" % streamtype)
streammethod = getattr(self, "streamDataFrame_%s" % streamtype)
formatters = self.getColumnFormatters(df)
streammethod(df, filebasename, formatters)
def streamDataFrame_stdout(self, df, filebasename, formatters = {}):
"""
print to console
"""
print("%s:" % filebasename)
print(df.to_string(formatters = formatters))
def streamDataFrame_tex(self, df : DataFrame, filebasename, formatters = {}):
"""
write tex output
"""
with open("%s.tex" % filebasename, "w") as texfile:
texfile.write(df.to_latex(formatters = formatters, escape=False))
def flatten_index(self, col) -> str:
if type(col) is tuple:
return '_'.join(map(str, col[::-1]))
else:
return col
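    # e.g. flatten_index(("default", "Time")) -> "Time_default" (the tuple is reversed before joining)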
def streamDataFrame_csv(self, df : DataFrame, filebasename, formatters = {}):
with open("%s.csv" % filebasename, "w") as csvfile:
            #
            # support for custom csv formatters was dropped in a pandas update.
            # This is acceptable because a csv file is usually an intermediate
            # product anyway, and the tool that consumes the csv can handle
            # the final formatting.
            #
            logger.warning("Custom formatting is ignored for csv output")
df.columns = [self.flatten_index(col) for col in df.columns]
df.to_csv(csvfile)
def streamDataFrame_txt(self, df : DataFrame, filebasename, formatters = {}):
"""
write txt output
"""
with open("%s.txt" % filebasename, "w") as txtfile:
df.to_string(txtfile, formatters = formatters, index_names = False, sparsify = False)
def findStatus(self, statuscol):
uniques = set(statuscol.unique())
for status in ["ok", "timelimit", "nodelimit", "memlimit", "unknown", "fail", "abort"]:
if status in uniques:
return status
else:
return statuscol.unique()[0]
def checkMembers(self):
"""
checks the evaluation members for inconsistencies
"""
if self.columns == []:
raise AttributeError("Please specify at least one column.")
for col in self.columns:
try:
col.checkAttributes()
except Exception as e:
raise AttributeError("Error in column definition of column %s:\n %s" % (col.getName(), e))
if col.isRegex():
raise AttributeError("Top level column {} must not specify a regular expression".format(col.getName()))
def getAggregatedGroupData(self, filtergroup):
if not filtergroup in self.filtergroups:
raise ValueError("Filter group %s (name:%s) is not contained in evaluation filter groups" % (filtergroup, filtergroup.getName()))
if not filtergroup.isActive():
raise ValueError("Filter group %s is currently not active" % filtergroup.getName())
return self.filtered_agg.get(filtergroup.getName(), DataFrame())
def getInstanceGroupData(self, filtergroup):
if not filtergroup in self.filtergroups:
raise ValueError("Filter group %s (name:%s) is not contained in evaluation filter groups" % (filtergroup, filtergroup.getName()))
if not filtergroup.isActive():
raise ValueError("Filter group %s is currently not active" % filtergroup.getName())
return self.filtered_instancewise.get(filtergroup.getName(), DataFrame())
def getAggregatedData(self):
return self.retagg
def getInstanceData(self):
return self.rettab
def defaultgroupIsContained(self, group, data) -> bool:
'''
Check if the given group is contained in the data.
Parameters
----------
group
scalar or tuple representing a default group
data
raw DataFrame
Returns
-------
bool
True if the group is found, else False
'''
#
# check if the column index and the group have equal length
# (be careful about group being string)
#
cIndex = self.getColIndex()
if type(group) is tuple and len(group) != len(cIndex):
return False
elif type(group) is not tuple and len(cIndex) > 1:
return False
#
# depending on the length of the column index, different methods apply
#
if len(cIndex) == 1:
#
# use scalar comparison
#
return numpy.any(data[self.getColIndex()] == group)
else:
#
# use conjunction of scalar comparisons (this is not fast)
#
result = True
for idx, l in enumerate(cIndex):
result = result & (data[l] == group[idx])
return numpy.any(result)
def tryGenerateIndexAndDefaultgroup(self, data):
'''
Generate a reasonable index and defaultgroup based on the given data
Take a look at the columns: Key.ProblemName, Key.Solver, Key.Settings,
Key.Version and Key.LogFileName.
        Set indexsplit to 1 and choose the column with the most unique values as rowindex.
        From the remaining columns, choose as columnindex one or two columns with
        as few distinct values as possible, but at least two.
        Finally, generate a defaultgroup based on the new index.
Parameters
----------
data
the data of the experiment
'''
# do this only if the user requested an automatic index
if not self.autoIndex:
return
lowerbound = 1 # 1 or bigger
possible_indices = [Key.ProblemName, Key.Solver, Key.Settings, Key.Version, Key.LogFileName]
height = data.shape[0]
# find the indices that are represented in the data with their numbers of unique values
present_indices = [[key, data[key].nunique()] for key in possible_indices if key in data.columns]
        # take the column with the most unique values as the row index
first = max(present_indices, key = lambda y: y[1])
processed_indices = [[key, count] for [key, count] in present_indices if count > lowerbound and key != first[0]]
sorted_indices = sorted(processed_indices, key = lambda y: y[1])
# try to find a columnindex
second = []
if len(sorted_indices) > 0 and sorted_indices[0][0] != first[0]:
second = [sorted_indices[0]]
# check if a second columnindex can be helpful
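            # (heuristic: if on average more than one row remains per cell of the chosen
            #  row/column index, a second column index can still split the data further)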
if len(sorted_indices) > 1 and (height / first[1]) / second[0][1] > 1:
second.append(sorted_indices[1])
# set everything
self.indexsplit = 1
self.set_index(" ".join([i[0] for i in [first] + second]))
logger.info("Automatically set index to ({}, {})".format(self.getRowIndex(), self.getColIndex()))
def recomputeIntegrals(self, exp):
"""
recompute primal and dual integrals of experiment if 'integral' property has been set by user.
"""
if self.integral:
try:
scale, a, b = self.integral.split()
                scale = (scale == "scaled")
if a == "None":
a = None
else:
a = float(a)
if b == "None":
b = None
else:
b = float(b)
logger.info("Recomputing primal and dual integrals with specification '{}': scale: {}, (a,b)=({},{})".format(self.integral, scale, a,b))
exp.calculateIntegrals(scale=scale, lim=(a,b))
            except Exception:
raise Exception("Unrecognized format for integral '{}'".format(self.integral))
def evaluate(self, exp : Experiment):
"""
evaluate the data of an Experiment instance exp
Parameters
----------
exp
an experiment instance for which data has already been collected
Returns
-------
rettab
an instance-wise table of the specified columns
retagg
            aggregated results for every filter group and every entry of the specified columns
"""
self.checkMembers()
self.recomputeIntegrals(exp)
# data is concatenated along the rows and eventually extended by external data
data = exp.getJoinedData().copy()
logger.debug("Result of getJoinedData:\n{}\n".format(data))
self.tryGenerateIndexAndDefaultgroup(data)
# possiblebasegroups = sorted(data[self.getColIndex()[0]].unique())
# logger.info(" Default group <%s> not contained, have only: %s" % (self.getDefaultgroup(), ", ".join(possiblebasegroups)))
# self.defaultgrouptuple = possiblebasegroups[0]
# logger.info(" Using value <%s> as base group" % (self.getDefaultgroup()))
data = self.validateData(data)
# Fill in must happen after index creation and data validation
if self.fillin:
data = self.fillMissingData(data)
else:
# create the column '_miss_' which is created within self.fillMissingData() otherwise
data['_miss_'] = False
data = self.calculateNeededData(data)
logger.debug("Result of calculateNeededData:\n{}\n".format(data))
#
# create a target data frame that has the desired index
#
reduceddata = self.reduceByIndex(data)
reduceddata = self.reduceToColumns(data, reduceddata)
logger.debug("Result of reduceToColumns:\n{}\n".format(reduceddata))
reduceddata = self.addComparisonColumns(reduceddata)
# # TODO Where do we need these following three lines?
# self.instance_wise = ret
# self.agg = self.aggregateToPivotTable(reduceddata)
# logger.debug("Result of aggregateToPivotTable:\n{}\n".format(self.agg))
self.filtered_agg = {}
self.filtered_instancewise = {}
# filter column data and group by group key
activefiltergroups = self.getActiveFilterGroups()
nonemptyactivefiltergroups = activefiltergroups[:]
#
# set global data frame for filter groups to speed up the computations
#
IPETFilterGroup.setGlobalDataFrameAndIndex(reduceddata, self.getRowIndex())
self.computeFilterResults(reduceddata)
self.filter_masks = {}
for fg in activefiltergroups:
# iterate through filter groups, thereby aggregating results for every group
filtergroupdata = self.applyFilterGroup(reduceddata, fg, self.getRowIndex())
if (len(filtergroupdata) == 0):
nonemptyactivefiltergroups.remove(fg)
logger.warning("Filtergroup {} is empty and has been deactived.".format(fg.getName()))
continue
logger.debug("Reduced data for filtergroup {} is:\n{}".format(fg.getName(), filtergroupdata))
self.filtered_instancewise[fg.name] = self.convertToHorizontalFormat(filtergroupdata)
self.filtered_agg[fg.name] = self.aggregateToPivotTable(filtergroupdata)
if len(nonemptyactivefiltergroups) > 0:
if self.getColIndex() == []:
for fg in nonemptyactivefiltergroups:
self.filtered_agg[fg.name].index = [fg.name]
dfs = [self.filtered_agg[fg.name] for fg in nonemptyactivefiltergroups]
names = [fg.name for fg in nonemptyactivefiltergroups]
if self.getColIndex() == []:
self.retagg = | pd.concat(dfs) | pandas.concat |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.linear_model import LogisticRegression
# from sklearn.tree import DecisionTreeClassifier
# from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
import sklearn.metrics as metrics
from sklearn.metrics import confusion_matrix, multilabel_confusion_matrix
from skmultilearn.problem_transform import ClassifierChain
from skmultilearn.problem_transform import BinaryRelevance
from skmultilearn.adapt import MLkNN
from keras.layers import Dense
from keras.models import Sequential
from keras.metrics import *
##########################################################
# Section 1 - Data Loading
##########################################################
# Getting feature data
finalData = np.array(pd.read_csv('D:/UIP/finaldata.csv', index_col='Name'))
biodata = finalData[:, 21:]
# Getting type data as dataframe for visualisations
pType = | pd.read_csv('D:/UIP/primType.csv', index_col=0) | pandas.read_csv |
#!/usr/bin/env python
# coding: utf-8
import os
import ee
import datetime
import tqdm
import json
import pandas as pd
import geopandas as gp
import numpy as np
import rsfuncs as rs
import multiprocessing as mp
import scipy.interpolate as interp
import matplotlib.pyplot as plt
from tqdm import tqdm
from tqdm.contrib.concurrent import process_map # or thread_map
ee.Initialize()
# Helper functions
def dict2arr(data_dict, var_name):
'''converts ee dictionary output from .getInfo() to a numpy array. Wraps array_from_df'''
data = data_dict[var_name]
lats = data_dict['latitude']
lons = data_dict['longitude']
df = pd.DataFrame([data,lats,lons]).T
df.columns = [var_name, "latitude", 'longitude']
arr = rs.array_from_df(df, var_name)
return arr
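# Example (illustrative; 'ET' is a hypothetical band name):
#   arr = dict2arr(modis_dict, 'ET')  # -> 2-D numpy array gridded by latitude/longitude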
def map_cdl2fmp(dictionary,array):
'''maps values on cdl image to the fmp'''
mapping = dictionary.copy()
vec1 = []
vec2 = []
for k,v in mapping.items():
for i in v:
if i == "":
continue
else:
vec1.append(int(i))
vec2.append(int(k))
out_im = np.zeros_like(array)
for k,v in dict(zip(vec1,vec2)).items():
out_im[array==k] =v
return out_im
def map_fmp2kc(dictionary,array):
'''maps values on fmp image to kc'''
mapping = dictionary.copy()
vec1 = []
vec2 = []
for k,v in mapping.items():
vec1.append(k)
vec2.append(v)
out_im = np.zeros_like(array)
for k,v in dict(zip(vec1,vec2)).items():
out_im[array==k] =v
return out_im
def get_monthly_et(dataset, start, end, aoi):
'''
Get gridded monthly ET sums from MODIS
'''
ImageCollection = dataset[0]
var = dataset[1]
scaling_factor = dataset[2]
resolution = dataset[3]
dt_idx = pd.date_range(start,end, freq='MS')
ims = []
seq = ee.List.sequence(0, len(dt_idx)-1)
num_steps = seq.getInfo()
for i in num_steps[:]:
t1 = ee.Date(start).advance(i, 'month')
t2 = t1.advance(1, 'month');
im = ee.Image(ImageCollection.select(var).filterDate(t1, t2).sum().set('system:time_start', t1.millis()))
modis_dat = im.pixelLonLat().addBands(im).multiply(scaling_factor).reduceRegion(reducer=ee.Reducer.toList(),
geometry=aoi,
scale=1000, crs ='EPSG:4326')
modis_dict = modis_dat.getInfo()
modis_im = dict2arr(modis_dict, var)
ims.append(modis_im)
return ims
def calc_monthly_sum(dataset, startdate, enddate, area):
'''
    Calculates monthly sums (pd.DataFrame) for EE data given startdate, enddate, and area.
    Datasets are stored in the `data` dict below.
    Note the "scaling_factor" parameter, which is provided by EE for each dataset and is
    further scaled by the temporal resolution to achieve monthly resolution.
    This is explicitly written in the `data` dict.
    EE will throw a cryptic error if the daterange you input is not valid for the product
    of interest, or if the AOI is e.g. in the middle of the ocean.
'''
ImageCollection = dataset[0]
var = dataset[1]
scaling_factor = dataset[2]
resolution = dataset[3]
dt_idx = pd.date_range(startdate,enddate, freq='MS')
sums = []
seq = ee.List.sequence(0, len(dt_idx)-1)
num_steps = seq.getInfo()
for i in num_steps:
start = ee.Date(startdate).advance(i, 'month')
end = start.advance(1, 'month');
im = ee.Image(ImageCollection.select(var).filterDate(start, end).sum().set('system:time_start', start.millis()))
scale = im.projection().nominalScale()
scaled_im = im.multiply(scaling_factor).multiply(ee.Image.pixelArea()).multiply(1e-12) # mm --> km^3
sumdict = scaled_im.reduceRegion(
reducer = ee.Reducer.sum(),
geometry = area,
scale = resolution,
bestEffort= True)
total = sumdict.getInfo()[var]
sums.append(total)
sumdf = pd.DataFrame(np.array(sums), dt_idx)
sumdf.columns = [var]
df = sumdf.astype(float)
return df
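# Example usage (illustrative; the 'data' dict entry and 'aoi' geometry are defined elsewhere
# in the script): et_km3 = calc_monthly_sum(data['modis_et'], '2010-01-01', '2010-12-31', aoi)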
def resample_1km_30m(im_1km,im_30m):
'''
Interpolates 1 km modis data on to 30m landsat grid
'''
W, H = im_1km.shape[:2]
new_W, new_H = im_30m.shape[:2]
xrange = lambda x: np.linspace(0, 1, x)
f = interp.interp2d(xrange(H), xrange(W), im_1km, kind="linear")
new_arr = f(xrange(new_H), xrange(new_W))
return new_arr
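# Note: interp2d maps both grids onto a normalized [0, 1] x [0, 1] coordinate system, so the
# 1 km values are simply re-evaluated at the (finer) 30 m pixel positions.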
def interp_modis_nans(modis_image):
'''
interpolates nans in modis imagery. Doesn't work if a whole row/col at edge of image is all nans
'''
W, H = modis_image.shape[:2]
# Mask nans
array = np.ma.masked_invalid(modis_image)
# Make the outgrid
xi = np.linspace(0, H, H)
yi = np.linspace(0, W, W)
xx, yy = np.meshgrid(xi, yi)
# xx, yy = np.meshgrid(new_W, new_H)
x1 = xx[~array.mask]
y1 = yy[~array.mask]
newarr = array[~array.mask]
new_arr = interp.griddata((x1, y1), newarr.ravel(), (xx, yy),method='linear')
return new_arr
def find_nearest_nlcd(yearint, yearlist = [2001, 2004, 2006, 2008, 2011, 2013, 2016]):
absolute_diff = lambda list_value : abs(list_value - yearint)
closest_value = min(yearlist, key=absolute_diff)
return closest_value
def process_poly(polylist):
'''
main routine
'''
polygon, polyidx, outdir = polylist[0], polylist[1], polylist[2]
tqdm.write("Processing Polygon {}".format(polyidx))
# Setup write dir
# outdir = os.path.join(os.getcwd(), "../data/ETkc")
# if not os.path.exists(outdir):
# os.mkdir(outdir)
# Check if file already exists
outfn = os.path.join(outdir, str(polyidx) +".csv")
if os.path.exists(outfn):
print("already processed {} ... skipping".format(polyidx))
return
# Load data
kc = | pd.read_csv('../data/fmp_kc_faunt.csv') | pandas.read_csv |
# Copyright (c) 2018-2021, NVIDIA CORPORATION.
import array as arr
import datetime
import io
import operator
import random
import re
import string
import textwrap
from copy import copy
import cupy
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from numba import cuda
import cudf
from cudf.core._compat import PANDAS_GE_110, PANDAS_GE_120
from cudf.core.column import column
from cudf.tests import utils
from cudf.tests.utils import (
ALL_TYPES,
DATETIME_TYPES,
NUMERIC_TYPES,
assert_eq,
assert_exceptions_equal,
does_not_raise,
gen_rand,
)
def test_init_via_list_of_tuples():
data = [
(5, "cats", "jump", np.nan),
(2, "dogs", "dig", 7.5),
(3, "cows", "moo", -2.1, "occasionally"),
]
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(pdf, gdf)
def _dataframe_na_data():
return [
pd.DataFrame(
{
"a": [0, 1, 2, np.nan, 4, None, 6],
"b": [np.nan, None, "u", "h", "d", "a", "m"],
},
index=["q", "w", "e", "r", "t", "y", "u"],
),
pd.DataFrame({"a": [0, 1, 2, 3, 4], "b": ["a", "b", "u", "h", "d"]}),
pd.DataFrame(
{
"a": [None, None, np.nan, None],
"b": [np.nan, None, np.nan, None],
}
),
pd.DataFrame({"a": []}),
pd.DataFrame({"a": [np.nan], "b": [None]}),
pd.DataFrame({"a": ["a", "b", "c", None, "e"]}),
pd.DataFrame({"a": ["a", "b", "c", "d", "e"]}),
]
@pytest.mark.parametrize("rows", [0, 1, 2, 100])
def test_init_via_list_of_empty_tuples(rows):
data = [()] * rows
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(
pdf,
gdf,
check_like=True,
check_column_type=False,
check_index_type=False,
)
@pytest.mark.parametrize(
"dict_of_series",
[
{"a": pd.Series([1.0, 2.0, 3.0])},
{"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 4.0], index=[1, 2, 3]),
},
{"a": [1, 2, 3], "b": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=["a", "b", "c"]),
"b": pd.Series([1.0, 2.0, 4.0], index=["c", "d", "e"]),
},
{
"a": pd.Series(
["a", "b", "c"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
"b": pd.Series(
["a", " b", "d"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
},
],
)
def test_init_from_series_align(dict_of_series):
pdf = pd.DataFrame(dict_of_series)
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
for key in dict_of_series:
if isinstance(dict_of_series[key], pd.Series):
dict_of_series[key] = cudf.Series(dict_of_series[key])
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
("dict_of_series", "expectation"),
[
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 5, 6]),
},
pytest.raises(
ValueError, match="Cannot align indices with non-unique values"
),
),
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
},
does_not_raise(),
),
],
)
def test_init_from_series_align_nonunique(dict_of_series, expectation):
with expectation:
gdf = cudf.DataFrame(dict_of_series)
if expectation == does_not_raise():
pdf = pd.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
def test_init_unaligned_with_index():
pdf = pd.DataFrame(
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
gdf = cudf.DataFrame(
{
"a": cudf.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": cudf.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
assert_eq(pdf, gdf, check_dtype=False)
def test_series_basic():
# Make series from buffer
a1 = np.arange(10, dtype=np.float64)
series = cudf.Series(a1)
assert len(series) == 10
np.testing.assert_equal(series.to_array(), np.hstack([a1]))
def test_series_from_cupy_scalars():
data = [0.1, 0.2, 0.3]
data_np = np.array(data)
data_cp = cupy.array(data)
s_np = cudf.Series([data_np[0], data_np[2]])
s_cp = cudf.Series([data_cp[0], data_cp[2]])
assert_eq(s_np, s_cp)
@pytest.mark.parametrize("a", [[1, 2, 3], [1, 10, 30]])
@pytest.mark.parametrize("b", [[4, 5, 6], [-11, -100, 30]])
def test_append_index(a, b):
df = pd.DataFrame()
df["a"] = a
df["b"] = b
gdf = cudf.DataFrame()
gdf["a"] = a
gdf["b"] = b
# Check the default index after appending two columns(Series)
expected = df.a.append(df.b)
actual = gdf.a.append(gdf.b)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
expected = df.a.append(df.b, ignore_index=True)
actual = gdf.a.append(gdf.b, ignore_index=True)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
def test_series_init_none():
# test for creating empty series
# 1: without initializing
sr1 = cudf.Series()
got = sr1.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
# 2: Using `None` as an initializer
sr2 = cudf.Series(None)
got = sr2.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_basic():
np.random.seed(0)
df = cudf.DataFrame()
# Populate with cuda memory
df["keys"] = np.arange(10, dtype=np.float64)
np.testing.assert_equal(df["keys"].to_array(), np.arange(10))
assert len(df) == 10
# Populate with numpy array
rnd_vals = np.random.random(10)
df["vals"] = rnd_vals
np.testing.assert_equal(df["vals"].to_array(), rnd_vals)
assert len(df) == 10
assert tuple(df.columns) == ("keys", "vals")
# Make another dataframe
df2 = cudf.DataFrame()
df2["keys"] = np.array([123], dtype=np.float64)
df2["vals"] = np.array([321], dtype=np.float64)
# Concat
df = cudf.concat([df, df2])
assert len(df) == 11
hkeys = np.asarray(np.arange(10, dtype=np.float64).tolist() + [123])
hvals = np.asarray(rnd_vals.tolist() + [321])
np.testing.assert_equal(df["keys"].to_array(), hkeys)
np.testing.assert_equal(df["vals"].to_array(), hvals)
# As matrix
mat = df.as_matrix()
expect = np.vstack([hkeys, hvals]).T
np.testing.assert_equal(mat, expect)
# test dataframe with tuple name
df_tup = cudf.DataFrame()
data = np.arange(10)
df_tup[(1, "foobar")] = data
np.testing.assert_equal(data, df_tup[(1, "foobar")].to_array())
df = cudf.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
pdf = pd.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
assert_eq(df, pdf)
gdf = cudf.DataFrame({"id": [0, 1], "val": [None, None]})
gdf["val"] = gdf["val"].astype("int")
assert gdf["val"].isnull().all()
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"columns", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_columns(pdf, columns, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(columns=columns, inplace=inplace)
actual = gdf.drop(columns=columns, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_0(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=0, inplace=inplace)
actual = gdf.drop(labels=labels, axis=0, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"index",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_index(pdf, index, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace)
actual = gdf.drop(index=index, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5},
index=pd.MultiIndex(
levels=[
["lama", "cow", "falcon"],
["speed", "weight", "length"],
],
codes=[
[0, 0, 0, 1, 1, 1, 2, 2, 2, 1],
[0, 1, 2, 0, 1, 2, 0, 1, 2, 1],
],
),
)
],
)
@pytest.mark.parametrize(
"index,level",
[
("cow", 0),
("lama", 0),
("falcon", 0),
("speed", 1),
("weight", 1),
("length", 1),
pytest.param(
"cow",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"lama",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"falcon",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_multiindex(pdf, index, level, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace, level=level)
actual = gdf.drop(index=index, inplace=inplace, level=level)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_1(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=1, inplace=inplace)
actual = gdf.drop(labels=labels, axis=1, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
def test_dataframe_drop_error():
df = cudf.DataFrame({"a": [1], "b": [2], "c": [3]})
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "d"}),
rfunc_args_and_kwargs=([], {"columns": "d"}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
rfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
rfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
expected_error_message="Cannot specify both",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"axis": 1}),
rfunc_args_and_kwargs=([], {"axis": 1}),
expected_error_message="Need to specify at least",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([[2, 0]],),
rfunc_args_and_kwargs=([[2, 0]],),
expected_error_message="One or more values not found in axis",
)
def test_dataframe_drop_raises():
df = cudf.DataFrame(
{"a": [1, 2, 3], "c": [10, 20, 30]}, index=["x", "y", "z"]
)
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["p"],),
rfunc_args_and_kwargs=(["p"],),
expected_error_message="One or more values not found in axis",
)
# label dtype mismatch
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([3],),
rfunc_args_and_kwargs=([3],),
expected_error_message="One or more values not found in axis",
)
expect = pdf.drop("p", errors="ignore")
actual = df.drop("p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "p"}),
rfunc_args_and_kwargs=([], {"columns": "p"}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(columns="p", errors="ignore")
actual = df.drop(columns="p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
rfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(labels="p", axis=1, errors="ignore")
actual = df.drop(labels="p", axis=1, errors="ignore")
assert_eq(actual, expect)
def test_dataframe_column_add_drop_via_setitem():
df = cudf.DataFrame()
data = np.asarray(range(10))
df["a"] = data
df["b"] = data
assert tuple(df.columns) == ("a", "b")
del df["a"]
assert tuple(df.columns) == ("b",)
df["c"] = data
assert tuple(df.columns) == ("b", "c")
df["a"] = data
assert tuple(df.columns) == ("b", "c", "a")
def test_dataframe_column_set_via_attr():
data_0 = np.asarray([0, 2, 4, 5])
data_1 = np.asarray([1, 4, 2, 3])
data_2 = np.asarray([2, 0, 3, 0])
df = cudf.DataFrame({"a": data_0, "b": data_1, "c": data_2})
for i in range(10):
df.c = df.a
assert assert_eq(df.c, df.a, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
df.c = df.b
assert assert_eq(df.c, df.b, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
def test_dataframe_column_drop_via_attr():
df = cudf.DataFrame({"a": []})
with pytest.raises(AttributeError):
del df.a
assert tuple(df.columns) == tuple("a")
@pytest.mark.parametrize("axis", [0, "index"])
def test_dataframe_index_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper={1: 5, 2: 6}, axis=axis)
got = gdf.rename(mapper={1: 5, 2: 6}, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(index={1: 5, 2: 6})
got = gdf.rename(index={1: 5, 2: 6})
assert_eq(expect, got)
expect = pdf.rename({1: 5, 2: 6})
got = gdf.rename({1: 5, 2: 6})
assert_eq(expect, got)
# `pandas` can support indexes with mixed values. We throw a
# `NotImplementedError`.
with pytest.raises(NotImplementedError):
gdf.rename(mapper={1: "x", 2: "y"}, axis=axis)
def test_dataframe_MI_rename():
gdf = cudf.DataFrame(
{"a": np.arange(10), "b": np.arange(10), "c": np.arange(10)}
)
gdg = gdf.groupby(["a", "b"]).count()
pdg = gdg.to_pandas()
expect = pdg.rename(mapper={1: 5, 2: 6}, axis=0)
got = gdg.rename(mapper={1: 5, 2: 6}, axis=0)
assert_eq(expect, got)
@pytest.mark.parametrize("axis", [1, "columns"])
def test_dataframe_column_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper=lambda name: 2 * name, axis=axis)
got = gdf.rename(mapper=lambda name: 2 * name, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(columns=lambda name: 2 * name)
got = gdf.rename(columns=lambda name: 2 * name)
assert_eq(expect, got)
rename_mapper = {"a": "z", "b": "y", "c": "x"}
expect = pdf.rename(columns=rename_mapper)
got = gdf.rename(columns=rename_mapper)
assert_eq(expect, got)
def test_dataframe_pop():
pdf = pd.DataFrame(
{"a": [1, 2, 3], "b": ["x", "y", "z"], "c": [7.0, 8.0, 9.0]}
)
gdf = cudf.DataFrame.from_pandas(pdf)
# Test non-existing column error
with pytest.raises(KeyError) as raises:
gdf.pop("fake_colname")
raises.match("fake_colname")
# check pop numeric column
pdf_pop = pdf.pop("a")
gdf_pop = gdf.pop("a")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check string column
pdf_pop = pdf.pop("b")
gdf_pop = gdf.pop("b")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check float column and empty dataframe
pdf_pop = pdf.pop("c")
gdf_pop = gdf.pop("c")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check empty dataframe edge case
empty_pdf = pd.DataFrame(columns=["a", "b"])
empty_gdf = cudf.DataFrame(columns=["a", "b"])
pb = empty_pdf.pop("b")
gb = empty_gdf.pop("b")
assert len(pb) == len(gb)
assert empty_pdf.empty and empty_gdf.empty
@pytest.mark.parametrize("nelem", [0, 3, 100, 1000])
def test_dataframe_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df["a"].dtype is np.dtype(np.int32)
df["b"] = df["a"].astype(np.float32)
assert df["b"].dtype is np.dtype(np.float32)
np.testing.assert_equal(df["a"].to_array(), df["b"].to_array())
@pytest.mark.parametrize("nelem", [0, 100])
def test_index_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df.index.dtype is np.dtype(np.int64)
df.index = df.index.astype(np.float32)
assert df.index.dtype is np.dtype(np.float32)
df["a"] = df["a"].astype(np.float32)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
df["b"] = df["a"]
df = df.set_index("b")
df["a"] = df["a"].astype(np.int16)
df.index = df.index.astype(np.int16)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
def test_dataframe_to_string():
pd.options.display.max_rows = 5
pd.options.display.max_columns = 8
# Test basic
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
string = str(df)
assert string.splitlines()[-1] == "[6 rows x 2 columns]"
# Test skipped columns
df = cudf.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6],
"b": [11, 12, 13, 14, 15, 16],
"c": [11, 12, 13, 14, 15, 16],
"d": [11, 12, 13, 14, 15, 16],
}
)
string = df.to_string()
assert string.splitlines()[-1] == "[6 rows x 4 columns]"
# Test masked
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
data = np.arange(6)
mask = np.zeros(1, dtype=cudf.utils.utils.mask_dtype)
mask[0] = 0b00101101
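    # bits 0, 2, 3 and 5 of the mask byte are set, so those row positions stay valid
    # (rows 1 and 4 become null), matching validids below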
masked = cudf.Series.from_masked_array(data, mask)
assert masked.null_count == 2
df["c"] = masked
# check data
values = masked.copy()
validids = [0, 2, 3, 5]
densearray = masked.to_array()
np.testing.assert_equal(data[validids], densearray)
    # valid positions are correct
for i in validids:
assert data[i] == values[i]
# null position is correct
for i in range(len(values)):
if i not in validids:
assert values[i] is cudf.NA
pd.options.display.max_rows = 10
got = df.to_string()
expect = """
a b c
0 1 11 0
1 2 12 <NA>
2 3 13 2
3 4 14 3
4 5 15 <NA>
5 6 16 5
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_to_string_wide(monkeypatch):
monkeypatch.setenv("COLUMNS", "79")
# Test basic
df = cudf.DataFrame()
for i in range(100):
df["a{}".format(i)] = list(range(3))
pd.options.display.max_columns = 0
got = df.to_string()
expect = """
a0 a1 a2 a3 a4 a5 a6 a7 ... a92 a93 a94 a95 a96 a97 a98 a99
0 0 0 0 0 0 0 0 0 ... 0 0 0 0 0 0 0 0
1 1 1 1 1 1 1 1 1 ... 1 1 1 1 1 1 1 1
2 2 2 2 2 2 2 2 2 ... 2 2 2 2 2 2 2 2
[3 rows x 100 columns]
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_empty_to_string():
# Test for printing empty dataframe
df = cudf.DataFrame()
got = df.to_string()
expect = "Empty DataFrame\nColumns: []\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_emptycolumns_to_string():
# Test for printing dataframe having empty columns
df = cudf.DataFrame()
df["a"] = []
df["b"] = []
got = df.to_string()
expect = "Empty DataFrame\nColumns: [a, b]\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy():
# Test for copying the dataframe using python copy pkg
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = copy(df)
df2["b"] = [4, 5, 6]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy_shallow():
# Test for copy dataframe using class method
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = df.copy()
df2["b"] = [4, 2, 3]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_dtypes():
dtypes = pd.Series(
[np.int32, np.float32, np.float64], index=["c", "a", "b"]
)
df = cudf.DataFrame(
{k: np.ones(10, dtype=v) for k, v in dtypes.iteritems()}
)
assert df.dtypes.equals(dtypes)
def test_dataframe_add_col_to_object_dataframe():
# Test for adding column to an empty object dataframe
cols = ["a", "b", "c"]
df = pd.DataFrame(columns=cols, dtype="str")
data = {k: v for (k, v) in zip(cols, [["a"] for _ in cols])}
gdf = cudf.DataFrame(data)
gdf = gdf[:0]
assert gdf.dtypes.equals(df.dtypes)
gdf["a"] = [1]
df["a"] = [10]
assert gdf.dtypes.equals(df.dtypes)
gdf["b"] = [1.0]
df["b"] = [10.0]
assert gdf.dtypes.equals(df.dtypes)
def test_dataframe_dir_and_getattr():
df = cudf.DataFrame(
{
"a": np.ones(10),
"b": np.ones(10),
"not an id": np.ones(10),
"oop$": np.ones(10),
}
)
o = dir(df)
assert {"a", "b"}.issubset(o)
assert "not an id" not in o
assert "oop$" not in o
# Getattr works
assert df.a.equals(df["a"])
assert df.b.equals(df["b"])
with pytest.raises(AttributeError):
df.not_a_column
@pytest.mark.parametrize("order", ["C", "F"])
def test_empty_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
# Check fully empty dataframe.
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 0)
df = cudf.DataFrame()
nelem = 123
for k in "abc":
df[k] = np.random.random(nelem)
# Check all columns in empty dataframe.
mat = df.head(0).as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 3)
@pytest.mark.parametrize("order", ["C", "F"])
def test_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
nelem = 123
for k in "abcd":
df[k] = np.random.random(nelem)
# Check all columns
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (nelem, 4)
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
# Check column subset
mat = df.as_gpu_matrix(order=order, columns=["a", "c"]).copy_to_host()
assert mat.shape == (nelem, 2)
for i, k in enumerate("ac"):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
def test_dataframe_as_gpu_matrix_null_values():
df = cudf.DataFrame()
nelem = 123
na = -10000
refvalues = {}
for k in "abcd":
df[k] = data = np.random.random(nelem)
bitmask = utils.random_bitmask(nelem)
df[k] = df[k].set_mask(bitmask)
boolmask = np.asarray(
utils.expand_bits_to_bytes(bitmask)[:nelem], dtype=np.bool_
)
data[~boolmask] = na
refvalues[k] = data
# Check null value causes error
with pytest.raises(ValueError) as raises:
df.as_gpu_matrix()
raises.match("column 'a' has null values")
for k in df.columns:
df[k] = df[k].fillna(na)
mat = df.as_gpu_matrix().copy_to_host()
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(refvalues[k], mat[:, i])
def test_dataframe_append_empty():
pdf = pd.DataFrame(
{
"key": [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4],
"value": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
gdf["newcol"] = 100
pdf["newcol"] = 100
assert len(gdf["newcol"]) == len(pdf)
assert len(pdf["newcol"]) == len(pdf)
assert_eq(gdf, pdf)
def test_dataframe_setitem_from_masked_object():
ary = np.random.randn(100)
mask = np.zeros(100, dtype=bool)
mask[:20] = True
np.random.shuffle(mask)
ary[mask] = np.nan
test1_null = cudf.Series(ary, nan_as_null=True)
assert test1_null.nullable
assert test1_null.null_count == 20
test1_nan = cudf.Series(ary, nan_as_null=False)
assert test1_nan.null_count == 0
test2_null = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=True
)
assert test2_null["a"].nullable
assert test2_null["a"].null_count == 20
test2_nan = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=False
)
assert test2_nan["a"].null_count == 0
gpu_ary = cupy.asarray(ary)
test3_null = cudf.Series(gpu_ary, nan_as_null=True)
assert test3_null.nullable
assert test3_null.null_count == 20
test3_nan = cudf.Series(gpu_ary, nan_as_null=False)
assert test3_nan.null_count == 0
test4 = cudf.DataFrame()
lst = [1, 2, None, 4, 5, 6, None, 8, 9]
test4["lst"] = lst
assert test4["lst"].nullable
assert test4["lst"].null_count == 2
def test_dataframe_append_to_empty():
pdf = pd.DataFrame()
pdf["a"] = []
pdf["b"] = [1, 2, 3]
gdf = cudf.DataFrame()
gdf["a"] = []
gdf["b"] = [1, 2, 3]
assert_eq(gdf, pdf)
def test_dataframe_setitem_index_len1():
gdf = cudf.DataFrame()
gdf["a"] = [1]
gdf["b"] = gdf.index._values
np.testing.assert_equal(gdf.b.to_array(), [0])
def test_empty_dataframe_setitem_df():
gdf1 = cudf.DataFrame()
gdf2 = cudf.DataFrame({"a": [1, 2, 3, 4, 5]})
gdf1["a"] = gdf2["a"]
assert_eq(gdf1, gdf2)
def test_assign():
gdf = cudf.DataFrame({"x": [1, 2, 3]})
gdf2 = gdf.assign(y=gdf.x + 1)
assert list(gdf.columns) == ["x"]
assert list(gdf2.columns) == ["x", "y"]
np.testing.assert_equal(gdf2.y.to_array(), [2, 3, 4])
@pytest.mark.parametrize("nrows", [1, 8, 100, 1000])
def test_dataframe_hash_columns(nrows):
gdf = cudf.DataFrame()
data = np.asarray(range(nrows))
data[0] = data[-1] # make first and last the same
gdf["a"] = data
gdf["b"] = gdf.a + 100
out = gdf.hash_columns(["a", "b"])
assert isinstance(out, cupy.ndarray)
assert len(out) == nrows
assert out.dtype == np.int32
# Check default
out_all = gdf.hash_columns()
np.testing.assert_array_equal(cupy.asnumpy(out), cupy.asnumpy(out_all))
# Check single column
out_one = cupy.asnumpy(gdf.hash_columns(["a"]))
# First matches last
assert out_one[0] == out_one[-1]
# Equivalent to the cudf.Series.hash_values()
np.testing.assert_array_equal(cupy.asnumpy(gdf.a.hash_values()), out_one)
@pytest.mark.parametrize("nrows", [3, 10, 100, 1000])
@pytest.mark.parametrize("nparts", [1, 2, 8, 13])
@pytest.mark.parametrize("nkeys", [1, 2])
def test_dataframe_hash_partition(nrows, nparts, nkeys):
np.random.seed(123)
gdf = cudf.DataFrame()
keycols = []
for i in range(nkeys):
keyname = "key{}".format(i)
gdf[keyname] = np.random.randint(0, 7 - i, nrows)
keycols.append(keyname)
gdf["val1"] = np.random.randint(0, nrows * 2, nrows)
got = gdf.partition_by_hash(keycols, nparts=nparts)
# Must return a list
assert isinstance(got, list)
# Must have correct number of partitions
assert len(got) == nparts
# All partitions must be DataFrame type
assert all(isinstance(p, cudf.DataFrame) for p in got)
# Check that all partitions have unique keys
part_unique_keys = set()
for p in got:
if len(p):
# Take rows of the keycolumns and build a set of the key-values
unique_keys = set(map(tuple, p.as_matrix(columns=keycols)))
# Ensure that none of the key-values have occurred in other groups
assert not (unique_keys & part_unique_keys)
part_unique_keys |= unique_keys
assert len(part_unique_keys)
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_value(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["val"] = gdf["val"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.key])
expected_value = row.key + 100 if valid else np.nan
got_value = row.val
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_keys(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["key"] = gdf["key"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3, keep_index=False)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.val - 100])
# val is key + 100
expected_value = row.val - 100 if valid else np.nan
got_value = row.key
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("keep_index", [True, False])
def test_dataframe_hash_partition_keep_index(keep_index):
gdf = cudf.DataFrame(
{"val": [1, 2, 3, 4], "key": [3, 2, 1, 4]}, index=[4, 3, 2, 1]
)
expected_df1 = cudf.DataFrame(
{"val": [1], "key": [3]}, index=[4] if keep_index else None
)
expected_df2 = cudf.DataFrame(
{"val": [2, 3, 4], "key": [2, 1, 4]},
index=[3, 2, 1] if keep_index else range(1, 4),
)
expected = [expected_df1, expected_df2]
parts = gdf.partition_by_hash(["key"], nparts=2, keep_index=keep_index)
for exp, got in zip(expected, parts):
assert_eq(exp, got)
def test_dataframe_hash_partition_empty():
gdf = cudf.DataFrame({"val": [1, 2], "key": [3, 2]}, index=["a", "b"])
parts = gdf.iloc[:0].partition_by_hash(["key"], nparts=3)
assert len(parts) == 3
for part in parts:
assert_eq(gdf.iloc[:0], part)
@pytest.mark.parametrize("dtype1", utils.supported_numpy_dtypes)
@pytest.mark.parametrize("dtype2", utils.supported_numpy_dtypes)
def test_dataframe_concat_different_numerical_columns(dtype1, dtype2):
df1 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype1)))
df2 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype2)))
if dtype1 != dtype2 and "datetime" in dtype1 or "datetime" in dtype2:
with pytest.raises(TypeError):
cudf.concat([df1, df2])
else:
pres = pd.concat([df1, df2])
gres = cudf.concat([cudf.from_pandas(df1), cudf.from_pandas(df2)])
assert_eq(cudf.from_pandas(pres), gres)
def test_dataframe_concat_different_column_types():
df1 = cudf.Series([42], dtype=np.float64)
df2 = cudf.Series(["a"], dtype="category")
with pytest.raises(ValueError):
cudf.concat([df1, df2])
df2 = cudf.Series(["a string"])
with pytest.raises(TypeError):
cudf.concat([df1, df2])
@pytest.mark.parametrize(
"df_1", [cudf.DataFrame({"a": [1, 2], "b": [1, 3]}), cudf.DataFrame({})]
)
@pytest.mark.parametrize(
"df_2", [cudf.DataFrame({"a": [], "b": []}), cudf.DataFrame({})]
)
def test_concat_empty_dataframe(df_1, df_2):
got = cudf.concat([df_1, df_2])
expect = pd.concat([df_1.to_pandas(), df_2.to_pandas()], sort=False)
# ignoring dtypes as pandas upcasts int to float
# on concatenation with empty dataframes
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"df1_d",
[
{"a": [1, 2], "b": [1, 2], "c": ["s1", "s2"], "d": [1.0, 2.0]},
{"b": [1.9, 10.9], "c": ["s1", "s2"]},
{"c": ["s1"], "b": [None], "a": [False]},
],
)
@pytest.mark.parametrize(
"df2_d",
[
{"a": [1, 2, 3]},
{"a": [1, None, 3], "b": [True, True, False], "c": ["s3", None, "s4"]},
{"a": [], "b": []},
{},
],
)
def test_concat_different_column_dataframe(df1_d, df2_d):
got = cudf.concat(
[cudf.DataFrame(df1_d), cudf.DataFrame(df2_d), cudf.DataFrame(df1_d)],
sort=False,
)
expect = pd.concat(
[pd.DataFrame(df1_d), pd.DataFrame(df2_d), pd.DataFrame(df1_d)],
sort=False,
)
# numerical columns are upcasted to float in cudf.DataFrame.to_pandas()
# casts nan to 0 in non-float numerical columns
numeric_cols = got.dtypes[got.dtypes != "object"].index
for col in numeric_cols:
got[col] = got[col].astype(np.float64).fillna(np.nan)
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"ser_1", [pd.Series([1, 2, 3]), pd.Series([], dtype="float64")]
)
@pytest.mark.parametrize("ser_2", [pd.Series([], dtype="float64")])
def test_concat_empty_series(ser_1, ser_2):
got = cudf.concat([cudf.Series(ser_1), cudf.Series(ser_2)])
expect = pd.concat([ser_1, ser_2])
assert_eq(got, expect)
def test_concat_with_axis():
df1 = pd.DataFrame(dict(x=np.arange(5), y=np.arange(5)))
df2 = pd.DataFrame(dict(a=np.arange(5), b=np.arange(5)))
concat_df = pd.concat([df1, df2], axis=1)
cdf1 = cudf.from_pandas(df1)
cdf2 = cudf.from_pandas(df2)
# concat only dataframes
concat_cdf = cudf.concat([cdf1, cdf2], axis=1)
assert_eq(concat_cdf, concat_df)
# concat only series
concat_s = pd.concat([df1.x, df1.y], axis=1)
cs1 = cudf.Series.from_pandas(df1.x)
cs2 = cudf.Series.from_pandas(df1.y)
concat_cdf_s = cudf.concat([cs1, cs2], axis=1)
assert_eq(concat_cdf_s, concat_s)
# concat series and dataframes
s3 = pd.Series(np.random.random(5))
cs3 = cudf.Series.from_pandas(s3)
concat_cdf_all = cudf.concat([cdf1, cs3, cdf2], axis=1)
concat_df_all = pd.concat([df1, s3, df2], axis=1)
assert_eq(concat_cdf_all, concat_df_all)
# concat manual multi index
midf1 = cudf.from_pandas(df1)
midf1.index = cudf.MultiIndex(
levels=[[0, 1, 2, 3], [0, 1]], codes=[[0, 1, 2, 3, 2], [0, 1, 0, 1, 0]]
)
midf2 = midf1[2:]
midf2.index = cudf.MultiIndex(
levels=[[3, 4, 5], [2, 0]], codes=[[0, 1, 2], [1, 0, 1]]
)
mipdf1 = midf1.to_pandas()
mipdf2 = midf2.to_pandas()
assert_eq(cudf.concat([midf1, midf2]), pd.concat([mipdf1, mipdf2]))
assert_eq(cudf.concat([midf2, midf1]), pd.concat([mipdf2, mipdf1]))
assert_eq(
cudf.concat([midf1, midf2, midf1]), pd.concat([mipdf1, mipdf2, mipdf1])
)
# concat groupby multi index
gdf1 = cudf.DataFrame(
{
"x": np.random.randint(0, 10, 10),
"y": np.random.randint(0, 10, 10),
"z": np.random.randint(0, 10, 10),
"v": np.random.randint(0, 10, 10),
}
)
gdf2 = gdf1[5:]
gdg1 = gdf1.groupby(["x", "y"]).min()
gdg2 = gdf2.groupby(["x", "y"]).min()
pdg1 = gdg1.to_pandas()
pdg2 = gdg2.to_pandas()
assert_eq(cudf.concat([gdg1, gdg2]), pd.concat([pdg1, pdg2]))
assert_eq(cudf.concat([gdg2, gdg1]), pd.concat([pdg2, pdg1]))
# series multi index concat
gdgz1 = gdg1.z
gdgz2 = gdg2.z
pdgz1 = gdgz1.to_pandas()
pdgz2 = gdgz2.to_pandas()
assert_eq(cudf.concat([gdgz1, gdgz2]), pd.concat([pdgz1, pdgz2]))
assert_eq(cudf.concat([gdgz2, gdgz1]), pd.concat([pdgz2, pdgz1]))
@pytest.mark.parametrize("nrows", [0, 3, 10, 100, 1000])
def test_nonmatching_index_setitem(nrows):
np.random.seed(0)
gdf = cudf.DataFrame()
gdf["a"] = np.random.randint(2147483647, size=nrows)
gdf["b"] = np.random.randint(2147483647, size=nrows)
gdf = gdf.set_index("b")
test_values = np.random.randint(2147483647, size=nrows)
gdf["c"] = test_values
assert len(test_values) == len(gdf["c"])
assert (
gdf["c"]
.to_pandas()
.equals(cudf.Series(test_values).set_index(gdf._index).to_pandas())
)
def test_from_pandas():
df = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
gdf = cudf.DataFrame.from_pandas(df)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
s = df.x
gs = cudf.Series.from_pandas(s)
assert isinstance(gs, cudf.Series)
assert_eq(s, gs)
@pytest.mark.parametrize("dtypes", [int, float])
def test_from_records(dtypes):
h_ary = np.ndarray(shape=(10, 4), dtype=dtypes)
rec_ary = h_ary.view(np.recarray)
gdf = cudf.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
df = pd.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame.from_records(rec_ary)
df = pd.DataFrame.from_records(rec_ary)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
@pytest.mark.parametrize("columns", [None, ["first", "second", "third"]])
@pytest.mark.parametrize(
"index",
[
None,
["first", "second"],
"name",
"age",
"weight",
[10, 11],
["abc", "xyz"],
],
)
def test_from_records_index(columns, index):
rec_ary = np.array(
[("Rex", 9, 81.0), ("Fido", 3, 27.0)],
dtype=[("name", "U10"), ("age", "i4"), ("weight", "f4")],
)
gdf = cudf.DataFrame.from_records(rec_ary, columns=columns, index=index)
df = pd.DataFrame.from_records(rec_ary, columns=columns, index=index)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
def test_dataframe_construction_from_cupy_arrays():
h_ary = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
d_ary = cupy.asarray(h_ary)
gdf = cudf.DataFrame(d_ary, columns=["a", "b", "c"])
df = pd.DataFrame(h_ary, columns=["a", "b", "c"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
df = pd.DataFrame(h_ary)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary, index=["a", "b"])
df = pd.DataFrame(h_ary, index=["a", "b"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
gdf = gdf.set_index(keys=0, drop=False)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=0, drop=False)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
gdf = gdf.set_index(keys=1, drop=False)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=1, drop=False)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
def test_dataframe_cupy_wrong_dimensions():
d_ary = cupy.empty((2, 3, 4), dtype=np.int32)
with pytest.raises(
ValueError, match="records dimension expected 1 or 2 but found: 3"
):
cudf.DataFrame(d_ary)
def test_dataframe_cupy_array_wrong_index():
d_ary = cupy.empty((2, 3), dtype=np.int32)
with pytest.raises(
ValueError,
match="Length mismatch: Expected axis has 2 elements, "
"new values have 1 elements",
):
cudf.DataFrame(d_ary, index=["a"])
with pytest.raises(
ValueError,
match="Length mismatch: Expected axis has 2 elements, "
"new values have 1 elements",
):
cudf.DataFrame(d_ary, index="a")
def test_index_in_dataframe_constructor():
a = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
b = cudf.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
assert_eq(a, b)
assert_eq(a.loc[4:], b.loc[4:])
dtypes = NUMERIC_TYPES + DATETIME_TYPES + ["bool"]
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_from_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
padf = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
gdf = cudf.DataFrame.from_arrow(padf)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
s = pa.Array.from_pandas(df.a)
gs = cudf.Series.from_arrow(s)
assert isinstance(gs, cudf.Series)
# For some reason PyArrow to_pandas() converts to numpy array and has
# better type compatibility
np.testing.assert_array_equal(s.to_pandas(), gs.to_array())
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_to_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
gdf = cudf.DataFrame.from_pandas(df)
pa_df = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)
assert isinstance(pa_gdf, pa.Table)
assert pa.Table.equals(pa_df, pa_gdf)
pa_s = pa.Array.from_pandas(df.a)
pa_gs = gdf["a"].to_arrow()
assert isinstance(pa_gs, pa.Array)
assert pa.Array.equals(pa_s, pa_gs)
pa_i = pa.Array.from_pandas(df.index)
pa_gi = gdf.index.to_arrow()
assert isinstance(pa_gi, pa.Array)
assert pa.Array.equals(pa_i, pa_gi)
@pytest.mark.parametrize("data_type", dtypes)
def test_to_from_arrow_nulls(data_type):
if data_type == "longlong":
data_type = "int64"
if data_type == "bool":
s1 = pa.array([True, None, False, None, True], type=data_type)
else:
dtype = np.dtype(data_type)
if dtype.type == np.datetime64:
time_unit, _ = np.datetime_data(dtype)
data_type = pa.timestamp(unit=time_unit)
s1 = pa.array([1, None, 3, None, 5], type=data_type)
gs1 = cudf.Series.from_arrow(s1)
assert isinstance(gs1, cudf.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
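# buffers()[0] is the Arrow validity bitmap for the array.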
np.testing.assert_array_equal(
np.asarray(s1.buffers()[0]).view("u1")[0],
gs1._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s1, gs1.to_arrow())
s2 = pa.array([None, None, None, None, None], type=data_type)
gs2 = cudf.Series.from_arrow(s2)
assert isinstance(gs2, cudf.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s2.buffers()[0]).view("u1")[0],
gs2._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s2, gs2.to_arrow())
def test_to_arrow_categorical():
df = pd.DataFrame()
df["a"] = pd.Series(["a", "b", "c"], dtype="category")
gdf = cudf.DataFrame.from_pandas(df)
pa_df = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)
assert isinstance(pa_gdf, pa.Table)
assert pa.Table.equals(pa_df, pa_gdf)
pa_s = pa.Array.from_pandas(df.a)
pa_gs = gdf["a"].to_arrow()
assert isinstance(pa_gs, pa.Array)
assert pa.Array.equals(pa_s, pa_gs)
def test_from_arrow_missing_categorical():
pd_cat = pd.Categorical(["a", "b", "c"], categories=["a", "b"])
pa_cat = pa.array(pd_cat, from_pandas=True)
gd_cat = cudf.Series(pa_cat)
assert isinstance(gd_cat, cudf.Series)
assert_eq(
pd.Series(pa_cat.to_pandas()), # PyArrow returns a pd.Categorical
gd_cat.to_pandas(),
)
def test_to_arrow_missing_categorical():
pd_cat = pd.Categorical(["a", "b", "c"], categories=["a", "b"])
pa_cat = pa.array(pd_cat, from_pandas=True)
gd_cat = cudf.Series(pa_cat)
assert isinstance(gd_cat, cudf.Series)
assert pa.Array.equals(pa_cat, gd_cat.to_arrow())
@pytest.mark.parametrize("data_type", dtypes)
def test_from_scalar_typing(data_type):
if data_type == "datetime64[ms]":
scalar = (
np.dtype("int64")
.type(np.random.randint(0, 5))
.astype("datetime64[ms]")
)
elif data_type.startswith("datetime64"):
scalar = np.datetime64(datetime.date.today()).astype("datetime64[ms]")
data_type = "datetime64[ms]"
else:
scalar = np.dtype(data_type).type(np.random.randint(0, 5))
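# Assigning a scalar to a column should broadcast it to the frame's length.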
gdf = cudf.DataFrame()
gdf["a"] = [1, 2, 3, 4, 5]
gdf["b"] = scalar
assert gdf["b"].dtype == np.dtype(data_type)
assert len(gdf["b"]) == len(gdf["a"])
@pytest.mark.parametrize("data_type", NUMERIC_TYPES)
def test_from_python_array(data_type):
np_arr = np.random.randint(0, 100, 10).astype(data_type)
data = memoryview(np_arr)
data = arr.array(data.format, data)
gs = cudf.Series(data)
np.testing.assert_equal(gs.to_array(), np_arr)
def test_series_shape():
ps = pd.Series([1, 2, 3, 4])
cs = cudf.Series([1, 2, 3, 4])
assert ps.shape == cs.shape
def test_series_shape_empty():
ps = pd.Series(dtype="float64")
cs = cudf.Series([])
assert ps.shape == cs.shape
def test_dataframe_shape():
pdf = pd.DataFrame({"a": [0, 1, 2, 3], "b": [0.1, 0.2, None, 0.3]})
gdf = cudf.DataFrame.from_pandas(pdf)
assert pdf.shape == gdf.shape
def test_dataframe_shape_empty():
pdf = pd.DataFrame()
gdf = cudf.DataFrame()
assert pdf.shape == gdf.shape
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 20])
@pytest.mark.parametrize("dtype", dtypes)
@pytest.mark.parametrize("nulls", ["none", "some", "all"])
def test_dataframe_transpose(nulls, num_cols, num_rows, dtype):
pdf = pd.DataFrame()
null_rep = np.nan if dtype in ["float32", "float64"] else None
for i in range(num_cols):
colname = string.ascii_lowercase[i]
data = pd.Series(np.random.randint(0, 26, num_rows).astype(dtype))
if nulls == "some":
idx = np.random.choice(
num_rows, size=int(num_rows / 2), replace=False
)
data[idx] = null_rep
elif nulls == "all":
data[:] = null_rep
pdf[colname] = data
gdf = cudf.DataFrame.from_pandas(pdf)
got_function = gdf.transpose()
got_property = gdf.T
expect = pdf.transpose()
assert_eq(expect, got_function)
assert_eq(expect, got_property)
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 20])
def test_dataframe_transpose_category(num_cols, num_rows):
pdf = pd.DataFrame()
for i in range(num_cols):
colname = string.ascii_lowercase[i]
data = pd.Series(list(string.ascii_lowercase), dtype="category")
data = data.sample(num_rows, replace=True).reset_index(drop=True)
pdf[colname] = data
gdf = cudf.DataFrame.from_pandas(pdf)
got_function = gdf.transpose()
got_property = gdf.T
expect = pdf.transpose()
assert_eq(expect, got_function.to_pandas())
assert_eq(expect, got_property.to_pandas())
def test_generated_column():
gdf = cudf.DataFrame({"a": (i for i in range(5))})
assert len(gdf) == 5
@pytest.fixture
def pdf():
return pd.DataFrame({"x": range(10), "y": range(10)})
@pytest.fixture
def gdf(pdf):
return cudf.DataFrame.from_pandas(pdf)
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize(
"func",
[
lambda df, **kwargs: df.min(**kwargs),
lambda df, **kwargs: df.max(**kwargs),
lambda df, **kwargs: df.sum(**kwargs),
lambda df, **kwargs: df.product(**kwargs),
lambda df, **kwargs: df.cummin(**kwargs),
lambda df, **kwargs: df.cummax(**kwargs),
lambda df, **kwargs: df.cumsum(**kwargs),
lambda df, **kwargs: df.cumprod(**kwargs),
lambda df, **kwargs: df.mean(**kwargs),
lambda df, **kwargs: df.sum(**kwargs),
lambda df, **kwargs: df.max(**kwargs),
lambda df, **kwargs: df.std(ddof=1, **kwargs),
lambda df, **kwargs: df.var(ddof=1, **kwargs),
lambda df, **kwargs: df.std(ddof=2, **kwargs),
lambda df, **kwargs: df.var(ddof=2, **kwargs),
lambda df, **kwargs: df.kurt(**kwargs),
lambda df, **kwargs: df.skew(**kwargs),
lambda df, **kwargs: df.all(**kwargs),
lambda df, **kwargs: df.any(**kwargs),
],
)
@pytest.mark.parametrize("skipna", [True, False, None])
def test_dataframe_reductions(data, func, skipna):
pdf = pd.DataFrame(data=data)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(func(pdf, skipna=skipna), func(gdf, skipna=skipna))
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize("func", [lambda df: df.count()])
def test_dataframe_count_reduction(data, func):
pdf = pd.DataFrame(data=data)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(func(pdf), func(gdf))
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize("ops", ["sum", "product", "prod"])
@pytest.mark.parametrize("skipna", [True, False, None])
@pytest.mark.parametrize("min_count", [-10, -1, 0, 1, 2, 3, 10])
def test_dataframe_min_count_ops(data, ops, skipna, min_count):
psr = pd.DataFrame(data)
gsr = cudf.DataFrame(data)
if PANDAS_GE_120 and psr.shape[0] * psr.shape[1] < min_count:
pytest.xfail("https://github.com/pandas-dev/pandas/issues/39738")
assert_eq(
getattr(psr, ops)(skipna=skipna, min_count=min_count),
getattr(gsr, ops)(skipna=skipna, min_count=min_count),
check_dtype=False,
)
@pytest.mark.parametrize(
"binop",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_binops_df(pdf, gdf, binop):
pdf = pdf + 1.0
gdf = gdf + 1.0
d = binop(pdf, pdf)
g = binop(gdf, gdf)
assert_eq(d, g)
@pytest.mark.parametrize("binop", [operator.and_, operator.or_, operator.xor])
def test_bitwise_binops_df(pdf, gdf, binop):
d = binop(pdf, pdf + 1)
g = binop(gdf, gdf + 1)
assert_eq(d, g)
@pytest.mark.parametrize(
"binop",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_binops_series(pdf, gdf, binop):
pdf = pdf + 1.0
gdf = gdf + 1.0
d = binop(pdf.x, pdf.y)
g = binop(gdf.x, gdf.y)
assert_eq(d, g)
@pytest.mark.parametrize("binop", [operator.and_, operator.or_, operator.xor])
def test_bitwise_binops_series(pdf, gdf, binop):
d = binop(pdf.x, pdf.y + 1)
g = binop(gdf.x, gdf.y + 1)
assert_eq(d, g)
@pytest.mark.parametrize("unaryop", [operator.neg, operator.inv, operator.abs])
def test_unaryops_df(pdf, gdf, unaryop):
d = unaryop(pdf - 5)
g = unaryop(gdf - 5)
assert_eq(d, g)
@pytest.mark.parametrize(
"func",
[
lambda df: df.empty,
lambda df: df.x.empty,
lambda df: df.x.fillna(123, limit=None, method=None, axis=None),
lambda df: df.drop("x", axis=1, errors="raise"),
],
)
def test_unary_operators(func, pdf, gdf):
p = func(pdf)
g = func(gdf)
assert_eq(p, g)
def test_is_monotonic(gdf):
pdf = pd.DataFrame({"x": [1, 2, 3]}, index=[3, 1, 2])
gdf = cudf.DataFrame.from_pandas(pdf)
assert not gdf.index.is_monotonic
assert not gdf.index.is_monotonic_increasing
assert not gdf.index.is_monotonic_decreasing
def test_iter(pdf, gdf):
assert list(pdf) == list(gdf)
def test_iteritems(gdf):
for k, v in gdf.iteritems():
assert k in gdf.columns
assert isinstance(v, cudf.Series)
assert_eq(v, gdf[k])
@pytest.mark.parametrize("q", [0.5, 1, 0.001, [0.5], [], [0.005, 0.5, 1]])
@pytest.mark.parametrize("numeric_only", [True, False])
def test_quantile(q, numeric_only):
ts = pd.date_range("2018-08-24", periods=5, freq="D")
td = pd.to_timedelta(np.arange(5), unit="h")
pdf = pd.DataFrame(
{"date": ts, "delta": td, "val": np.random.randn(len(ts))}
)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(pdf["date"].quantile(q), gdf["date"].quantile(q))
assert_eq(pdf["delta"].quantile(q), gdf["delta"].quantile(q))
assert_eq(pdf["val"].quantile(q), gdf["val"].quantile(q))
if numeric_only:
assert_eq(pdf.quantile(q), gdf.quantile(q))
else:
q = q if isinstance(q, list) else [q]
assert_eq(
pdf.quantile(q, numeric_only=False),
gdf.quantile(q, numeric_only=False),
)
def test_empty_quantile():
pdf = pd.DataFrame({"x": []})
df = cudf.DataFrame({"x": []})
actual = df.quantile()
expected = pdf.quantile()
assert_eq(actual, expected)
def test_from_pandas_function(pdf):
gdf = cudf.from_pandas(pdf)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(pdf, gdf)
gdf = cudf.from_pandas(pdf.x)
assert isinstance(gdf, cudf.Series)
assert_eq(pdf.x, gdf)
with pytest.raises(TypeError):
cudf.from_pandas(123)
@pytest.mark.parametrize("preserve_index", [True, False])
def test_arrow_pandas_compat(pdf, gdf, preserve_index):
pdf["z"] = range(10)
pdf = pdf.set_index("z")
gdf["z"] = range(10)
gdf = gdf.set_index("z")
pdf_arrow_table = pa.Table.from_pandas(pdf, preserve_index=preserve_index)
gdf_arrow_table = gdf.to_arrow(preserve_index=preserve_index)
assert pa.Table.equals(pdf_arrow_table, gdf_arrow_table)
gdf2 = cudf.DataFrame.from_arrow(pdf_arrow_table)
pdf2 = pdf_arrow_table.to_pandas()
assert_eq(pdf2, gdf2)
@pytest.mark.parametrize("nrows", [1, 8, 100, 1000, 100000])
def test_series_hash_encode(nrows):
data = np.asarray(range(nrows))
# Python's hash() of a string can vary between runs, which sometimes makes
# enc_with_name_arr and enc_arr come out identical, and there is no reliable
# way to force a constant string hash. Use an integer name instead so hash()
# returns a constant value.
s = cudf.Series(data, name=1)
num_features = 1000
encoded_series = s.hash_encode(num_features)
assert isinstance(encoded_series, cudf.Series)
enc_arr = encoded_series.to_array()
assert np.all(enc_arr >= 0)
assert np.max(enc_arr) < num_features
enc_with_name_arr = s.hash_encode(num_features, use_name=True).to_array()
assert enc_with_name_arr[0] != enc_arr[0]
@pytest.mark.parametrize("dtype", NUMERIC_TYPES + ["bool"])
def test_cuda_array_interface(dtype):
np_data = np.arange(10).astype(dtype)
cupy_data = cupy.array(np_data)
pd_data = pd.Series(np_data)
cudf_data = cudf.Series(cupy_data)
assert_eq(pd_data, cudf_data)
gdf = cudf.DataFrame()
gdf["test"] = cupy_data
pd_data.name = "test"
assert_eq(pd_data, gdf["test"])
@pytest.mark.parametrize("nelem", [0, 2, 3, 100])
@pytest.mark.parametrize("nchunks", [1, 2, 5, 10])
@pytest.mark.parametrize("data_type", dtypes)
def test_from_arrow_chunked_arrays(nelem, nchunks, data_type):
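# A chunked Arrow array should be concatenated into a single cudf column.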
np_list_data = [
np.random.randint(0, 100, nelem).astype(data_type)
for i in range(nchunks)
]
pa_chunk_array = pa.chunked_array(np_list_data)
expect = pd.Series(pa_chunk_array.to_pandas())
got = cudf.Series(pa_chunk_array)
assert_eq(expect, got)
np_list_data2 = [
np.random.randint(0, 100, nelem).astype(data_type)
for i in range(nchunks)
]
pa_chunk_array2 = pa.chunked_array(np_list_data2)
pa_table = pa.Table.from_arrays(
[pa_chunk_array, pa_chunk_array2], names=["a", "b"]
)
expect = pa_table.to_pandas()
got = cudf.DataFrame.from_arrow(pa_table)
assert_eq(expect, got)
@pytest.mark.skip(reason="Test was designed to be run in isolation")
def test_gpu_memory_usage_with_boolmask():
ctx = cuda.current_context()
def query_GPU_memory(note=""):
memInfo = ctx.get_memory_info()
usedMemoryGB = (memInfo.total - memInfo.free) / 1e9
return usedMemoryGB
cuda.current_context().deallocations.clear()
nRows = int(1e8)
nCols = 2
dataNumpy = np.asfortranarray(np.random.rand(nRows, nCols))
colNames = ["col" + str(iCol) for iCol in range(nCols)]
pandasDF = pd.DataFrame(data=dataNumpy, columns=colNames, dtype=np.float32)
cudaDF = cudf.core.DataFrame.from_pandas(pandasDF)
boolmask = cudf.Series(np.random.randint(1, 2, len(cudaDF)).astype("bool"))
memory_used = query_GPU_memory()
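# After applying the mask, every column should share the frame's index buffer
# (identical device pointers) rather than holding per-column copies.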
cudaDF = cudaDF[boolmask]
assert (
cudaDF.index._values.data_array_view.device_ctypes_pointer
== cudaDF["col0"].index._values.data_array_view.device_ctypes_pointer
)
assert (
cudaDF.index._values.data_array_view.device_ctypes_pointer
== cudaDF["col1"].index._values.data_array_view.device_ctypes_pointer
)
assert memory_used == query_GPU_memory()
def test_boolmask(pdf, gdf):
boolmask = np.random.randint(0, 2, len(pdf)) > 0
gdf = gdf[boolmask]
pdf = pdf[boolmask]
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"mask_shape",
[
(2, "ab"),
(2, "abc"),
(3, "ab"),
(3, "abc"),
(3, "abcd"),
(4, "abc"),
(4, "abcd"),
],
)
def test_dataframe_boolmask(mask_shape):
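# The mask may cover a different shape than the frame; unmatched cells come
# back as null, hence the fillna(-1) when comparing column values below.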
pdf = pd.DataFrame()
for col in "abc":
pdf[col] = np.random.randint(0, 10, 3)
pdf_mask = pd.DataFrame()
for col in mask_shape[1]:
pdf_mask[col] = np.random.randint(0, 2, mask_shape[0]) > 0
gdf = cudf.DataFrame.from_pandas(pdf)
gdf_mask = cudf.DataFrame.from_pandas(pdf_mask)
gdf = gdf[gdf_mask]
pdf = pdf[pdf_mask]
assert np.array_equal(gdf.columns, pdf.columns)
for col in gdf.columns:
assert np.array_equal(
gdf[col].fillna(-1).to_pandas().values, pdf[col].fillna(-1).values
)
@pytest.mark.parametrize(
"mask",
[
[True, False, True],
pytest.param(
cudf.Series([True, False, True]),
marks=pytest.mark.xfail(
reason="Pandas can't index a multiindex with a Series"
),
),
],
)
def test_dataframe_multiindex_boolmask(mask):
gdf = cudf.DataFrame(
{"w": [3, 2, 1], "x": [1, 2, 3], "y": [0, 1, 0], "z": [1, 1, 1]}
)
gdg = gdf.groupby(["w", "x"]).count()
pdg = gdg.to_pandas()
assert_eq(gdg[mask], pdg[mask])
def test_dataframe_assignment():
pdf = pd.DataFrame()
for col in "abc":
pdf[col] = np.array([0, 1, 1, -2, 10])
gdf = cudf.DataFrame.from_pandas(pdf)
gdf[gdf < 0] = 999
pdf[pdf < 0] = 999
assert_eq(gdf, pdf)
def test_1row_arrow_table():
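# Round-trip a single-row Arrow table through cudf and compare with pandas.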
data = [pa.array([0]), pa.array([1])]
batch = pa.RecordBatch.from_arrays(data, ["f0", "f1"])
table = pa.Table.from_batches([batch])
expect = table.to_pandas()
got = cudf.DataFrame.from_arrow(table)
assert_eq(expect, got)
def test_arrow_handle_no_index_name(pdf, gdf):
gdf_arrow = gdf.to_arrow()
pdf_arrow = pa.Table.from_pandas(pdf)
assert pa.Table.equals(pdf_arrow, gdf_arrow)
got = cudf.DataFrame.from_arrow(gdf_arrow)
expect = pdf_arrow.to_pandas()
assert_eq(expect, got)
@pytest.mark.parametrize("num_rows", [1, 3, 10, 100])
@pytest.mark.parametrize("num_bins", [1, 2, 4, 20])
@pytest.mark.parametrize("right", [True, False])
@pytest.mark.parametrize("dtype", NUMERIC_TYPES + ["bool"])
@pytest.mark.parametrize("series_bins", [True, False])
def test_series_digitize(num_rows, num_bins, right, dtype, series_bins):
data = np.random.randint(0, 100, num_rows).astype(dtype)
bins = np.unique(np.sort(np.random.randint(2, 95, num_bins).astype(dtype)))
s = cudf.Series(data)
if series_bins:
s_bins = cudf.Series(bins)
indices = s.digitize(s_bins, right)
else:
indices = s.digitize(bins, right)
np.testing.assert_array_equal(
np.digitize(data, bins, right), indices.to_array()
)
def test_series_digitize_invalid_bins():
s = cudf.Series(np.random.randint(0, 30, 80), dtype="int32")
bins = cudf.Series([2, None, None, 50, 90], dtype="int32")
with pytest.raises(
ValueError, match="`bins` cannot contain null entries."
):
_ = s.digitize(bins)
def test_pandas_non_contiguous():
arr1 = np.random.sample([5000, 10])
assert arr1.flags["C_CONTIGUOUS"] is True
df = pd.DataFrame(arr1)
for col in df.columns:
assert df[col].values.flags["C_CONTIGUOUS"] is False
gdf = cudf.DataFrame.from_pandas(df)
assert_eq(gdf.to_pandas(), df)
@pytest.mark.parametrize("num_elements", [0, 2, 10, 100])
@pytest.mark.parametrize("null_type", [np.nan, None, "mixed"])
def test_series_all_null(num_elements, null_type):
if null_type == "mixed":
data = []
data1 = [np.nan] * int(num_elements / 2)
data2 = [None] * int(num_elements / 2)
for idx in range(len(data1)):
data.append(data1[idx])
data.append(data2[idx])
else:
data = [null_type] * num_elements
# Cast the pandas Series to float64 because a list of None values would
# otherwise produce an `object` dtype.
expect = pd.Series(data, dtype="float64")
got = cudf.Series(data)
assert_eq(expect, got)
@pytest.mark.parametrize("num_elements", [0, 2, 10, 100])
def test_series_all_valid_nan(num_elements):
data = [np.nan] * num_elements
sr = cudf.Series(data, nan_as_null=False)
np.testing.assert_equal(sr.null_count, 0)
def test_series_rename():
pds = pd.Series([1, 2, 3], name="asdf")
gds = cudf.Series([1, 2, 3], name="asdf")
expect = pds.rename("new_name")
got = gds.rename("new_name")
assert_eq(expect, got)
pds = pd.Series(expect)
gds = cudf.Series(got)
assert_eq(pds, gds)
pds = pd.Series(expect, name="name name")
gds = cudf.Series(got, name="name name")
assert_eq(pds, gds)
@pytest.mark.parametrize("data_type", dtypes)
@pytest.mark.parametrize("nelem", [0, 100])
def test_head_tail(nelem, data_type):
def check_index_equality(left, right):
assert left.index.equals(right.index)
def check_values_equality(left, right):
if len(left) == 0 and len(right) == 0:
return None
np.testing.assert_array_equal(left.to_pandas(), right.to_pandas())
def check_frame_series_equality(left, right):
check_index_equality(left, right)
check_values_equality(left, right)
gdf = cudf.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
check_frame_series_equality(gdf.head(), gdf[:5])
check_frame_series_equality(gdf.head(3), gdf[:3])
check_frame_series_equality(gdf.head(-2), gdf[:-2])
check_frame_series_equality(gdf.head(0), gdf[0:0])
check_frame_series_equality(gdf["a"].head(), gdf["a"][:5])
check_frame_series_equality(gdf["a"].head(3), gdf["a"][:3])
check_frame_series_equality(gdf["a"].head(-2), gdf["a"][:-2])
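# tail(n) mirrors pandas: a negative n drops the first |n| rows.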
check_frame_series_equality(gdf.tail(), gdf[-5:])
check_frame_series_equality(gdf.tail(3), gdf[-3:])
check_frame_series_equality(gdf.tail(-2), gdf[2:])
check_frame_series_equality(gdf.tail(0), gdf[0:0])
check_frame_series_equality(gdf["a"].tail(), gdf["a"][-5:])
check_frame_series_equality(gdf["a"].tail(3), gdf["a"][-3:])
check_frame_series_equality(gdf["a"].tail(-2), gdf["a"][2:])
def test_tail_for_string():
gdf = cudf.DataFrame()
gdf["id"] = cudf.Series(["a", "b"], dtype=np.object_)
gdf["v"] = cudf.Series([1, 2])
assert_eq(gdf.tail(3), gdf.to_pandas().tail(3))
@pytest.mark.parametrize("drop", [True, False])
def test_reset_index(pdf, gdf, drop):
assert_eq(
pdf.reset_index(drop=drop, inplace=False),
gdf.reset_index(drop=drop, inplace=False),
)
assert_eq(
pdf.x.reset_index(drop=drop, inplace=False),
gdf.x.reset_index(drop=drop, inplace=False),
)
@pytest.mark.parametrize("drop", [True, False])
def test_reset_named_index(pdf, gdf, drop):
pdf.index.name = "cudf"
gdf.index.name = "cudf"
assert_eq(
pdf.reset_index(drop=drop, inplace=False),
gdf.reset_index(drop=drop, inplace=False),
)
assert_eq(
pdf.x.reset_index(drop=drop, inplace=False),
gdf.x.reset_index(drop=drop, inplace=False),
)
@pytest.mark.parametrize("drop", [True, False])
def test_reset_index_inplace(pdf, gdf, drop):
pdf.reset_index(drop=drop, inplace=True)
gdf.reset_index(drop=drop, inplace=True)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"data",
[
{
"a": [1, 2, 3, 4, 5],
"b": ["a", "b", "c", "d", "e"],
"c": [1.0, 2.0, 3.0, 4.0, 5.0],
}
],
)
@pytest.mark.parametrize(
"index",
[
"a",
["a", "b"],
pd.CategoricalIndex(["I", "II", "III", "IV", "V"]),
pd.Series(["h", "i", "k", "l", "m"]),
["b", pd.Index(["I", "II", "III", "IV", "V"])],
["c", [11, 12, 13, 14, 15]],
pd.MultiIndex(
levels=[
["I", "II", "III", "IV", "V"],
["one", "two", "three", "four", "five"],
],
codes=[[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]],
names=["col1", "col2"],
),
pd.RangeIndex(0, 5), # corner case
[pd.Series(["h", "i", "k", "l", "m"]), pd.RangeIndex(0, 5)],
[
pd.MultiIndex(
levels=[
["I", "II", "III", "IV", "V"],
["one", "two", "three", "four", "five"],
],
codes=[[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]],
names=["col1", "col2"],
),
pd.RangeIndex(0, 5),
],
],
)
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
def test_set_index(data, index, drop, append, inplace):
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
expected = pdf.set_index(index, inplace=inplace, drop=drop, append=append)
actual = gdf.set_index(index, inplace=inplace, drop=drop, append=append)
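# With inplace=True both set_index calls return None, so compare the mutated
# frames instead of the return values.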
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"data",
[
{
"a": [1, 1, 2, 2, 5],
"b": ["a", "b", "c", "d", "e"],
"c": [1.0, 2.0, 3.0, 4.0, 5.0],
}
],
)
@pytest.mark.parametrize("index", ["a", pd.Index([1, 1, 2, 2, 3])])
@pytest.mark.parametrize("verify_integrity", [True])
@pytest.mark.xfail
def test_set_index_verify_integrity(data, index, verify_integrity):
gdf = cudf.DataFrame(data)
gdf.set_index(index, verify_integrity=verify_integrity)
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("nelem", [10, 200, 1333])
def test_set_index_multi(drop, nelem):
np.random.seed(0)
a = np.arange(nelem)
np.random.shuffle(a)
df = pd.DataFrame(
{
"a": a,
"b": np.random.randint(0, 4, size=nelem),
"c": np.random.uniform(low=0, high=4, size=nelem),
"d": np.random.choice(["green", "black", "white"], nelem),
}
)
df["e"] = df["d"].astype("category")
gdf = cudf.DataFrame.from_pandas(df)
assert_eq(gdf.set_index("a", drop=drop), gdf.set_index(["a"], drop=drop))
assert_eq(
df.set_index(["b", "c"], drop=drop),
gdf.set_index(["b", "c"], drop=drop),
)
assert_eq(
df.set_index(["d", "b"], drop=drop),
gdf.set_index(["d", "b"], drop=drop),
)
assert_eq(
df.set_index(["b", "d", "e"], drop=drop),
gdf.set_index(["b", "d", "e"], drop=drop),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_0(copy):
# TODO (ptaylor): pandas changes `int` dtype to `float64`
# when reindexing and filling new label indices with NaN
gdf = cudf.datasets.randomdata(
nrows=6,
dtypes={
"a": "category",
# 'b': int,
"c": float,
"d": str,
},
)
pdf = gdf.to_pandas()
# Validate reindex returns a copy unmodified
assert_eq(pdf.reindex(copy=True), gdf.reindex(copy=copy))
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_1(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis defaults to 0
assert_eq(pdf.reindex(index, copy=True), gdf.reindex(index, copy=copy))
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_2(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis=0
assert_eq(
pdf.reindex(index, axis=0, copy=True),
gdf.reindex(index, axis=0, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_3(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as columns when axis=1
assert_eq(
pdf.reindex(columns, axis=1, copy=True),
gdf.reindex(columns, axis=1, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_4(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis=0
assert_eq(
pdf.reindex(labels=index, axis=0, copy=True),
gdf.reindex(labels=index, axis=0, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_5(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as columns when axis=1
assert_eq(
pdf.reindex(labels=columns, axis=1, copy=True),
gdf.reindex(labels=columns, axis=1, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_6(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis='index'
assert_eq(
pdf.reindex(labels=index, axis="index", copy=True),
gdf.reindex(labels=index, axis="index", copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_7(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as columns when axis='columns'
assert_eq(
pdf.reindex(labels=columns, axis="columns", copy=True),
gdf.reindex(labels=columns, axis="columns", copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_8(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate that rows are reindexed when passing index=labels
assert_eq(
pdf.reindex(index=index, copy=True),
gdf.reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_9(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate that columns are reindexed when passing columns=labels
assert_eq(
pdf.reindex(columns=columns, copy=True),
gdf.reindex(columns=columns, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_10(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes both labels and column names when
# index=index_labels and columns=column_labels
assert_eq(
pdf.reindex(index=index, columns=columns, copy=True),
gdf.reindex(index=index, columns=columns, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_change_dtype(copy):
if PANDAS_GE_110:
kwargs = {"check_freq": False}
else:
kwargs = {}
index = pd.date_range("12/29/2009", periods=10, freq="D")
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes both labels and column names when
# index=index_labels and columns=column_labels
assert_eq(
pdf.reindex(index=index, columns=columns, copy=True),
gdf.reindex(index=index, columns=columns, copy=copy),
**kwargs,
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_categorical_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"a": "category"})
pdf = gdf.to_pandas()
assert_eq(pdf["a"].reindex(copy=True), gdf["a"].reindex(copy=copy))
assert_eq(
pdf["a"].reindex(index, copy=True), gdf["a"].reindex(index, copy=copy)
)
assert_eq(
pdf["a"].reindex(index=index, copy=True),
gdf["a"].reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_float_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"c": float})
pdf = gdf.to_pandas()
assert_eq(pdf["c"].reindex(copy=True), gdf["c"].reindex(copy=copy))
assert_eq(
pdf["c"].reindex(index, copy=True), gdf["c"].reindex(index, copy=copy)
)
assert_eq(
pdf["c"].reindex(index=index, copy=True),
gdf["c"].reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_string_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"d": str})
pdf = gdf.to_pandas()
assert_eq(pdf["d"].reindex(copy=True), gdf["d"].reindex(copy=copy))
assert_eq(
pdf["d"].reindex(index, copy=True), gdf["d"].reindex(index, copy=copy)
)
assert_eq(
pdf["d"].reindex(index=index, copy=True),
gdf["d"].reindex(index=index, copy=copy),
)
def test_to_frame(pdf, gdf):
assert_eq(pdf.x.to_frame(), gdf.x.to_frame())
name = "foo"
gdf_new_name = gdf.x.to_frame(name=name)
pdf_new_name = pdf.x.to_frame(name=name)
assert_eq(pdf_new_name, gdf_new_name)
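# to_frame should also accept a non-string name such as False.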
name = False
gdf_new_name = gdf.x.to_frame(name=name)
pdf_new_name = pdf.x.to_frame(name=name)
assert_eq(gdf_new_name, pdf_new_name)
assert gdf_new_name.columns[0] is name
def test_dataframe_empty_sort_index():
pdf = pd.DataFrame({"x": []})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.sort_index()
got = gdf.sort_index()
assert_eq(expect, got)
@pytest.mark.parametrize("axis", [0, 1, "index", "columns"])
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("ignore_index", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_dataframe_sort_index(
axis, ascending, inplace, ignore_index, na_position
):
pdf = pd.DataFrame(
{"b": [1, 3, 2], "a": [1, 4, 3], "c": [4, 1, 5]},
index=[3.0, 1.0, np.nan],
)
gdf = cudf.DataFrame.from_pandas(pdf)
expected = pdf.sort_index(
axis=axis,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
got = gdf.sort_index(
axis=axis,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
if inplace is True:
assert_eq(pdf, gdf)
else:
assert_eq(expected, got)
@pytest.mark.parametrize("axis", [0, 1, "index", "columns"])
@pytest.mark.parametrize(
"level",
[
0,
"b",
1,
["b"],
"a",
["a", "b"],
["b", "a"],
[0, 1],
[1, 0],
[0, 2],
None,
],
)
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("ignore_index", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_dataframe_multiindex_sort_index(
axis, level, ascending, inplace, ignore_index, na_position
):
pdf = pd.DataFrame(
{
"b": [1.0, 3.0, np.nan],
"a": [1, 4, 3],
1: ["a", "b", "c"],
"e": [3, 1, 4],
"d": [1, 2, 8],
}
).set_index(["b", "a", 1])
gdf = cudf.DataFrame.from_pandas(pdf)
# pandas only accepts ignore_index in sort_index from 1.0 onwards, so emulate
# it below by resetting the index on the expected result.
expected = pdf.sort_index(
axis=axis,
level=level,
ascending=ascending,
inplace=inplace,
na_position=na_position,
)
got = gdf.sort_index(
axis=axis,
level=level,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
if inplace is True:
if ignore_index is True:
pdf = pdf.reset_index(drop=True)
assert_eq(pdf, gdf)
else:
if ignore_index is True:
expected = expected.reset_index(drop=True)
assert_eq(expected, got)
@pytest.mark.parametrize("dtype", dtypes + ["category"])
def test_dataframe_0_row_dtype(dtype):
if dtype == "category":
data = pd.Series(["a", "b", "c", "d", "e"], dtype="category")
else:
data = np.array([1, 2, 3, 4, 5], dtype=dtype)
expect = cudf.DataFrame()
expect["x"] = data
expect["y"] = data
got = expect.head(0)
for col_name in got.columns:
assert expect[col_name].dtype == got[col_name].dtype
expect = cudf.Series(data)
got = expect.head(0)
assert expect.dtype == got.dtype
@pytest.mark.parametrize("nan_as_null", [True, False])
def test_series_list_nanasnull(nan_as_null):
data = [1.0, 2.0, 3.0, np.nan, None]
expect = pa.array(data, from_pandas=nan_as_null)
got = cudf.Series(data, nan_as_null=nan_as_null).to_arrow()
# Bug in Arrow 0.14.1 where NaNs aren't handled
expect = expect.cast("int64", safe=False)
got = got.cast("int64", safe=False)
assert pa.Array.equals(expect, got)
def test_column_assignment():
gdf = cudf.datasets.randomdata(
nrows=20, dtypes={"a": "category", "b": int, "c": float}
)
new_cols = ["q", "r", "s"]
gdf.columns = new_cols
assert list(gdf.columns) == new_cols
def test_select_dtype():
gdf = cudf.datasets.randomdata(
nrows=20, dtypes={"a": "category", "b": int, "c": float, "d": str}
)
pdf = gdf.to_pandas()
assert_eq(pdf.select_dtypes("float64"), gdf.select_dtypes("float64"))
assert_eq(pdf.select_dtypes(np.float64), gdf.select_dtypes(np.float64))
assert_eq(
pdf.select_dtypes(include=["float64"]),
gdf.select_dtypes(include=["float64"]),
)
assert_eq(
pdf.select_dtypes(include=["object", "int", "category"]),
gdf.select_dtypes(include=["object", "int", "category"]),
)
assert_eq(
pdf.select_dtypes(include=["int64", "float64"]),
gdf.select_dtypes(include=["int64", "float64"]),
)
assert_eq(
pdf.select_dtypes(include=np.number),
gdf.select_dtypes(include=np.number),
)
assert_eq(
pdf.select_dtypes(include=[np.int64, np.float64]),
gdf.select_dtypes(include=[np.int64, np.float64]),
)
assert_eq(
pdf.select_dtypes(include=["category"]),
gdf.select_dtypes(include=["category"]),
)
assert_eq(
pdf.select_dtypes(exclude=np.number),
gdf.select_dtypes(exclude=np.number),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes,
rfunc=gdf.select_dtypes,
lfunc_args_and_kwargs=([], {"includes": ["Foo"]}),
rfunc_args_and_kwargs=([], {"includes": ["Foo"]}),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes,
rfunc=gdf.select_dtypes,
lfunc_args_and_kwargs=(
[],
{"exclude": np.number, "include": np.number},
),
rfunc_args_and_kwargs=(
[],
{"exclude": np.number, "include": np.number},
),
)
gdf = cudf.DataFrame(
{"A": [3, 4, 5], "C": [1, 2, 3], "D": ["a", "b", "c"]}
)
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(include=["object", "int", "category"]),
gdf.select_dtypes(include=["object", "int", "category"]),
)
assert_eq(
pdf.select_dtypes(include=["object"], exclude=["category"]),
gdf.select_dtypes(include=["object"], exclude=["category"]),
)
gdf = cudf.DataFrame({"a": range(10), "b": range(10, 20)})
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(include=["category"]),
gdf.select_dtypes(include=["category"]),
)
assert_eq(
pdf.select_dtypes(include=["float"]),
gdf.select_dtypes(include=["float"]),
)
assert_eq(
pdf.select_dtypes(include=["object"]),
gdf.select_dtypes(include=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"]), gdf.select_dtypes(include=["int"])
)
assert_eq(
pdf.select_dtypes(exclude=["float"]),
gdf.select_dtypes(exclude=["float"]),
)
assert_eq(
pdf.select_dtypes(exclude=["object"]),
gdf.select_dtypes(exclude=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"], exclude=["object"]),
gdf.select_dtypes(include=["int"], exclude=["object"]),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes, rfunc=gdf.select_dtypes,
)
gdf = cudf.DataFrame(
{"a": cudf.Series([], dtype="int"), "b": cudf.Series([], dtype="str")}
)
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(exclude=["object"]),
gdf.select_dtypes(exclude=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"], exclude=["object"]),
gdf.select_dtypes(include=["int"], exclude=["object"]),
)
def test_select_dtype_datetime():
gdf = cudf.datasets.timeseries(
start="2000-01-01", end="2000-01-02", freq="3600s", dtypes={"x": int}
)
gdf = gdf.reset_index()
pdf = gdf.to_pandas()
assert_eq(pdf.select_dtypes("datetime64"), gdf.select_dtypes("datetime64"))
assert_eq(
pdf.select_dtypes(np.dtype("datetime64")),
gdf.select_dtypes(np.dtype("datetime64")),
)
assert_eq(
pdf.select_dtypes(include="datetime64"),
gdf.select_dtypes(include="datetime64"),
)
def test_select_dtype_datetime_with_frequency():
gdf = cudf.datasets.timeseries(
start="2000-01-01", end="2000-01-02", freq="3600s", dtypes={"x": int}
)
gdf = gdf.reset_index()
pdf = gdf.to_pandas()
assert_exceptions_equal(
pdf.select_dtypes,
gdf.select_dtypes,
(["datetime64[ms]"],),
(["datetime64[ms]"],),
)
def test_array_ufunc():
gdf = cudf.DataFrame({"x": [2, 3, 4.0], "y": [9.0, 2.5, 1.1]})
pdf = gdf.to_pandas()
assert_eq(np.sqrt(gdf), np.sqrt(pdf))
assert_eq(np.sqrt(gdf.x), np.sqrt(pdf.x))
@pytest.mark.parametrize("nan_value", [-5, -5.0, 0, 5, 5.0, None, "pandas"])
def test_series_to_gpu_array(nan_value):
s = cudf.Series([0, 1, None, 3])
np.testing.assert_array_equal(
s.to_array(nan_value), s.to_gpu_array(nan_value).copy_to_host()
)
def test_dataframe_describe_exclude():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(exclude=["float"])
pdf_results = pdf.describe(exclude=["float"])
assert_eq(gdf_results, pdf_results)
def test_dataframe_describe_include():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(include=["int"])
pdf_results = pdf.describe(include=["int"])
assert_eq(gdf_results, pdf_results)
def test_dataframe_describe_default():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe()
pdf_results = pdf.describe()
assert_eq(pdf_results, gdf_results)
def test_series_describe_include_all():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
df["animal"] = np.random.choice(["dog", "cat", "bird"], data_length)
pdf = df.to_pandas()
gdf_results = df.describe(include="all")
pdf_results = pdf.describe(include="all")
assert_eq(gdf_results[["x", "y"]], pdf_results[["x", "y"]])
assert_eq(gdf_results.index, pdf_results.index)
assert_eq(gdf_results.columns, pdf_results.columns)
assert_eq(
gdf_results[["animal"]].fillna(-1).astype("str"),
pdf_results[["animal"]].fillna(-1).astype("str"),
)
def test_dataframe_describe_percentiles():
np.random.seed(12)
data_length = 10000
sample_percentiles = [0.0, 0.1, 0.33, 0.84, 0.4, 0.99]
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(percentiles=sample_percentiles)
pdf_results = pdf.describe(percentiles=sample_percentiles)
assert_eq(pdf_results, gdf_results)
def test_get_numeric_data():
pdf = pd.DataFrame(
{"x": [1, 2, 3], "y": [1.0, 2.0, 3.0], "z": ["a", "b", "c"]}
)
gdf = cudf.from_pandas(pdf)
assert_eq(pdf._get_numeric_data(), gdf._get_numeric_data())
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("period", [-1, -5, -10, -20, 0, 1, 5, 10, 20])
@pytest.mark.parametrize("data_empty", [False, True])
def test_shift(dtype, period, data_empty):
if data_empty:
data = None
else:
if dtype == np.int8:
# to keep data in range
data = gen_rand(dtype, 100000, low=-2, high=2)
else:
data = gen_rand(dtype, 100000)
gdf = cudf.DataFrame({"a": cudf.Series(data, dtype=dtype)})
pdf = pd.DataFrame({"a": pd.Series(data, dtype=dtype)})
shifted_outcome = gdf.a.shift(period).fillna(0)
expected_outcome = pdf.a.shift(period).fillna(0).astype(dtype)
if data_empty:
assert_eq(shifted_outcome, expected_outcome, check_index_type=False)
else:
assert_eq(shifted_outcome, expected_outcome)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("period", [-1, -5, -10, -20, 0, 1, 5, 10, 20])
@pytest.mark.parametrize("data_empty", [False, True])
def test_diff(dtype, period, data_empty):
if data_empty:
data = None
else:
if dtype == np.int8:
# to keep data in range
data = gen_rand(dtype, 100000, low=-2, high=2)
else:
data = gen_rand(dtype, 100000)
gdf = cudf.DataFrame({"a": cudf.Series(data, dtype=dtype)})
pdf = pd.DataFrame({"a": pd.Series(data, dtype=dtype)})
expected_outcome = pdf.a.diff(period)
diffed_outcome = gdf.a.diff(period).astype(expected_outcome.dtype)
if data_empty:
assert_eq(diffed_outcome, expected_outcome, check_index_type=False)
else:
assert_eq(diffed_outcome, expected_outcome)
@pytest.mark.parametrize("df", _dataframe_na_data())
@pytest.mark.parametrize("nan_as_null", [True, False, None])
def test_dataframe_isnull_isna(df, nan_as_null):
gdf = cudf.DataFrame.from_pandas(df, nan_as_null=nan_as_null)
assert_eq(df.isnull(), gdf.isnull())
assert_eq(df.isna(), gdf.isna())
# Test individual columns
for col in df:
assert_eq(df[col].isnull(), gdf[col].isnull())
assert_eq(df[col].isna(), gdf[col].isna())
@pytest.mark.parametrize("df", _dataframe_na_data())
@pytest.mark.parametrize("nan_as_null", [True, False, None])
def test_dataframe_notna_notnull(df, nan_as_null):
gdf = cudf.DataFrame.from_pandas(df, nan_as_null=nan_as_null)
assert_eq(df.notnull(), gdf.notnull())
assert_eq(df.notna(), gdf.notna())
# Test individual columns
for col in df:
assert_eq(df[col].notnull(), gdf[col].notnull())
assert_eq(df[col].notna(), gdf[col].notna())
def test_ndim():
pdf = pd.DataFrame({"x": range(5), "y": range(5, 10)})
gdf = cudf.DataFrame.from_pandas(pdf)
assert pdf.ndim == gdf.ndim
assert pdf.x.ndim == gdf.x.ndim
s = pd.Series(dtype="float64")
gs = cudf.Series()
assert s.ndim == gs.ndim
@pytest.mark.parametrize(
"decimals",
[
-3,
0,
5,
pd.Series([1, 4, 3, -6], index=["w", "x", "y", "z"]),
cudf.Series([-4, -2, 12], index=["x", "y", "z"]),
{"w": -1, "x": 15, "y": 2},
],
)
def test_dataframe_round(decimals):
pdf = pd.DataFrame(
{
"w": np.arange(0.5, 10.5, 1),
"x": np.random.normal(-100, 100, 10),
"y": np.array(
[
14.123,
2.343,
np.nan,
0.0,
-8.302,
np.nan,
94.313,
-112.236,
-8.029,
np.nan,
]
),
"z": np.repeat([-0.6459412758761901], 10),
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
if isinstance(decimals, cudf.Series):
pdecimals = decimals.to_pandas()
else:
pdecimals = decimals
result = gdf.round(decimals)
expected = pdf.round(pdecimals)
assert_eq(result, expected)
# with nulls, maintaining existing null mask
for c in pdf.columns:
arr = pdf[c].to_numpy().astype("float64") # for pandas nulls
arr.ravel()[np.random.choice(10, 5, replace=False)] = np.nan
pdf[c] = gdf[c] = arr
result = gdf.round(decimals)
expected = pdf.round(pdecimals)
assert_eq(result, expected)
for c in gdf.columns:
np.array_equal(gdf[c].nullmask.to_array(), result[c].to_array())
@pytest.mark.parametrize(
"data",
[
[0, 1, 2, 3],
[-2, -1, 2, 3, 5],
[-2, -1, 0, 3, 5],
[True, False, False],
[True],
[False],
[],
[True, None, False],
[True, True, None],
[None, None],
[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]],
[[1, True], [2, False], [3, False]],
pytest.param(
[["a", True], ["b", False], ["c", False]],
marks=[
pytest.mark.xfail(
reason="NotImplementedError: all does not "
"support columns of object dtype."
)
],
),
],
)
def test_all(data):
# Pandas treats `None` in object-dtype columns as True, so replace it with
# `False` before comparing.
if np.array(data).ndim <= 1:
pdata = cudf.utils.utils._create_pandas_series(data=data).replace(
[None], False
)
gdata = cudf.Series.from_pandas(pdata)
else:
pdata = pd.DataFrame(data, columns=["a", "b"]).replace([None], False)
gdata = cudf.DataFrame.from_pandas(pdata)
# test bool_only
if pdata["b"].dtype == "bool":
got = gdata.all(bool_only=True)
expected = pdata.all(bool_only=True)
assert_eq(got, expected)
else:
with pytest.raises(NotImplementedError):
gdata.all(bool_only=False)
with pytest.raises(NotImplementedError):
gdata.all(level="a")
got = gdata.all()
expected = pdata.all()
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[0, 1, 2, 3],
[-2, -1, 2, 3, 5],
[-2, -1, 0, 3, 5],
[0, 0, 0, 0, 0],
[0, 0, None, 0],
[True, False, False],
[True],
[False],
[],
[True, None, False],
[True, True, None],
[None, None],
[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]],
[[1, True], [2, False], [3, False]],
pytest.param(
[["a", True], ["b", False], ["c", False]],
marks=[
pytest.mark.xfail(
reason="NotImplementedError: any does not "
"support columns of object dtype."
)
],
),
],
)
@pytest.mark.parametrize("axis", [0, 1])
def test_any(data, axis):
if np.array(data).ndim <= 1:
pdata = cudf.utils.utils._create_pandas_series(data=data)
gdata = cudf.Series.from_pandas(pdata)
if axis == 1:
with pytest.raises(NotImplementedError):
gdata.any(axis=axis)
else:
got = gdata.any(axis=axis)
expected = pdata.any(axis=axis)
assert_eq(got, expected)
else:
pdata = pd.DataFrame(data, columns=["a", "b"])
gdata = cudf.DataFrame.from_pandas(pdata)
# test bool_only
if pdata["b"].dtype == "bool":
got = gdata.any(bool_only=True)
expected = pdata.any(bool_only=True)
assert_eq(got, expected)
else:
with pytest.raises(NotImplementedError):
gdata.any(bool_only=False)
with pytest.raises(NotImplementedError):
gdata.any(level="a")
got = gdata.any(axis=axis)
expected = pdata.any(axis=axis)
assert_eq(got, expected)
@pytest.mark.parametrize("axis", [0, 1])
def test_empty_dataframe_any(axis):
pdf = pd.DataFrame({}, columns=["a", "b"])
gdf = cudf.DataFrame.from_pandas(pdf)
got = gdf.any(axis=axis)
expected = pdf.any(axis=axis)
assert_eq(got, expected, check_index_type=False)
@pytest.mark.parametrize("indexed", [False, True])
def test_dataframe_sizeof(indexed):
rows = int(1e6)
index = list(i for i in range(rows)) if indexed else None
gdf = cudf.DataFrame({"A": [8] * rows, "B": [32] * rows}, index=index)
for c in gdf._data.columns:
assert gdf._index.__sizeof__() == gdf._index.__sizeof__()
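# __sizeof__ of the frame should equal the index size plus the size of every
# column buffer.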
cols_sizeof = sum(c.__sizeof__() for c in gdf._data.columns)
assert gdf.__sizeof__() == (gdf._index.__sizeof__() + cols_sizeof)
@pytest.mark.parametrize("a", [[], ["123"]])
@pytest.mark.parametrize("b", ["123", ["123"]])
@pytest.mark.parametrize(
"misc_data",
["123", ["123"] * 20, 123, [1, 2, 0.8, 0.9] * 50, 0.9, 0.00001],
)
@pytest.mark.parametrize("non_list_data", [123, "abc", "zyx", "rapids", 0.8])
def test_create_dataframe_cols_empty_data(a, b, misc_data, non_list_data):
expected = pd.DataFrame({"a": a})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = b
actual["b"] = b
assert_eq(actual, expected)
expected = pd.DataFrame({"a": []})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = misc_data
actual["b"] = misc_data
assert_eq(actual, expected)
expected = pd.DataFrame({"a": a})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = non_list_data
actual["b"] = non_list_data
assert_eq(actual, expected)
def test_empty_dataframe_describe():
pdf = pd.DataFrame({"a": [], "b": []})
gdf = cudf.from_pandas(pdf)
expected = pdf.describe()
actual = gdf.describe()
assert_eq(expected, actual)
def test_as_column_types():
col = column.as_column(cudf.Series([]))
assert_eq(col.dtype, np.dtype("float64"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="float64"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="float32")
assert_eq(col.dtype, np.dtype("float32"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="float32"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="str")
assert_eq(col.dtype, np.dtype("object"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="str"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="object")
assert_eq(col.dtype, np.dtype("object"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="object"))
assert_eq(pds, gds)
pds = pd.Series(np.array([1, 2, 3]), dtype="float32")
gds = cudf.Series(column.as_column(np.array([1, 2, 3]), dtype="float32"))
assert_eq(pds, gds)
pds = pd.Series([1, 2, 3], dtype="float32")
gds = cudf.Series([1, 2, 3], dtype="float32")
assert_eq(pds, gds)
pds = pd.Series([], dtype="float64")
gds = cudf.Series(column.as_column(pds))
assert_eq(pds, gds)
pds = pd.Series([1, 2, 4], dtype="int64")
gds = cudf.Series(column.as_column(cudf.Series([1, 2, 4]), dtype="int64"))
assert_eq(pds, gds)
pds = pd.Series([1.2, 18.0, 9.0], dtype="float32")
gds = cudf.Series(
column.as_column(cudf.Series([1.2, 18.0, 9.0]), dtype="float32")
)
assert_eq(pds, gds)
pds = pd.Series([1.2, 18.0, 9.0], dtype="str")
gds = cudf.Series(
column.as_column(cudf.Series([1.2, 18.0, 9.0]), dtype="str")
)
assert_eq(pds, gds)
pds = pd.Series(pd.Index(["1", "18", "9"]), dtype="int")
gds = cudf.Series(
cudf.core.index.StringIndex(["1", "18", "9"]), dtype="int"
)
assert_eq(pds, gds)
def test_one_row_head():
gdf = cudf.DataFrame({"name": ["carl"], "score": [100]}, index=[123])
pdf = gdf.to_pandas()
head_gdf = gdf.head()
head_pdf = pdf.head()
assert_eq(head_pdf, head_gdf)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", NUMERIC_TYPES)
def test_series_astype_numeric_to_numeric(dtype, as_dtype):
psr = pd.Series([1, 2, 4, 3], dtype=dtype)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", NUMERIC_TYPES)
def test_series_astype_numeric_to_numeric_nulls(dtype, as_dtype):
data = [1, 2, None, 3]
sr = cudf.Series(data, dtype=dtype)
got = sr.astype(as_dtype)
expect = cudf.Series([1, 2, None, 3], dtype=as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize(
"as_dtype",
[
"str",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_numeric_to_other(dtype, as_dtype):
psr = pd.Series([1, 2, 3], dtype=dtype)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"as_dtype",
[
"str",
"int32",
"uint32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_string_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05"]
else:
data = ["1", "2", "3"]
psr = pd.Series(data)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"as_dtype",
[
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_datetime_to_other(as_dtype):
data = ["2001-01-01", "2002-02-02", "2001-01-05"]
psr = pd.Series(data)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"inp",
[
("datetime64[ns]", "2011-01-01 00:00:00.000000000"),
("datetime64[us]", "2011-01-01 00:00:00.000000"),
("datetime64[ms]", "2011-01-01 00:00:00.000"),
("datetime64[s]", "2011-01-01 00:00:00"),
],
)
def test_series_astype_datetime_to_string(inp):
dtype, expect = inp
base_date = "2011-01-01"
sr = cudf.Series([base_date], dtype=dtype)
got = sr.astype(str)[0]
assert expect == got
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"uint32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
"str",
],
)
def test_series_astype_categorical_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05", "2001-01-01"]
else:
data = [1, 2, 3, 1]
psr = pd.Series(data, dtype="category")
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize("ordered", [True, False])
def test_series_astype_to_categorical_ordered(ordered):
psr = pd.Series([1, 2, 3, 1], dtype="category")
gsr = cudf.from_pandas(psr)
ordered_dtype_pd = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=ordered
)
ordered_dtype_gd = cudf.CategoricalDtype.from_pandas(ordered_dtype_pd)
assert_eq(
psr.astype("int32").astype(ordered_dtype_pd).astype("int32"),
gsr.astype("int32").astype(ordered_dtype_gd).astype("int32"),
)
@pytest.mark.parametrize("ordered", [True, False])
def test_series_astype_cat_ordered_to_unordered(ordered):
pd_dtype = pd.CategoricalDtype(categories=[1, 2, 3], ordered=ordered)
pd_to_dtype = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=not ordered
)
gd_dtype = cudf.CategoricalDtype.from_pandas(pd_dtype)
gd_to_dtype = cudf.CategoricalDtype.from_pandas(pd_to_dtype)
psr = pd.Series([1, 2, 3], dtype=pd_dtype)
gsr = cudf.Series([1, 2, 3], dtype=gd_dtype)
expect = psr.astype(pd_to_dtype)
got = gsr.astype(gd_to_dtype)
assert_eq(expect, got)
def test_series_astype_null_cases():
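# Nulls should be preserved across casts between all of the dtypes below.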
data = [1, 2, None, 3]
# numerical to other
assert_eq(cudf.Series(data, dtype="str"), cudf.Series(data).astype("str"))
assert_eq(
cudf.Series(data, dtype="category"),
cudf.Series(data).astype("category"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="int32").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="uint32").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="datetime64[ms]"),
cudf.Series(data).astype("datetime64[ms]"),
)
# categorical to other
assert_eq(
cudf.Series(data, dtype="str"),
cudf.Series(data, dtype="category").astype("str"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="category").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="datetime64[ms]"),
cudf.Series(data, dtype="category").astype("datetime64[ms]"),
)
# string to other
assert_eq(
cudf.Series([1, 2, None, 3], dtype="int32"),
cudf.Series(["1", "2", None, "3"]).astype("int32"),
)
assert_eq(
cudf.Series(
["2001-01-01", "2001-02-01", None, "2001-03-01"],
dtype="datetime64[ms]",
),
cudf.Series(["2001-01-01", "2001-02-01", None, "2001-03-01"]).astype(
"datetime64[ms]"
),
)
assert_eq(
cudf.Series(["a", "b", "c", None], dtype="category").to_pandas(),
cudf.Series(["a", "b", "c", None]).astype("category").to_pandas(),
)
# datetime to other
data = [
"2001-01-01 00:00:00.000000",
"2001-02-01 00:00:00.000000",
None,
"2001-03-01 00:00:00.000000",
]
assert_eq(
cudf.Series(data),
cudf.Series(data, dtype="datetime64[us]").astype("str"),
)
assert_eq(
pd.Series(data, dtype="datetime64[ns]").astype("category"),
cudf.from_pandas(pd.Series(data, dtype="datetime64[ns]")).astype(
"category"
),
)
def test_series_astype_null_categorical():
sr = cudf.Series([None, None, None], dtype="category")
expect = cudf.Series([None, None, None], dtype="int32")
got = sr.astype("int32")
assert_eq(expect, got)
@pytest.mark.parametrize(
"data",
[
(
pd.Series([3, 3.0]),
pd.Series([2.3, 3.9]),
pd.Series([1.5, 3.9]),
pd.Series([1.0, 2]),
),
[
pd.Series([3, 3.0]),
pd.Series([2.3, 3.9]),
pd.Series([1.5, 3.9]),
pd.Series([1.0, 2]),
],
],
)
def test_create_dataframe_from_list_like(data):
pdf = pd.DataFrame(data, index=["count", "mean", "std", "min"])
gdf = cudf.DataFrame(data, index=["count", "mean", "std", "min"])
assert_eq(pdf, gdf)
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(pdf, gdf)
def test_create_dataframe_column():
pdf = pd.DataFrame(columns=["a", "b", "c"], index=["A", "Z", "X"])
gdf = cudf.DataFrame(columns=["a", "b", "c"], index=["A", "Z", "X"])
assert_eq(pdf, gdf)
pdf = pd.DataFrame(
{"a": [1, 2, 3], "b": [2, 3, 5]},
columns=["a", "b", "c"],
index=["A", "Z", "X"],
)
gdf = cudf.DataFrame(
{"a": [1, 2, 3], "b": [2, 3, 5]},
columns=["a", "b", "c"],
index=["A", "Z", "X"],
)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"data",
[
[1, 2, 4],
[],
[5.0, 7.0, 8.0],
pd.Categorical(["a", "b", "c"]),
["m", "a", "d", "v"],
],
)
def test_series_values_host_property(data):
pds = cudf.utils.utils._create_pandas_series(data=data)
gds = cudf.Series(data)
np.testing.assert_array_equal(pds.values, gds.values_host)
@pytest.mark.parametrize(
"data",
[
[1, 2, 4],
[],
[5.0, 7.0, 8.0],
pytest.param(
pd.Categorical(["a", "b", "c"]),
marks=pytest.mark.xfail(raises=NotImplementedError),
),
pytest.param(
["m", "a", "d", "v"],
marks=pytest.mark.xfail(raises=NotImplementedError),
),
],
)
def test_series_values_property(data):
pds = cudf.utils.utils._create_pandas_series(data=data)
gds = cudf.Series(data)
gds_vals = gds.values
assert isinstance(gds_vals, cupy.ndarray)
np.testing.assert_array_equal(gds_vals.get(), pds.values)
@pytest.mark.parametrize(
"data",
[
{"A": [1, 2, 3], "B": [4, 5, 6]},
{"A": [1.0, 2.0, 3.0], "B": [4.0, 5.0, 6.0]},
{"A": [1, 2, 3], "B": [1.0, 2.0, 3.0]},
{"A": np.float32(np.arange(3)), "B": np.float64(np.arange(3))},
pytest.param(
{"A": [1, None, 3], "B": [1, 2, None]},
marks=pytest.mark.xfail(
reason="Nulls not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": [None, None, None], "B": [None, None, None]},
marks=pytest.mark.xfail(
reason="Nulls not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": [], "B": []},
marks=pytest.mark.xfail(reason="Requires at least 1 row"),
),
pytest.param(
{"A": [1, 2, 3], "B": ["a", "b", "c"]},
marks=pytest.mark.xfail(
reason="str or categorical not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": pd.Categorical(["a", "b", "c"]), "B": ["d", "e", "f"]},
marks=pytest.mark.xfail(
reason="str or categorical not supported by as_gpu_matrix"
),
),
],
)
def test_df_values_property(data):
pdf = pd.DataFrame.from_dict(data)
gdf = cudf.DataFrame.from_pandas(pdf)
pmtr = pdf.values
gmtr = gdf.values.get()
np.testing.assert_array_equal(pmtr, gmtr)
def test_value_counts():
pdf = pd.DataFrame(
{
"numeric": [1, 2, 3, 4, 5, 6, 1, 2, 4] * 10,
"alpha": ["u", "h", "d", "a", "m", "u", "h", "d", "a"] * 10,
}
)
gdf = cudf.DataFrame(
{
"numeric": [1, 2, 3, 4, 5, 6, 1, 2, 4] * 10,
"alpha": ["u", "h", "d", "a", "m", "u", "h", "d", "a"] * 10,
}
)
assert_eq(
pdf.numeric.value_counts().sort_index(),
gdf.numeric.value_counts().sort_index(),
check_dtype=False,
)
assert_eq(
pdf.alpha.value_counts().sort_index(),
gdf.alpha.value_counts().sort_index(),
check_dtype=False,
)
@pytest.mark.parametrize(
"data",
[
[],
[0, 12, 14],
[0, 14, 12, 12, 3, 10, 12, 14],
np.random.randint(-100, 100, 200),
pd.Series([0.0, 1.0, None, 10.0]),
[None, None, None, None],
[np.nan, None, -1, 2, 3],
],
)
@pytest.mark.parametrize(
"values",
[
np.random.randint(-100, 100, 10),
[],
[np.nan, None, -1, 2, 3],
[1.0, 12.0, None, None, 120],
[0, 14, 12, 12, 3, 10, 12, 14, None],
[None, None, None],
["0", "12", "14"],
["0", "12", "14", "a"],
],
)
def test_isin_numeric(data, values):
index = np.random.randint(0, 100, len(data))
psr = cudf.utils.utils._create_pandas_series(data=data, index=index)
gsr = cudf.Series.from_pandas(psr, nan_as_null=False)
expected = psr.isin(values)
got = gsr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(
["2018-01-01", "2019-04-03", None, "2019-12-30"],
dtype="datetime64[ns]",
),
pd.Series(
[
"2018-01-01",
"2019-04-03",
None,
"2019-12-30",
"2018-01-01",
"2018-01-01",
],
dtype="datetime64[ns]",
),
],
)
@pytest.mark.parametrize(
"values",
[
[],
[1514764800000000000, 1577664000000000000],
[
1514764800000000000,
1577664000000000000,
1577664000000000000,
1577664000000000000,
1514764800000000000,
],
["2019-04-03", "2019-12-30", "2012-01-01"],
[
"2012-01-01",
"2012-01-01",
"2012-01-01",
"2019-04-03",
"2019-12-30",
"2012-01-01",
],
],
)
def test_isin_datetime(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(["this", "is", None, "a", "test"]),
pd.Series(["test", "this", "test", "is", None, "test", "a", "test"]),
pd.Series(["0", "12", "14"]),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["this", "is"],
[None, None, None],
["12", "14", "19"],
pytest.param(
[12, 14, 19],
marks=pytest.mark.xfail(
not PANDAS_GE_120,
reason="pandas's failure here seems like a bug(in < 1.2) "
"given the reverse succeeds",
),
),
["is", "this", "is", "this", "is"],
],
)
def test_isin_string(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(["a", "b", "c", "c", "c", "d", "e"], dtype="category"),
pd.Series(["a", "b", None, "c", "d", "e"], dtype="category"),
pd.Series([0, 3, 10, 12], dtype="category"),
pd.Series([0, 3, 10, 12, 0, 10, 3, 0, 0, 3, 3], dtype="category"),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["a", "b", None, "f", "words"],
["0", "12", None, "14"],
[0, 10, 12, None, 39, 40, 1000],
[0, 0, 0, 0, 3, 3, 3, None, 1, 2, 3],
],
)
def test_isin_categorical(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(
["this", "is", None, "a", "test"], index=["a", "b", "c", "d", "e"]
),
pd.Series([0, 15, 10], index=[0, None, 9]),
pd.Series(
range(25),
index=pd.date_range(
start="2019-01-01", end="2019-01-02", freq="H"
),
),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["this", "is"],
[0, 19, 13],
["2019-01-01 04:00:00", "2019-01-01 06:00:00", "2018-03-02"],
],
)
def test_isin_index(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.index.isin(values)
expected = psr.index.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
pd.MultiIndex.from_arrays(
[[1, 2, 3], ["red", "blue", "green"]], names=("number", "color")
),
pd.MultiIndex.from_arrays([[], []], names=("number", "color")),
pd.MultiIndex.from_arrays(
[[1, 2, 3, 10, 100], ["red", "blue", "green", "pink", "white"]],
names=("number", "color"),
),
],
)
@pytest.mark.parametrize(
"values,level,err",
[
(["red", "orange", "yellow"], "color", None),
(["red", "white", "yellow"], "color", None),
([0, 1, 2, 10, 11, 15], "number", None),
([0, 1, 2, 10, 11, 15], None, TypeError),
(pd.Series([0, 1, 2, 10, 11, 15]), None, TypeError),
(pd.Index([0, 1, 2, 10, 11, 15]), None, TypeError),
(pd.Index([0, 1, 2, 8, 11, 15]), "number", None),
(pd.Index(["red", "white", "yellow"]), "color", None),
([(1, "red"), (3, "red")], None, None),
(((1, "red"), (3, "red")), None, None),
(
pd.MultiIndex.from_arrays(
[[1, 2, 3], ["red", "blue", "green"]],
names=("number", "color"),
),
None,
None,
),
(
pd.MultiIndex.from_arrays([[], []], names=("number", "color")),
None,
None,
),
(
pd.MultiIndex.from_arrays(
[
[1, 2, 3, 10, 100],
["red", "blue", "green", "pink", "white"],
],
names=("number", "color"),
),
None,
None,
),
],
)
def test_isin_multiindex(data, values, level, err):
pmdx = data
gmdx = cudf.from_pandas(data)
if err is None:
expected = pmdx.isin(values, level=level)
if isinstance(values, pd.MultiIndex):
values = cudf.from_pandas(values)
got = gmdx.isin(values, level=level)
assert_eq(got, expected)
else:
assert_exceptions_equal(
lfunc=pmdx.isin,
rfunc=gmdx.isin,
lfunc_args_and_kwargs=([values], {"level": level}),
rfunc_args_and_kwargs=([values], {"level": level}),
check_exception_type=False,
expected_error_message=re.escape(
"values need to be a Multi-Index or set/list-like tuple "
"squences when `level=None`."
),
)
@pytest.mark.parametrize(
"data",
[
pd.DataFrame(
{
"num_legs": [2, 4],
"num_wings": [2, 0],
"bird_cats": pd.Series(
["sparrow", "pigeon"],
dtype="category",
index=["falcon", "dog"],
),
},
index=["falcon", "dog"],
),
pd.DataFrame(
{"num_legs": [8, 2], "num_wings": [0, 2]},
index=["spider", "falcon"],
),
pd.DataFrame(
{
"num_legs": [8, 2, 1, 0, 2, 4, 5],
"num_wings": [2, 0, 2, 1, 2, 4, -1],
}
),
],
)
@pytest.mark.parametrize(
"values",
[
[0, 2],
{"num_wings": [0, 3]},
pd.DataFrame(
{"num_legs": [8, 2], "num_wings": [0, 2]},
index=["spider", "falcon"],
),
pd.DataFrame(
{
"num_legs": [2, 4],
"num_wings": [2, 0],
"bird_cats": pd.Series(
["sparrow", "pigeon"],
dtype="category",
index=["falcon", "dog"],
),
},
index=["falcon", "dog"],
),
["sparrow", "pigeon"],
pd.Series(["sparrow", "pigeon"], dtype="category"),
pd.Series([1, 2, 3, 4, 5]),
"abc",
123,
],
)
def test_isin_dataframe(data, values):
pdf = data
gdf = cudf.from_pandas(pdf)
if cudf.utils.dtypes.is_scalar(values):
assert_exceptions_equal(
lfunc=pdf.isin,
rfunc=gdf.isin,
lfunc_args_and_kwargs=([values],),
rfunc_args_and_kwargs=([values],),
)
else:
try:
expected = pdf.isin(values)
except ValueError as e:
if str(e) == "Lengths must match.":
pytest.xfail(
not PANDAS_GE_110,
"https://github.com/pandas-dev/pandas/issues/34256",
)
if isinstance(values, (pd.DataFrame, pd.Series)):
values = cudf.from_pandas(values)
got = gdf.isin(values)
assert_eq(got, expected)
def test_constructor_properties():
df = cudf.DataFrame()
key1 = "a"
key2 = "b"
val1 = np.array([123], dtype=np.float64)
val2 = np.array([321], dtype=np.float64)
df[key1] = val1
df[key2] = val2
# Correct use of _constructor (for DataFrame)
assert_eq(df, df._constructor({key1: val1, key2: val2}))
# Correct use of _constructor (for cudf.Series)
assert_eq(df[key1], df[key2]._constructor(val1, name=key1))
# Correct use of _constructor_sliced (for DataFrame)
assert_eq(df[key1], df._constructor_sliced(val1, name=key1))
# Correct use of _constructor_expanddim (for cudf.Series)
assert_eq(df, df[key2]._constructor_expanddim({key1: val1, key2: val2}))
# Incorrect use of _constructor_sliced (Raises for cudf.Series)
with pytest.raises(NotImplementedError):
df[key1]._constructor_sliced
# Incorrect use of _constructor_expanddim (Raises for DataFrame)
with pytest.raises(NotImplementedError):
df._constructor_expanddim
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", ALL_TYPES)
def test_df_astype_numeric_to_all(dtype, as_dtype):
if "uint" in dtype:
data = [1, 2, None, 4, 7]
elif "int" in dtype or "longlong" in dtype:
data = [1, 2, None, 4, -7]
elif "float" in dtype:
data = [1.0, 2.0, None, 4.0, np.nan, -7.0]
gdf = cudf.DataFrame()
gdf["foo"] = cudf.Series(data, dtype=dtype)
gdf["bar"] = cudf.Series(data, dtype=dtype)
insert_data = cudf.Series(data, dtype=dtype)
expect = cudf.DataFrame()
expect["foo"] = insert_data.astype(as_dtype)
expect["bar"] = insert_data.astype(as_dtype)
got = gdf.astype(as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_df_astype_string_to_other(as_dtype):
if "datetime64" in as_dtype:
# change None to "NaT" after this issue is fixed:
# https://github.com/rapidsai/cudf/issues/5117
data = ["2001-01-01", "2002-02-02", "2000-01-05", None]
elif as_dtype == "int32":
data = [1, 2, 3]
elif as_dtype == "category":
data = ["1", "2", "3", None]
elif "float" in as_dtype:
data = [1.0, 2.0, 3.0, np.nan]
insert_data = cudf.Series.from_pandas(pd.Series(data, dtype="str"))
expect_data = cudf.Series(data, dtype=as_dtype)
gdf = cudf.DataFrame()
expect = cudf.DataFrame()
gdf["foo"] = insert_data
gdf["bar"] = insert_data
expect["foo"] = expect_data
expect["bar"] = expect_data
got = gdf.astype(as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"as_dtype",
[
"int64",
"datetime64[s]",
"datetime64[us]",
"datetime64[ns]",
"str",
"category",
],
)
def test_df_astype_datetime_to_other(as_dtype):
data = [
"1991-11-20 00:00:00.000",
"2004-12-04 00:00:00.000",
"2016-09-13 00:00:00.000",
None,
]
gdf = cudf.DataFrame()
expect = cudf.DataFrame()
gdf["foo"] = cudf.Series(data, dtype="datetime64[ms]")
gdf["bar"] = cudf.Series(data, dtype="datetime64[ms]")
if as_dtype == "int64":
expect["foo"] = cudf.Series(
[690595200000, 1102118400000, 1473724800000, None], dtype="int64"
)
expect["bar"] = cudf.Series(
[690595200000, 1102118400000, 1473724800000, None], dtype="int64"
)
elif as_dtype == "str":
expect["foo"] = cudf.Series(data, dtype="str")
expect["bar"] = cudf.Series(data, dtype="str")
elif as_dtype == "category":
expect["foo"] = cudf.Series(gdf["foo"], dtype="category")
expect["bar"] = cudf.Series(gdf["bar"], dtype="category")
else:
expect["foo"] = cudf.Series(data, dtype=as_dtype)
expect["bar"] = cudf.Series(data, dtype=as_dtype)
got = gdf.astype(as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
"str",
],
)
def test_df_astype_categorical_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05", "2001-01-01"]
else:
data = [1, 2, 3, 1]
psr = pd.Series(data, dtype="category")
pdf = pd.DataFrame()
pdf["foo"] = psr
pdf["bar"] = psr
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(pdf.astype(as_dtype), gdf.astype(as_dtype))
@pytest.mark.parametrize("ordered", [True, False])
def test_df_astype_to_categorical_ordered(ordered):
psr = pd.Series([1, 2, 3, 1], dtype="category")
pdf = pd.DataFrame()
pdf["foo"] = psr
pdf["bar"] = psr
gdf = cudf.DataFrame.from_pandas(pdf)
ordered_dtype_pd = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=ordered
)
ordered_dtype_gd = cudf.CategoricalDtype.from_pandas(ordered_dtype_pd)
assert_eq(
pdf.astype(ordered_dtype_pd).astype("int32"),
gdf.astype(ordered_dtype_gd).astype("int32"),
)
@pytest.mark.parametrize(
"dtype,args",
[(dtype, {}) for dtype in ALL_TYPES]
+ [("category", {"ordered": True}), ("category", {"ordered": False})],
)
def test_empty_df_astype(dtype, args):
df = cudf.DataFrame()
kwargs = {}
kwargs.update(args)
assert_eq(df, df.astype(dtype=dtype, **kwargs))
@pytest.mark.parametrize(
"errors",
[
pytest.param(
"raise", marks=pytest.mark.xfail(reason="should raise error here")
),
pytest.param("other", marks=pytest.mark.xfail(raises=ValueError)),
"ignore",
pytest.param(
"warn", marks=pytest.mark.filterwarnings("ignore:Traceback")
),
],
)
def test_series_astype_error_handling(errors):
sr = cudf.Series(["random", "words"])
got = sr.astype("datetime64", errors=errors)
assert_eq(sr, got)
@pytest.mark.parametrize("dtype", ALL_TYPES)
def test_df_constructor_dtype(dtype):
if "datetime" in dtype:
data = ["1991-11-20", "2004-12-04", "2016-09-13", None]
elif dtype == "str":
data = ["a", "b", "c", None]
elif "float" in dtype:
data = [1.0, 0.5, -1.1, np.nan, None]
elif "bool" in dtype:
data = [True, False, None]
else:
data = [1, 2, 3, None]
sr = cudf.Series(data, dtype=dtype)
expect = cudf.DataFrame()
expect["foo"] = sr
expect["bar"] = sr
got = cudf.DataFrame({"foo": data, "bar": data}, dtype=dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"data",
[
cudf.datasets.randomdata(
nrows=10, dtypes={"a": "category", "b": int, "c": float, "d": int}
),
cudf.datasets.randomdata(
nrows=10, dtypes={"a": "category", "b": int, "c": float, "d": str}
),
cudf.datasets.randomdata(
nrows=10, dtypes={"a": bool, "b": int, "c": float, "d": str}
),
cudf.DataFrame(),
cudf.DataFrame({"a": [0, 1, 2], "b": [1, None, 3]}),
cudf.DataFrame(
{
"a": [1, 2, 3, 4],
"b": [7, np.NaN, 9, 10],
"c": [np.NaN, np.NaN, np.NaN, np.NaN],
"d": cudf.Series([None, None, None, None], dtype="int64"),
"e": [100, None, 200, None],
"f": cudf.Series([10, None, np.NaN, 11], nan_as_null=False),
}
),
cudf.DataFrame(
{
"a": [10, 11, 12, 13, 14, 15],
"b": cudf.Series(
[10, None, np.NaN, 2234, None, np.NaN], nan_as_null=False
),
}
),
],
)
@pytest.mark.parametrize(
"op", ["max", "min", "sum", "product", "mean", "var", "std"]
)
@pytest.mark.parametrize("skipna", [True, False])
def test_rowwise_ops(data, op, skipna):
gdf = data
pdf = gdf.to_pandas()
if op in ("var", "std"):
expected = getattr(pdf, op)(axis=1, ddof=0, skipna=skipna)
got = getattr(gdf, op)(axis=1, ddof=0, skipna=skipna)
else:
expected = getattr(pdf, op)(axis=1, skipna=skipna)
got = getattr(gdf, op)(axis=1, skipna=skipna)
assert_eq(expected, got, check_exact=False)
@pytest.mark.parametrize(
"op", ["max", "min", "sum", "product", "mean", "var", "std"]
)
def test_rowwise_ops_nullable_dtypes_all_null(op):
gdf = cudf.DataFrame(
{
"a": [1, 2, 3, 4],
"b": [7, np.NaN, 9, 10],
"c": [np.NaN, np.NaN, np.NaN, np.NaN],
"d": cudf.Series([None, None, None, None], dtype="int64"),
"e": [100, None, 200, None],
"f": cudf.Series([10, None, np.NaN, 11], nan_as_null=False),
}
)
expected = cudf.Series([None, None, None, None], dtype="float64")
if op in ("var", "std"):
got = getattr(gdf, op)(axis=1, ddof=0, skipna=False)
else:
got = getattr(gdf, op)(axis=1, skipna=False)
assert_eq(got.null_count, expected.null_count)
assert_eq(got, expected)
@pytest.mark.parametrize(
"op,expected",
[
(
"max",
cudf.Series(
[10.0, None, np.NaN, 2234.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"min",
cudf.Series(
[10.0, None, np.NaN, 13.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"sum",
cudf.Series(
[20.0, None, np.NaN, 2247.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"product",
cudf.Series(
[100.0, None, np.NaN, 29042.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"mean",
cudf.Series(
[10.0, None, np.NaN, 1123.5, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"var",
cudf.Series(
[0.0, None, np.NaN, 1233210.25, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"std",
cudf.Series(
[0.0, None, np.NaN, 1110.5, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
],
)
def test_rowwise_ops_nullable_dtypes_partial_null(op, expected):
gdf = cudf.DataFrame(
{
"a": [10, 11, 12, 13, 14, 15],
"b": cudf.Series(
[10, None, np.NaN, 2234, None, np.NaN], nan_as_null=False,
),
}
)
if op in ("var", "std"):
got = getattr(gdf, op)(axis=1, ddof=0, skipna=False)
else:
got = getattr(gdf, op)(axis=1, skipna=False)
assert_eq(got.null_count, expected.null_count)
assert_eq(got, expected)
@pytest.mark.parametrize(
"op,expected",
[
(
"max",
cudf.Series([10, None, None, 2234, None, 453], dtype="int64",),
),
("min", cudf.Series([10, None, None, 13, None, 15], dtype="int64",),),
(
"sum",
cudf.Series([20, None, None, 2247, None, 468], dtype="int64",),
),
(
"product",
cudf.Series([100, None, None, 29042, None, 6795], dtype="int64",),
),
(
"mean",
cudf.Series(
[10.0, None, None, 1123.5, None, 234.0], dtype="float32",
),
),
(
"var",
cudf.Series(
[0.0, None, None, 1233210.25, None, 47961.0], dtype="float32",
),
),
(
"std",
cudf.Series(
[0.0, None, None, 1110.5, None, 219.0], dtype="float32",
),
),
],
)
def test_rowwise_ops_nullable_int_dtypes(op, expected):
gdf = cudf.DataFrame(
{
"a": [10, 11, None, 13, None, 15],
"b": cudf.Series(
[10, None, 323, 2234, None, 453], nan_as_null=False,
),
}
)
if op in ("var", "std"):
got = getattr(gdf, op)(axis=1, ddof=0, skipna=False)
else:
got = getattr(gdf, op)(axis=1, skipna=False)
assert_eq(got.null_count, expected.null_count)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[ms]"
),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[ns]"
),
"t3": cudf.Series(
["1960-08-31 06:00:00", "2030-08-02 10:00:00"], dtype="<M8[s]"
),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[us]"
),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[ms]"
),
"i1": cudf.Series([1001, 2002], dtype="int64"),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(["1940-08-31 06:00:00", None], dtype="<M8[ms]"),
"i1": cudf.Series([1001, 2002], dtype="int64"),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"i1": cudf.Series([1001, 2002], dtype="int64"),
"f1": cudf.Series([-100.001, 123.456], dtype="float64"),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"i1": cudf.Series([1001, 2002], dtype="int64"),
"f1": cudf.Series([-100.001, 123.456], dtype="float64"),
"b1": cudf.Series([True, False], dtype="bool"),
},
],
)
@pytest.mark.parametrize("op", ["max", "min"])
@pytest.mark.parametrize("skipna", [True, False])
def test_rowwise_ops_datetime_dtypes(data, op, skipna):
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
got = getattr(gdf, op)(axis=1, skipna=skipna)
expected = getattr(pdf, op)(axis=1, skipna=skipna)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data,op,skipna",
[
(
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ms]",
),
"t2": cudf.Series(
["1940-08-31 06:00:00", None], dtype="<M8[ms]"
),
},
"max",
True,
),
(
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ms]",
),
"t2": cudf.Series(
["1940-08-31 06:00:00", None], dtype="<M8[ms]"
),
},
"min",
False,
),
(
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ms]",
),
"t2": cudf.Series(
["1940-08-31 06:00:00", None], dtype="<M8[ms]"
),
},
"min",
True,
),
],
)
def test_rowwise_ops_datetime_dtypes_2(data, op, skipna):
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
got = getattr(gdf, op)(axis=1, skipna=skipna)
expected = getattr(pdf, op)(axis=1, skipna=skipna)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
(
{
"t1": pd.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ns]",
),
"t2": pd.Series(
["1940-08-31 06:00:00", pd.NaT], dtype="<M8[ns]"
),
}
)
],
)
def test_rowwise_ops_datetime_dtypes_pdbug(data):
pdf = pd.DataFrame(data)
gdf = cudf.from_pandas(pdf)
expected = pdf.max(axis=1, skipna=False)
got = gdf.max(axis=1, skipna=False)
if PANDAS_GE_120:
assert_eq(got, expected)
else:
# PANDAS BUG: https://github.com/pandas-dev/pandas/issues/36907
with pytest.raises(AssertionError, match="numpy array are different"):
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[5.0, 6.0, 7.0],
"single value",
np.array(1, dtype="int64"),
np.array(0.6273643, dtype="float64"),
],
)
def test_insert(data):
pdf = pd.DataFrame.from_dict({"A": [1, 2, 3], "B": ["a", "b", "c"]})
gdf = cudf.DataFrame.from_pandas(pdf)
# insertion by index
pdf.insert(0, "foo", data)
gdf.insert(0, "foo", data)
assert_eq(pdf, gdf)
pdf.insert(3, "bar", data)
gdf.insert(3, "bar", data)
assert_eq(pdf, gdf)
pdf.insert(1, "baz", data)
gdf.insert(1, "baz", data)
assert_eq(pdf, gdf)
# pandas insert doesn't support negative indexing
pdf.insert(len(pdf.columns), "qux", data)
gdf.insert(-1, "qux", data)
assert_eq(pdf, gdf)
def test_cov():
gdf = cudf.datasets.randomdata(10)
pdf = gdf.to_pandas()
assert_eq(pdf.cov(), gdf.cov())
@pytest.mark.xfail(reason="cupy-based cov does not support nulls")
def test_cov_nans():
pdf = pd.DataFrame()
pdf["a"] = [None, None, None, 2.00758632, None]
pdf["b"] = [0.36403686, None, None, None, None]
pdf["c"] = [None, None, None, 0.64882227, None]
pdf["d"] = [None, -1.46863125, None, 1.22477948, -0.06031689]
gdf = cudf.from_pandas(pdf)
assert_eq(pdf.cov(), gdf.cov())
@pytest.mark.parametrize(
"gsr",
[
cudf.Series([4, 2, 3]),
cudf.Series([4, 2, 3], index=["a", "b", "c"]),
cudf.Series([4, 2, 3], index=["a", "b", "d"]),
cudf.Series([4, 2], index=["a", "b"]),
cudf.Series([4, 2, 3], index=cudf.core.index.RangeIndex(0, 3)),
pytest.param(
cudf.Series([4, 2, 3, 4, 5], index=["a", "b", "d", "0", "12"]),
marks=pytest.mark.xfail,
),
],
)
@pytest.mark.parametrize("colnames", [["a", "b", "c"], [0, 1, 2]])
@pytest.mark.parametrize(
"op",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_df_sr_binop(gsr, colnames, op):
data = [[3.0, 2.0, 5.0], [3.0, None, 5.0], [6.0, 7.0, np.nan]]
data = dict(zip(colnames, data))
gsr = gsr.astype("float64")
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas(nullable=True)
psr = gsr.to_pandas(nullable=True)
expect = op(pdf, psr)
got = op(gdf, gsr).to_pandas(nullable=True)
assert_eq(expect, got, check_dtype=False)
expect = op(psr, pdf)
got = op(gsr, gdf).to_pandas(nullable=True)
assert_eq(expect, got, check_dtype=False)
@pytest.mark.parametrize(
"op",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
# comparison ops will temporarily XFAIL
# see PR https://github.com/rapidsai/cudf/pull/7491
pytest.param(operator.eq, marks=pytest.mark.xfail()),
pytest.param(operator.lt, marks=pytest.mark.xfail()),
pytest.param(operator.le, marks=pytest.mark.xfail()),
pytest.param(operator.gt, marks=pytest.mark.xfail()),
pytest.param(operator.ge, marks=pytest.mark.xfail()),
pytest.param(operator.ne, marks=pytest.mark.xfail()),
],
)
@pytest.mark.parametrize(
"gsr", [cudf.Series([1, 2, 3, 4, 5], index=["a", "b", "d", "0", "12"])]
)
def test_df_sr_binop_col_order(gsr, op):
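    # The Series' index labels do not line up with the DataFrame's columns,
    # so the cudf result's columns are reselected in the pandas result's
    # order before comparing.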
colnames = [0, 1, 2]
data = [[0, 2, 5], [3, None, 5], [6, 7, np.nan]]
data = dict(zip(colnames, data))
gdf = cudf.DataFrame(data)
pdf = pd.DataFrame.from_dict(data)
psr = gsr.to_pandas()
expect = op(pdf, psr).astype("float")
out = op(gdf, gsr).astype("float")
got = out[expect.columns]
assert_eq(expect, got)
@pytest.mark.parametrize("set_index", [None, "A", "C", "D"])
@pytest.mark.parametrize("index", [True, False])
@pytest.mark.parametrize("deep", [True, False])
def test_memory_usage(deep, index, set_index):
# Testing numerical/datetime by comparing with pandas
# (string and categorical columns will be different)
rows = int(100)
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int64"),
"B": np.arange(rows, dtype="int32"),
"C": np.arange(rows, dtype="float64"),
}
)
df["D"] = pd.to_datetime(df.A)
if set_index:
df = df.set_index(set_index)
gdf = cudf.from_pandas(df)
if index and set_index is None:
        # Special case: a RangeIndex is assumed to report zero memory usage
assert gdf.index.memory_usage(deep=deep) == 0
else:
# Check for Series only
assert df["B"].memory_usage(index=index, deep=deep) == gdf[
"B"
].memory_usage(index=index, deep=deep)
# Check for entire DataFrame
assert_eq(
df.memory_usage(index=index, deep=deep).sort_index(),
gdf.memory_usage(index=index, deep=deep).sort_index(),
)
@pytest.mark.xfail
def test_memory_usage_string():
rows = int(100)
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int32"),
"B": np.random.choice(["apple", "banana", "orange"], rows),
}
)
gdf = cudf.from_pandas(df)
# Check deep=False (should match pandas)
assert gdf.B.memory_usage(deep=False, index=False) == df.B.memory_usage(
deep=False, index=False
)
# Check string column
assert gdf.B.memory_usage(deep=True, index=False) == df.B.memory_usage(
deep=True, index=False
)
# Check string index
assert gdf.set_index("B").index.memory_usage(
deep=True
) == df.B.memory_usage(deep=True, index=False)
def test_memory_usage_cat():
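    # The expected deep memory usage of a categorical column is taken to be
    # the size of its categories plus the size of its codes.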
rows = int(100)
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int32"),
"B": np.random.choice(["apple", "banana", "orange"], rows),
}
)
df["B"] = df.B.astype("category")
gdf = cudf.from_pandas(df)
expected = (
gdf.B._column.cat().categories.__sizeof__()
+ gdf.B._column.cat().codes.__sizeof__()
)
# Check cat column
assert gdf.B.memory_usage(deep=True, index=False) == expected
# Check cat index
assert gdf.set_index("B").index.memory_usage(deep=True) == expected
def test_memory_usage_list():
df = cudf.DataFrame({"A": [[0, 1, 2, 3], [4, 5, 6], [7, 8], [9]]})
expected = (
df.A._column.offsets._memory_usage()
+ df.A._column.elements._memory_usage()
)
assert expected == df.A.memory_usage()
@pytest.mark.xfail
def test_memory_usage_multi():
rows = int(100)
deep = True
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int32"),
"B": np.random.choice(np.arange(3, dtype="int64"), rows),
"C": np.random.choice(np.arange(3, dtype="float64"), rows),
}
).set_index(["B", "C"])
gdf = cudf.from_pandas(df)
# Assume MultiIndex memory footprint is just that
# of the underlying columns, levels, and codes
expect = rows * 16 # Source Columns
expect += rows * 16 # Codes
expect += 3 * 8 # Level 0
expect += 3 * 8 # Level 1
assert expect == gdf.index.memory_usage(deep=deep)
@pytest.mark.parametrize(
"list_input",
[
pytest.param([1, 2, 3, 4], id="smaller"),
pytest.param([1, 2, 3, 4, 5, 6], id="larger"),
],
)
@pytest.mark.parametrize(
"key",
[
pytest.param("list_test", id="new_column"),
pytest.param("id", id="existing_column"),
],
)
def test_setitem_diff_size_list(list_input, key):
gdf = cudf.datasets.randomdata(5)
with pytest.raises(
ValueError, match=("All columns must be of equal length")
):
gdf[key] = list_input
@pytest.mark.parametrize(
"series_input",
[
pytest.param(cudf.Series([1, 2, 3, 4]), id="smaller_cudf"),
pytest.param(cudf.Series([1, 2, 3, 4, 5, 6]), id="larger_cudf"),
pytest.param(cudf.Series([1, 2, 3], index=[4, 5, 6]), id="index_cudf"),
pytest.param(pd.Series([1, 2, 3, 4]), id="smaller_pandas"),
pytest.param(pd.Series([1, 2, 3, 4, 5, 6]), id="larger_pandas"),
pytest.param(pd.Series([1, 2, 3], index=[4, 5, 6]), id="index_pandas"),
],
)
@pytest.mark.parametrize(
"key",
[
pytest.param("list_test", id="new_column"),
pytest.param("id", id="existing_column"),
],
)
def test_setitem_diff_size_series(series_input, key):
gdf = cudf.datasets.randomdata(5)
pdf = gdf.to_pandas()
pandas_input = series_input
if isinstance(pandas_input, cudf.Series):
pandas_input = pandas_input.to_pandas()
expect = pdf
expect[key] = pandas_input
got = gdf
got[key] = series_input
    # Pandas fills with NaN and typecasts to float64 if there are missing
    # values after alignment, so cast both to float64 for the comparison.
expect = expect.astype("float64")
got = got.astype("float64")
assert_eq(expect, got)
def test_tupleize_cols_False_set():
pdf = pd.DataFrame()
gdf = cudf.DataFrame()
pdf[("a", "b")] = [1]
gdf[("a", "b")] = [1]
assert_eq(pdf, gdf)
assert_eq(pdf.columns, gdf.columns)
def test_init_multiindex_from_dict():
pdf = pd.DataFrame({("a", "b"): [1]})
gdf = cudf.DataFrame({("a", "b"): [1]})
assert_eq(pdf, gdf)
assert_eq(pdf.columns, gdf.columns)
def test_change_column_dtype_in_empty():
pdf = pd.DataFrame({"a": [], "b": []})
gdf = cudf.from_pandas(pdf)
assert_eq(pdf, gdf)
pdf["b"] = pdf["b"].astype("int64")
gdf["b"] = gdf["b"].astype("int64")
assert_eq(pdf, gdf)
def test_dataframe_from_table_empty_index():
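    # Smoke test: round-tripping the column accessor through a Table and back
    # into a DataFrame should not raise, even without an index.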
df = cudf.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
odict = df._data
tbl = cudf._lib.table.Table(odict)
result = cudf.DataFrame._from_table(tbl) # noqa: F841
@pytest.mark.parametrize("dtype", ["int64", "str"])
def test_dataframe_from_dictionary_series_same_name_index(dtype):
pd_idx1 = pd.Index([1, 2, 0], name="test_index").astype(dtype)
pd_idx2 = pd.Index([2, 0, 1], name="test_index").astype(dtype)
pd_series1 = pd.Series([1, 2, 3], index=pd_idx1)
pd_series2 = pd.Series([1, 2, 3], index=pd_idx2)
gd_idx1 = cudf.from_pandas(pd_idx1)
gd_idx2 = cudf.from_pandas(pd_idx2)
gd_series1 = cudf.Series([1, 2, 3], index=gd_idx1)
gd_series2 = cudf.Series([1, 2, 3], index=gd_idx2)
expect = pd.DataFrame({"a": pd_series1, "b": pd_series2})
got = cudf.DataFrame({"a": gd_series1, "b": gd_series2})
if dtype == "str":
# Pandas actually loses its index name erroneously here...
expect.index.name = "test_index"
assert_eq(expect, got)
assert expect.index.names == got.index.names
@pytest.mark.parametrize(
"arg", [slice(2, 8, 3), slice(1, 20, 4), slice(-2, -6, -2)]
)
def test_dataframe_strided_slice(arg):
mul = pd.DataFrame(
{
"Index": [1, 2, 3, 4, 5, 6, 7, 8, 9],
"AlphaIndex": ["a", "b", "c", "d", "e", "f", "g", "h", "i"],
}
)
pdf = pd.DataFrame(
{"Val": [10, 9, 8, 7, 6, 5, 4, 3, 2]},
index=pd.MultiIndex.from_frame(mul),
)
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf[arg]
got = gdf[arg]
assert_eq(expect, got)
@pytest.mark.parametrize(
"data,condition,other,error",
[
(pd.Series(range(5)), pd.Series(range(5)) > 0, None, None),
(pd.Series(range(5)), pd.Series(range(5)) > 1, None, None),
(pd.Series(range(5)), pd.Series(range(5)) > 1, 10, None),
(
pd.Series(range(5)),
pd.Series(range(5)) > 1,
pd.Series(range(5, 10)),
None,
),
(
pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"]),
(
pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"])
% 3
)
== 0,
-pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"]),
None,
),
(
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}),
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}) == 4,
None,
None,
),
(
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}),
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}) != 4,
None,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[True, True, True],
None,
ValueError,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[True, True, True, False],
None,
ValueError,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[[True, True, True, False], [True, True, True, False]],
None,
ValueError,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[[True, True], [False, True], [True, False], [False, True]],
None,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
cuda.to_device(
np.array(
[[True, True], [False, True], [True, False], [False, True]]
)
),
None,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
cupy.array(
[[True, True], [False, True], [True, False], [False, True]]
),
17,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[[True, True], [False, True], [True, False], [False, True]],
17,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[
[True, True, False, True],
[True, True, False, True],
[True, True, False, True],
[True, True, False, True],
],
None,
ValueError,
),
(
pd.Series([1, 2, np.nan]),
pd.Series([1, 2, np.nan]) == 4,
None,
None,
),
(
pd.Series([1, 2, np.nan]),
pd.Series([1, 2, np.nan]) != 4,
None,
None,
),
(
pd.Series([4, np.nan, 6]),
pd.Series([4, np.nan, 6]) == 4,
None,
None,
),
(
pd.Series([4, np.nan, 6]),
pd.Series([4, np.nan, 6]) != 4,
None,
None,
),
(
pd.Series([4, np.nan, 6], dtype="category"),
pd.Series([4, np.nan, 6], dtype="category") != 4,
None,
None,
),
(
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category"),
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category") == "b",
None,
None,
),
(
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category"),
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category") == "b",
"s",
None,
),
(
pd.Series([1, 2, 3, 2, 5]),
pd.Series([1, 2, 3, 2, 5]) == 2,
pd.DataFrame(
{
"a": pd.Series([1, 2, 3, 2, 5]),
"b": pd.Series([1, 2, 3, 2, 5]),
}
),
NotImplementedError,
),
],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_df_sr_mask_where(data, condition, other, error, inplace):
ps_where = data
gs_where = cudf.from_pandas(data)
ps_mask = ps_where.copy(deep=True)
gs_mask = gs_where.copy(deep=True)
if hasattr(condition, "__cuda_array_interface__"):
if type(condition).__module__.split(".")[0] == "cupy":
ps_condition = cupy.asnumpy(condition)
else:
ps_condition = np.array(condition).astype("bool")
else:
ps_condition = condition
if type(condition).__module__.split(".")[0] == "pandas":
gs_condition = cudf.from_pandas(condition)
else:
gs_condition = condition
ps_other = other
if type(other).__module__.split(".")[0] == "pandas":
gs_other = cudf.from_pandas(other)
else:
gs_other = other
if error is None:
expect_where = ps_where.where(
ps_condition, other=ps_other, inplace=inplace
)
got_where = gs_where.where(
gs_condition, other=gs_other, inplace=inplace
)
expect_mask = ps_mask.mask(
ps_condition, other=ps_other, inplace=inplace
)
got_mask = gs_mask.mask(gs_condition, other=gs_other, inplace=inplace)
if inplace:
expect_where = ps_where
got_where = gs_where
expect_mask = ps_mask
got_mask = gs_mask
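        # Categorical results are compared via their codes and categories;
        # null codes are filled with -1 to match pandas' missing-code value.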
if pd.api.types.is_categorical_dtype(expect_where):
np.testing.assert_array_equal(
expect_where.cat.codes,
got_where.cat.codes.astype(expect_where.cat.codes.dtype)
.fillna(-1)
.to_array(),
)
assert_eq(expect_where.cat.categories, got_where.cat.categories)
np.testing.assert_array_equal(
expect_mask.cat.codes,
got_mask.cat.codes.astype(expect_mask.cat.codes.dtype)
.fillna(-1)
.to_array(),
)
assert_eq(expect_mask.cat.categories, got_mask.cat.categories)
else:
assert_eq(
expect_where.fillna(-1),
got_where.fillna(-1),
check_dtype=False,
)
assert_eq(
expect_mask.fillna(-1), got_mask.fillna(-1), check_dtype=False
)
else:
assert_exceptions_equal(
lfunc=ps_where.where,
rfunc=gs_where.where,
lfunc_args_and_kwargs=(
[ps_condition],
{"other": ps_other, "inplace": inplace},
),
rfunc_args_and_kwargs=(
[gs_condition],
{"other": gs_other, "inplace": inplace},
),
compare_error_message=False
if error is NotImplementedError
else True,
)
assert_exceptions_equal(
lfunc=ps_mask.mask,
rfunc=gs_mask.mask,
lfunc_args_and_kwargs=(
[ps_condition],
{"other": ps_other, "inplace": inplace},
),
rfunc_args_and_kwargs=(
[gs_condition],
{"other": gs_other, "inplace": inplace},
),
compare_error_message=False,
)
@pytest.mark.parametrize(
"data,condition,other,has_cat",
[
(
pd.DataFrame(
{
"a": pd.Series(["a", "a", "b", "c", "a", "d", "d", "a"]),
"b": pd.Series(["o", "p", "q", "e", "p", "p", "a", "a"]),
}
),
pd.DataFrame(
{
"a": pd.Series(["a", "a", "b", "c", "a", "d", "d", "a"]),
"b": pd.Series(["o", "p", "q", "e", "p", "p", "a", "a"]),
}
)
!= "a",
None,
None,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
!= "a",
None,
True,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
== "a",
None,
True,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
!= "a",
"a",
True,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
== "a",
"a",
True,
),
],
)
def test_df_string_cat_types_mask_where(data, condition, other, has_cat):
ps = data
gs = cudf.from_pandas(data)
ps_condition = condition
if type(condition).__module__.split(".")[0] == "pandas":
gs_condition = cudf.from_pandas(condition)
else:
gs_condition = condition
ps_other = other
if type(other).__module__.split(".")[0] == "pandas":
gs_other = cudf.from_pandas(other)
else:
gs_other = other
expect_where = ps.where(ps_condition, other=ps_other)
got_where = gs.where(gs_condition, other=gs_other)
expect_mask = ps.mask(ps_condition, other=ps_other)
got_mask = gs.mask(gs_condition, other=gs_other)
if has_cat is None:
assert_eq(
expect_where.fillna(-1).astype("str"),
got_where.fillna(-1),
check_dtype=False,
)
assert_eq(
expect_mask.fillna(-1).astype("str"),
got_mask.fillna(-1),
check_dtype=False,
)
else:
assert_eq(expect_where, got_where, check_dtype=False)
assert_eq(expect_mask, got_mask, check_dtype=False)
@pytest.mark.parametrize(
"data,expected_upcast_type,error",
[
(
pd.Series([random.random() for _ in range(10)], dtype="float32"),
np.dtype("float32"),
None,
),
(
pd.Series([random.random() for _ in range(10)], dtype="float16"),
np.dtype("float32"),
None,
),
(
pd.Series([random.random() for _ in range(10)], dtype="float64"),
np.dtype("float64"),
None,
),
(
pd.Series([random.random() for _ in range(10)], dtype="float128"),
None,
NotImplementedError,
),
],
)
def test_from_pandas_unsupported_types(data, expected_upcast_type, error):
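    # float16 input is expected to be upcast to float32, while float128 has
    # no GPU equivalent and should raise NotImplementedError.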
pdf = pd.DataFrame({"one_col": data})
if error == NotImplementedError:
with pytest.raises(error):
cudf.from_pandas(data)
with pytest.raises(error):
cudf.Series(data)
with pytest.raises(error):
cudf.from_pandas(pdf)
with pytest.raises(error):
cudf.DataFrame(pdf)
else:
df = cudf.from_pandas(data)
assert_eq(data, df, check_dtype=False)
assert df.dtype == expected_upcast_type
df = cudf.Series(data)
assert_eq(data, df, check_dtype=False)
assert df.dtype == expected_upcast_type
df = cudf.from_pandas(pdf)
assert_eq(pdf, df, check_dtype=False)
assert df["one_col"].dtype == expected_upcast_type
df = cudf.DataFrame(pdf)
assert_eq(pdf, df, check_dtype=False)
assert df["one_col"].dtype == expected_upcast_type
@pytest.mark.parametrize("nan_as_null", [True, False])
@pytest.mark.parametrize("index", [None, "a", ["a", "b"]])
def test_from_pandas_nan_as_null(nan_as_null, index):
data = [np.nan, 2.0, 3.0]
if index is None:
pdf = pd.DataFrame({"a": data, "b": data})
expected = cudf.DataFrame(
{
"a": column.as_column(data, nan_as_null=nan_as_null),
"b": column.as_column(data, nan_as_null=nan_as_null),
}
)
else:
pdf = pd.DataFrame({"a": data, "b": data}).set_index(index)
expected = cudf.DataFrame(
{
"a": column.as_column(data, nan_as_null=nan_as_null),
"b": column.as_column(data, nan_as_null=nan_as_null),
}
)
expected = expected.set_index(index)
got = cudf.from_pandas(pdf, nan_as_null=nan_as_null)
assert_eq(expected, got)
@pytest.mark.parametrize("nan_as_null", [True, False])
def test_from_pandas_for_series_nan_as_null(nan_as_null):
data = [np.nan, 2.0, 3.0]
psr = pd.Series(data)
expected = cudf.Series(column.as_column(data, nan_as_null=nan_as_null))
got = cudf.from_pandas(psr, nan_as_null=nan_as_null)
assert_eq(expected, got)
@pytest.mark.parametrize("copy", [True, False])
def test_df_series_dataframe_astype_copy(copy):
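    # The same-dtype astype at the end writes through the result to check
    # that copy/view behaviour stays consistent with pandas.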
gdf = cudf.DataFrame({"col1": [1, 2], "col2": [3, 4]})
pdf = gdf.to_pandas()
assert_eq(
gdf.astype(dtype="float", copy=copy),
pdf.astype(dtype="float", copy=copy),
)
assert_eq(gdf, pdf)
gsr = cudf.Series([1, 2])
psr = gsr.to_pandas()
assert_eq(
gsr.astype(dtype="float", copy=copy),
psr.astype(dtype="float", copy=copy),
)
assert_eq(gsr, psr)
gsr = cudf.Series([1, 2])
psr = gsr.to_pandas()
actual = gsr.astype(dtype="int64", copy=copy)
expected = psr.astype(dtype="int64", copy=copy)
assert_eq(expected, actual)
assert_eq(gsr, psr)
actual[0] = 3
expected[0] = 3
assert_eq(gsr, psr)
@pytest.mark.parametrize("copy", [True, False])
def test_df_series_dataframe_astype_dtype_dict(copy):
gdf = cudf.DataFrame({"col1": [1, 2], "col2": [3, 4]})
pdf = gdf.to_pandas()
assert_eq(
gdf.astype(dtype={"col1": "float"}, copy=copy),
pdf.astype(dtype={"col1": "float"}, copy=copy),
)
assert_eq(gdf, pdf)
gsr = cudf.Series([1, 2])
psr = gsr.to_pandas()
assert_eq(
gsr.astype(dtype={None: "float"}, copy=copy),
psr.astype(dtype={None: "float"}, copy=copy),
)
assert_eq(gsr, psr)
assert_exceptions_equal(
lfunc=psr.astype,
rfunc=gsr.astype,
lfunc_args_and_kwargs=([], {"dtype": {"a": "float"}, "copy": copy}),
rfunc_args_and_kwargs=([], {"dtype": {"a": "float"}, "copy": copy}),
)
gsr = cudf.Series([1, 2])
psr = gsr.to_pandas()
actual = gsr.astype({None: "int64"}, copy=copy)
expected = psr.astype({None: "int64"}, copy=copy)
assert_eq(expected, actual)
assert_eq(gsr, psr)
actual[0] = 3
expected[0] = 3
assert_eq(gsr, psr)
@pytest.mark.parametrize(
"data,columns",
[
([1, 2, 3, 100, 112, 35464], ["a"]),
(range(100), None),
([], None),
((-10, 21, 32, 32, 1, 2, 3), ["p"]),
((), None),
([[1, 2, 3], [1, 2, 3]], ["col1", "col2", "col3"]),
([range(100), range(100)], ["range" + str(i) for i in range(100)]),
(((1, 2, 3), (1, 2, 3)), ["tuple0", "tuple1", "tuple2"]),
([[1, 2, 3]], ["list col1", "list col2", "list col3"]),
([range(100)], ["range" + str(i) for i in range(100)]),
(((1, 2, 3),), ["k1", "k2", "k3"]),
],
)
def test_dataframe_init_1d_list(data, columns):
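    # Flat lists become a single column and nested lists become rows; both
    # should match pandas' interpretation, with and without explicit columns.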
expect = pd.DataFrame(data, columns=columns)
actual = cudf.DataFrame(data, columns=columns)
assert_eq(
expect, actual, check_index_type=False if len(data) == 0 else True
)
expect = pd.DataFrame(data, columns=None)
actual = cudf.DataFrame(data, columns=None)
assert_eq(
expect, actual, check_index_type=False if len(data) == 0 else True
)
@pytest.mark.parametrize(
"data,cols,index",
[
(
np.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "b"],
["a", "b", "c", "d"],
),
(
np.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "b"],
[0, 20, 30, 10],
),
(
np.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "b"],
[0, 1, 2, 3],
),
(np.array([11, 123, -2342, 232]), ["a"], [1, 2, 11, 12]),
(np.array([11, 123, -2342, 232]), ["a"], ["khsdjk", "a", "z", "kk"]),
(
cupy.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "z"],
["a", "z", "a", "z"],
),
(cupy.array([11, 123, -2342, 232]), ["z"], [0, 1, 1, 0]),
(cupy.array([11, 123, -2342, 232]), ["z"], [1, 2, 3, 4]),
(cupy.array([11, 123, -2342, 232]), ["z"], ["a", "z", "d", "e"]),
(np.random.randn(2, 4), ["a", "b", "c", "d"], ["a", "b"]),
(np.random.randn(2, 4), ["a", "b", "c", "d"], [1, 0]),
(cupy.random.randn(2, 4), ["a", "b", "c", "d"], ["a", "b"]),
(cupy.random.randn(2, 4), ["a", "b", "c", "d"], [1, 0]),
],
)
def test_dataframe_init_from_arrays_cols(data, cols, index):
gd_data = data
if isinstance(data, cupy.core.ndarray):
# pandas can't handle cupy arrays in general
pd_data = data.get()
# additional test for building DataFrame with gpu array whose
# cuda array interface has no `descr` attribute
numba_data = cuda.as_cuda_array(data)
else:
pd_data = data
numba_data = None
# verify with columns & index
pdf = pd.DataFrame(pd_data, columns=cols, index=index)
gdf = cudf.DataFrame(gd_data, columns=cols, index=index)
assert_eq(pdf, gdf, check_dtype=False)
# verify with columns
pdf = pd.DataFrame(pd_data, columns=cols)
gdf = cudf.DataFrame(gd_data, columns=cols)
assert_eq(pdf, gdf, check_dtype=False)
pdf = pd.DataFrame(pd_data)
gdf = cudf.DataFrame(gd_data)
assert_eq(pdf, gdf, check_dtype=False)
if numba_data is not None:
gdf = cudf.DataFrame(numba_data)
assert_eq(pdf, gdf, check_dtype=False)
@pytest.mark.parametrize(
"col_data",
[
range(5),
["a", "b", "x", "y", "z"],
[1.0, 0.213, 0.34332],
["a"],
[1],
[0.2323],
[],
],
)
@pytest.mark.parametrize(
"assign_val",
[
1,
2,
np.array(2),
cupy.array(2),
0.32324,
np.array(0.34248),
cupy.array(0.34248),
"abc",
np.array("abc", dtype="object"),
np.array("abc", dtype="str"),
np.array("abc"),
None,
],
)
def test_dataframe_assign_scalar(col_data, assign_val):
pdf = pd.DataFrame({"a": col_data})
gdf = cudf.DataFrame({"a": col_data})
pdf["b"] = (
cupy.asnumpy(assign_val)
if isinstance(assign_val, cupy.ndarray)
else assign_val
)
gdf["b"] = assign_val
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"col_data",
[
1,
2,
np.array(2),
cupy.array(2),
0.32324,
np.array(0.34248),
cupy.array(0.34248),
"abc",
np.array("abc", dtype="object"),
np.array("abc", dtype="str"),
np.array("abc"),
None,
],
)
@pytest.mark.parametrize(
"assign_val",
[
1,
2,
np.array(2),
cupy.array(2),
0.32324,
np.array(0.34248),
cupy.array(0.34248),
"abc",
np.array("abc", dtype="object"),
np.array("abc", dtype="str"),
np.array("abc"),
None,
],
)
def test_dataframe_assign_scalar_with_scalar_cols(col_data, assign_val):
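    # pandas refuses to build a DataFrame from all-scalar values without an
    # explicit index, hence the dummy index on both frames.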
pdf = pd.DataFrame(
{
"a": cupy.asnumpy(col_data)
if isinstance(col_data, cupy.ndarray)
else col_data
},
index=["dummy_mandatory_index"],
)
gdf = cudf.DataFrame({"a": col_data}, index=["dummy_mandatory_index"])
pdf["b"] = (
cupy.asnumpy(assign_val)
if isinstance(assign_val, cupy.ndarray)
else assign_val
)
gdf["b"] = assign_val
assert_eq(pdf, gdf)
def test_dataframe_info_basic():
buffer = io.StringIO()
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
StringIndex: 10 entries, a to 1111
Data columns (total 10 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 0 10 non-null float64
1 1 10 non-null float64
2 2 10 non-null float64
3 3 10 non-null float64
4 4 10 non-null float64
5 5 10 non-null float64
6 6 10 non-null float64
7 7 10 non-null float64
8 8 10 non-null float64
9 9 10 non-null float64
dtypes: float64(10)
memory usage: 859.0+ bytes
"""
)
df = pd.DataFrame(
np.random.randn(10, 10),
index=["a", "2", "3", "4", "5", "6", "7", "8", "100", "1111"],
)
cudf.from_pandas(df).info(buf=buffer, verbose=True)
s = buffer.getvalue()
assert str_cmp == s
def test_dataframe_info_verbose_mem_usage():
buffer = io.StringIO()
df = pd.DataFrame({"a": [1, 2, 3], "b": ["safdas", "assa", "asdasd"]})
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 3 entries, 0 to 2
Data columns (total 2 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 a 3 non-null int64
1 b 3 non-null object
dtypes: int64(1), object(1)
memory usage: 56.0+ bytes
"""
)
cudf.from_pandas(df).info(buf=buffer, verbose=True)
s = buffer.getvalue()
assert str_cmp == s
buffer.truncate(0)
buffer.seek(0)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 3 entries, 0 to 2
Columns: 2 entries, a to b
dtypes: int64(1), object(1)
memory usage: 56.0+ bytes
"""
)
cudf.from_pandas(df).info(buf=buffer, verbose=False)
s = buffer.getvalue()
assert str_cmp == s
buffer.truncate(0)
buffer.seek(0)
df = pd.DataFrame(
{"a": [1, 2, 3], "b": ["safdas", "assa", "asdasd"]},
index=["sdfdsf", "sdfsdfds", "dsfdf"],
)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
StringIndex: 3 entries, sdfdsf to dsfdf
Data columns (total 2 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 a 3 non-null int64
1 b 3 non-null object
dtypes: int64(1), object(1)
memory usage: 91.0 bytes
"""
)
cudf.from_pandas(df).info(buf=buffer, verbose=True, memory_usage="deep")
s = buffer.getvalue()
assert str_cmp == s
buffer.truncate(0)
buffer.seek(0)
int_values = [1, 2, 3, 4, 5]
text_values = ["alpha", "beta", "gamma", "delta", "epsilon"]
float_values = [0.0, 0.25, 0.5, 0.75, 1.0]
df = cudf.DataFrame(
{
"int_col": int_values,
"text_col": text_values,
"float_col": float_values,
}
)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Data columns (total 3 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 int_col 5 non-null int64
1 text_col 5 non-null object
2 float_col 5 non-null float64
dtypes: float64(1), int64(1), object(1)
memory usage: 130.0 bytes
"""
)
df.info(buf=buffer, verbose=True, memory_usage="deep")
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
def test_dataframe_info_null_counts():
int_values = [1, 2, 3, 4, 5]
text_values = ["alpha", "beta", "gamma", "delta", "epsilon"]
float_values = [0.0, 0.25, 0.5, 0.75, 1.0]
df = cudf.DataFrame(
{
"int_col": int_values,
"text_col": text_values,
"float_col": float_values,
}
)
buffer = io.StringIO()
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Data columns (total 3 columns):
# Column Dtype
--- ------ -----
0 int_col int64
1 text_col object
2 float_col float64
dtypes: float64(1), int64(1), object(1)
memory usage: 130.0+ bytes
"""
)
df.info(buf=buffer, verbose=True, null_counts=False)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
df.info(buf=buffer, verbose=True, max_cols=0)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
df = cudf.DataFrame()
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 0 entries
Empty DataFrame"""
)
df.info(buf=buffer, verbose=True)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
df = cudf.DataFrame(
{
"a": [1, 2, 3, None, 10, 11, 12, None],
"b": ["a", "b", "c", "sd", "sdf", "sd", None, None],
}
)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 8 entries, 0 to 7
Data columns (total 2 columns):
# Column Dtype
--- ------ -----
0 a int64
1 b object
dtypes: int64(1), object(1)
memory usage: 238.0+ bytes
"""
)
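    # With display.max_info_rows below the row count, the null-count column
    # is expected to be omitted from the summary.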
pd.options.display.max_info_rows = 2
df.info(buf=buffer, max_cols=2, null_counts=None)
pd.reset_option("display.max_info_rows")
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 8 entries, 0 to 7
Data columns (total 2 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 a 6 non-null int64
1 b 6 non-null object
dtypes: int64(1), object(1)
memory usage: 238.0+ bytes
"""
)
df.info(buf=buffer, max_cols=2, null_counts=None)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
df.info(buf=buffer, null_counts=True)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
@pytest.mark.parametrize(
"data1",
[
[1, 2, 3, 4, 5, 6, 7],
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0],
[
1.9876543,
2.9876654,
3.9876543,
4.1234587,
5.23,
6.88918237,
7.00001,
],
[
-1.9876543,
-2.9876654,
-3.9876543,
-4.1234587,
-5.23,
-6.88918237,
-7.00001,
],
[
1.987654321,
2.987654321,
3.987654321,
0.1221,
2.1221,
0.112121,
-21.1212,
],
[
-1.987654321,
-2.987654321,
-3.987654321,
-0.1221,
-2.1221,
-0.112121,
21.1212,
],
],
)
@pytest.mark.parametrize(
"data2",
[
[1, 2, 3, 4, 5, 6, 7],
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0],
[
1.9876543,
2.9876654,
3.9876543,
4.1234587,
5.23,
6.88918237,
7.00001,
],
[
-1.9876543,
-2.9876654,
-3.9876543,
-4.1234587,
-5.23,
-6.88918237,
-7.00001,
],
[
1.987654321,
2.987654321,
3.987654321,
0.1221,
2.1221,
0.112121,
-21.1212,
],
[
-1.987654321,
-2.987654321,
-3.987654321,
-0.1221,
-2.1221,
-0.112121,
21.1212,
],
],
)
@pytest.mark.parametrize("rtol", [0, 0.01, 1e-05, 1e-08, 5e-1, 50.12])
@pytest.mark.parametrize("atol", [0, 0.01, 1e-05, 1e-08, 50.12])
def test_cudf_isclose(data1, data2, rtol, atol):
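    # cudf.isclose should accept cudf/pandas Series, numpy/cupy arrays and
    # plain lists, all agreeing with cupy.isclose on the raw values.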
array1 = cupy.array(data1)
array2 = cupy.array(data2)
expected = cudf.Series(cupy.isclose(array1, array2, rtol=rtol, atol=atol))
actual = cudf.isclose(
cudf.Series(data1), cudf.Series(data2), rtol=rtol, atol=atol
)
assert_eq(expected, actual)
actual = cudf.isclose(data1, data2, rtol=rtol, atol=atol)
assert_eq(expected, actual)
actual = cudf.isclose(
cupy.array(data1), cupy.array(data2), rtol=rtol, atol=atol
)
assert_eq(expected, actual)
actual = cudf.isclose(
np.array(data1), np.array(data2), rtol=rtol, atol=atol
)
assert_eq(expected, actual)
actual = cudf.isclose(
pd.Series(data1), pd.Series(data2), rtol=rtol, atol=atol
)
assert_eq(expected, actual)
@pytest.mark.parametrize(
"data1",
[
[
-1.9876543,
-2.9876654,
np.nan,
-4.1234587,
-5.23,
-6.88918237,
-7.00001,
],
[
1.987654321,
2.987654321,
3.987654321,
0.1221,
2.1221,
np.nan,
-21.1212,
],
],
)
@pytest.mark.parametrize(
"data2",
[
[
-1.9876543,
-2.9876654,
-3.9876543,
-4.1234587,
-5.23,
-6.88918237,
-7.00001,
],
[
1.987654321,
2.987654321,
3.987654321,
0.1221,
2.1221,
0.112121,
-21.1212,
],
[
-1.987654321,
-2.987654321,
-3.987654321,
np.nan,
np.nan,
np.nan,
21.1212,
],
],
)
@pytest.mark.parametrize("equal_nan", [True, False])
def test_cudf_isclose_nulls(data1, data2, equal_nan):
array1 = cupy.array(data1)
array2 = cupy.array(data2)
expected = cudf.Series(cupy.isclose(array1, array2, equal_nan=equal_nan))
actual = cudf.isclose(
cudf.Series(data1), cudf.Series(data2), equal_nan=equal_nan
)
assert_eq(expected, actual, check_dtype=False)
actual = cudf.isclose(data1, data2, equal_nan=equal_nan)
assert_eq(expected, actual, check_dtype=False)
def test_cudf_isclose_different_index():
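    # isclose aligns on index labels before comparing, so reordered indices
    # still match element-wise; labels missing from the other side are False.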
s1 = cudf.Series(
[-1.9876543, -2.9876654, -3.9876543, -4.1234587, -5.23, -7.00001],
index=[0, 1, 2, 3, 4, 5],
)
s2 = cudf.Series(
[-1.9876543, -2.9876654, -7.00001, -4.1234587, -5.23, -3.9876543],
index=[0, 1, 5, 3, 4, 2],
)
expected = cudf.Series([True] * 6, index=s1.index)
assert_eq(expected, cudf.isclose(s1, s2))
s1 = cudf.Series(
[-1.9876543, -2.9876654, -3.9876543, -4.1234587, -5.23, -7.00001],
index=[0, 1, 2, 3, 4, 5],
)
s2 = cudf.Series(
[-1.9876543, -2.9876654, -7.00001, -4.1234587, -5.23, -3.9876543],
index=[0, 1, 5, 10, 4, 2],
)
expected = cudf.Series(
[True, True, True, False, True, True], index=s1.index
)
assert_eq(expected, cudf.isclose(s1, s2))
s1 = cudf.Series(
[-1.9876543, -2.9876654, -3.9876543, -4.1234587, -5.23, -7.00001],
index=[100, 1, 2, 3, 4, 5],
)
s2 = cudf.Series(
[-1.9876543, -2.9876654, -7.00001, -4.1234587, -5.23, -3.9876543],
index=[0, 1, 100, 10, 4, 2],
)
expected = cudf.Series(
[False, True, True, False, True, False], index=s1.index
)
assert_eq(expected, cudf.isclose(s1, s2))
def test_dataframe_to_dict_error():
df = cudf.DataFrame({"a": [1, 2, 3], "b": [9, 5, 3]})
with pytest.raises(
TypeError,
match=re.escape(
r"cuDF does not support conversion to host memory "
r"via `to_dict()` method. Consider using "
r"`.to_pandas().to_dict()` to construct a Python dictionary."
),
):
df.to_dict()
with pytest.raises(
TypeError,
match=re.escape(
r"cuDF does not support conversion to host memory "
r"via `to_dict()` method. Consider using "
r"`.to_pandas().to_dict()` to construct a Python dictionary."
),
):
df["a"].to_dict()
@pytest.mark.parametrize(
"df",
[
pd.DataFrame({"a": [1, 2, 3, 4, 5, 10, 11, 12, 33, 55, 19]}),
pd.DataFrame(
{
"one": [1, 2, 3, 4, 5, 10],
"two": ["abc", "def", "ghi", "xyz", "pqr", "abc"],
}
),
pd.DataFrame(
{
"one": [1, 2, 3, 4, 5, 10],
"two": ["abc", "def", "ghi", "xyz", "pqr", "abc"],
},
index=[10, 20, 30, 40, 50, 60],
),
pd.DataFrame(
{
"one": [1, 2, 3, 4, 5, 10],
"two": ["abc", "def", "ghi", "xyz", "pqr", "abc"],
},
index=["a", "b", "c", "d", "e", "f"],
),
pd.DataFrame(index=["a", "b", "c", "d", "e", "f"]),
pd.DataFrame(columns=["a", "b", "c", "d", "e", "f"]),
pd.DataFrame(index=[10, 11, 12]),
pd.DataFrame(columns=[10, 11, 12]),
pd.DataFrame(),
pd.DataFrame({"one": [], "two": []}),
pd.DataFrame({2: [], 1: []}),
pd.DataFrame(
{
0: [1, 2, 3, 4, 5, 10],
1: ["abc", "def", "ghi", "xyz", "pqr", "abc"],
100: ["a", "b", "b", "x", "z", "a"],
},
index=[10, 20, 30, 40, 50, 60],
),
],
)
def test_dataframe_keys(df):
gdf = cudf.from_pandas(df)
assert_eq(df.keys(), gdf.keys())
@pytest.mark.parametrize(
"ps",
[
pd.Series([1, 2, 3, 4, 5, 10, 11, 12, 33, 55, 19]),
pd.Series(["abc", "def", "ghi", "xyz", "pqr", "abc"]),
pd.Series(
[1, 2, 3, 4, 5, 10],
index=["abc", "def", "ghi", "xyz", "pqr", "abc"],
),
pd.Series(
["abc", "def", "ghi", "xyz", "pqr", "abc"],
index=[1, 2, 3, 4, 5, 10],
),
pd.Series(index=["a", "b", "c", "d", "e", "f"], dtype="float64"),
pd.Series(index=[10, 11, 12], dtype="float64"),
pd.Series(dtype="float64"),
pd.Series([], dtype="float64"),
],
)
def test_series_keys(ps):
gds = cudf.from_pandas(ps)
if len(ps) == 0 and not isinstance(ps.index, pd.RangeIndex):
assert_eq(ps.keys().astype("float64"), gds.keys())
else:
assert_eq(ps.keys(), gds.keys())
@pytest.mark.parametrize(
"df",
[
pd.DataFrame(),
pd.DataFrame(index=[10, 20, 30]),
pd.DataFrame({"first_col": [], "second_col": [], "third_col": []}),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB")),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[10, 20]),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[7, 8]),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[7, 20, 11, 9],
),
pd.DataFrame({"l": [10]}),
pd.DataFrame({"l": [10]}, index=[100]),
pd.DataFrame({"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]}),
pd.DataFrame(
{"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]},
index=[100, 200, 300, 400, 500, 0],
),
],
)
@pytest.mark.parametrize(
"other",
[
pd.DataFrame([[5, 6], [7, 8]], columns=list("AB")),
pd.DataFrame([[5, 6], [7, 8]], columns=list("BD")),
pd.DataFrame([[5, 6], [7, 8]], columns=list("DE")),
pd.DataFrame(),
pd.DataFrame(
{"c": [10, 11, 22, 33, 44, 100]}, index=[7, 8, 9, 10, 11, 20]
),
pd.DataFrame({"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]}),
pd.DataFrame({"l": [10]}),
pd.DataFrame({"l": [10]}, index=[200]),
pd.DataFrame([]),
pd.DataFrame({"first_col": [], "second_col": [], "third_col": []}),
pd.DataFrame([], index=[100]),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[0, 100, 200, 300],
),
],
)
@pytest.mark.parametrize("sort", [False, True])
@pytest.mark.parametrize("ignore_index", [True, False])
def test_dataframe_append_dataframe(df, other, sort, ignore_index):
pdf = df
other_pd = other
gdf = cudf.from_pandas(df)
other_gd = cudf.from_pandas(other)
expected = pdf.append(other_pd, sort=sort, ignore_index=ignore_index)
actual = gdf.append(other_gd, sort=sort, ignore_index=ignore_index)
if expected.shape != df.shape:
assert_eq(expected.fillna(-1), actual.fillna(-1), check_dtype=False)
else:
assert_eq(
expected, actual, check_index_type=False if gdf.empty else True
)
@pytest.mark.parametrize(
"df",
[
pd.DataFrame(),
pd.DataFrame(index=[10, 20, 30]),
pd.DataFrame({12: [], 22: []}),
pd.DataFrame([[1, 2], [3, 4]], columns=[10, 20]),
pd.DataFrame([[1, 2], [3, 4]], columns=[0, 1], index=[10, 20]),
pd.DataFrame([[1, 2], [3, 4]], columns=[1, 0], index=[7, 8]),
pd.DataFrame(
{
23: [315.3324, 3243.32432, 3232.332, -100.32],
33: [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
0: [315.3324, 3243.32432, 3232.332, -100.32],
1: [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[7, 20, 11, 9],
),
],
)
@pytest.mark.parametrize(
"other",
[
pd.Series([10, 11, 23, 234, 13]),
pytest.param(
pd.Series([10, 11, 23, 234, 13], index=[11, 12, 13, 44, 33]),
marks=pytest.mark.xfail(
reason="pandas bug: "
"https://github.com/pandas-dev/pandas/issues/35092"
),
),
{1: 1},
{0: 10, 1: 100, 2: 102},
],
)
@pytest.mark.parametrize("sort", [False, True])
def test_dataframe_append_series_dict(df, other, sort):
pdf = df
other_pd = other
gdf = cudf.from_pandas(df)
if isinstance(other, pd.Series):
other_gd = cudf.from_pandas(other)
else:
other_gd = other
expected = pdf.append(other_pd, ignore_index=True, sort=sort)
actual = gdf.append(other_gd, ignore_index=True, sort=sort)
if expected.shape != df.shape:
assert_eq(expected.fillna(-1), actual.fillna(-1), check_dtype=False)
else:
assert_eq(
expected, actual, check_index_type=False if gdf.empty else True
)
def test_dataframe_append_series_mixed_index():
df = cudf.DataFrame({"first": [], "d": []})
sr = cudf.Series([1, 2, 3, 4])
with pytest.raises(
TypeError,
match=re.escape(
"cudf does not support mixed types, please type-cast "
"the column index of dataframe and index of series "
"to same dtypes."
),
):
df.append(sr, ignore_index=True)
@pytest.mark.parametrize(
"df",
[
pd.DataFrame(),
pd.DataFrame(index=[10, 20, 30]),
| pd.DataFrame({"first_col": [], "second_col": [], "third_col": []}) | pandas.DataFrame |
import pandas as pd
import yaml
import os
from pathlib import Path
import numpy as np
import argparse
import seaborn as sns
import shutil
from tdlda.benchmark.visualization import _ds_pretty, _jm_pretty, _jm_and_pipe_pretty, plot_benchmark_results
import re
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib import pyplot as plt
sns.set_palette("colorblind")
sns.set(style="whitegrid")
def best_pipeline_of_family_for_dataset(ds_pd, family_identifier, error_action='warn'):
fam_pd = ds_pd[ds_pd['pipeline'].str.contains(re.escape(family_identifier))]
nan_thresh = 50
for pipeline in fam_pd['pipeline'].unique():
cur_pipe = fam_pd.loc[fam_pd['pipeline'] == pipeline]
pipe_nan_perc = 100 * cur_pipe['score'].isna().sum() / len(cur_pipe)
if pipe_nan_perc >= nan_thresh or np.isnan(pipe_nan_perc):
print(f'{pipeline} has {pipe_nan_perc:1.2f}% nans, which exceeds nan_threshold of {nan_thresh}%.'
f' Removing from analysis.')
fam_pd = fam_pd.loc[~(fam_pd['pipeline'] == pipeline)]
fam_pd = fam_pd.groupby('pipeline').mean()
fam_pd = fam_pd.sort_values(by='score', ascending=False)
if not fam_pd.empty:
best_pipeline_name = fam_pd.iloc[0:1].index[0]
best_pipeline_pd = ds_pd[ds_pd['pipeline'].str.contains(best_pipeline_name, regex=False)]
best_pipeline_pd = best_pipeline_pd.replace(best_pipeline_name, f'zref_{best_pipeline_name}')
return best_pipeline_pd
else:
fail_str = f'Did not find a dataset using family identifier: {family_identifier}'
if error_action == 'warn':
print(f'Warning: {fail_str}')
elif error_action == 'raise':
raise ValueError(fail_str)
else:
raise ValueError('Unknown error_action.')
return None
plt.rcParams['backend'] = 'QT4Agg'
plt.ioff()
LOCAL_CONFIG_FILE = f'local_config.yaml'
try:
with open(LOCAL_CONFIG_FILE, 'r') as conf_f:
local_cfg = yaml.load(conf_f, Loader=yaml.FullLoader)
except OSError:
os.chdir('scripts')
with open(LOCAL_CONFIG_FILE, 'r') as conf_f:
local_cfg = yaml.load(conf_f, Loader=yaml.FullLoader)
RESULTS_ROOT = Path(local_cfg['results_root']) / local_cfg['benchmark_meta_name']
parser = argparse.ArgumentParser(add_help=True)
parser.add_argument('results_path',
help=f'Grouping and name of the results, e.g.: 2020-03-02/e88707f1-caa7-4318-92d0-3cd6acef8c2f')
args = parser.parse_args()
RESULTS_FOLDER = RESULTS_ROOT / args.results_path
PLOTS_FOLDER = RESULTS_FOLDER / 'plots'
ANALYSIS_CONFIG_FILE = f'analysis_config.yaml'
with open(RESULTS_FOLDER / ANALYSIS_CONFIG_FILE, 'r') as conf_f:
ana_cfg = yaml.load(conf_f, Loader=yaml.FullLoader) # todo use this somewhere
os.makedirs(PLOTS_FOLDER, exist_ok=True)
sub_results = []
for csv_f in RESULTS_FOLDER.glob('*.csv'):
sub_results.append(pd.read_csv(csv_f))
r = pd.concat(sub_results, ignore_index=True)
r['dataset'] = r['dataset'].replace({
'003-2015': 'BNCI_healthy_2015',
'Spot Pilot P300 dataset single': 'Spot_single_trial',
'008-2014': 'BNCI_ALS_patients',
'EPFL P300 dataset': 'EPFL',
'009-2014': 'BNCI_healthy_2014',
'Spot Pilot P300 dataset pooled': 'Spot_pooled',
'Brain Invaders 2013a': 'Brain_invaders',
'Aphasia Direction Study dataset words': 'Aphasia_direction_words',
'Aphasia Direction Study dataset oddball': 'Aphasia_direction_oddball',
'Aphasia Main Study dataset oddball': 'Aphasia_main_oddball',
'Aphasia Main Study dataset words bci': 'Aphasia_main_words_bci',
'Aphasia Main Study dataset words button': 'Aphasia_main_words_button',
'Visual Speller LLP': 'Visual_LLP',
'Visual Speller MIX': 'Visual_MIX',
})
print(f'Analysis done for {len(r["dataset"].unique())} datasets.')
print(r["dataset"].unique())
#%% data manipulation and cleanup
pipelines = r['pipeline'].unique()
threshold_nans_percent = 10
for p in pipelines:
asd = r.loc[r['pipeline'] == p]
nan_perc = 100*asd['score'].isna().sum() / len(asd)
print(f'{p}: {nan_perc:1.2f}% NaNs')
if nan_perc >= threshold_nans_percent:
print(f'{p} exceeds nan_threshold of {threshold_nans_percent}. Removing from analysis.')
r = r.loc[~(r['pipeline'] == p)]
r = r.sort_values(by=['pipeline'])
highscores = r.groupby(['pipeline', 'dataset', 'subject']).mean() \
.groupby(['pipeline', 'dataset']).mean().groupby(['pipeline']).mean().sort_values(by='score')
# highscores = r.groupby('pipeline').mean().sort_values(by='score')
print(highscores)
highscores.to_csv(PLOTS_FOLDER / 'highscores_all_datasets.csv', index=True, float_format='%.4f')
# %% Highscores
bounds = [('tiny', 0, 100), ('small', 100, 250), ('medium', 250, 700), ('large', 700, np.inf)]
for b in bounds:
print(b)
r_sub = r[(r['samples'] >= b[1]) & (r['samples'] < b[2])]
highscores = r_sub.groupby(['pipeline', 'dataset', 'subject']).mean()\
.groupby('pipeline').mean().sort_values(by='score')
highscores.to_csv(PLOTS_FOLDER / f'highscores_{b[0]}_datasets.csv', index=True, float_format='%.4f')
#%%
# Split EPFL dataset into two datasets: healthy vs. patients
r.loc[np.logical_and(r['dataset'] == 'EPFL', r['subject'] <= 4), 'dataset'] = 'EPFL_disabled'
r.loc[np.logical_and(r['dataset'] == 'EPFL', r['subject'] > 4), 'dataset'] = 'EPFL_healthy'
# Split Brain invaders into two datasets: single-session and multi-session
r.loc[np.logical_and(r['dataset'] == 'Brain_invaders', r['subject'] <= 7), 'dataset'] = 'Brain_invaders_multisession'
r.loc[np.logical_and(r['dataset'] == 'Brain_invaders', r['subject'] > 7), 'dataset'] = 'Brain_invaders_singlesession'
# %%
r_bkp = r
for ds in r['dataset'].unique():
print(f'\n\n\n======={ds}=======')
sub = r.loc[r['dataset'] == ds]
sub = sub[sub['pipeline'].str.contains('_rg_')]
temp = sub.groupby('pipeline').mean()
temp = temp.sort_values(by='score', ascending=False)
with pd.option_context('display.max_rows', None, 'display.max_columns', None, 'display.width', 180):
print(temp.iloc[0:10])
# datasets = ['003-2015', 'Spot Pilot P300 dataset', '008-2014', 'EPFL P300 dataset', '009-2014',
# 'Spot Pilot P300 dataset pooled', 'Brain Invaders 2013a']
# dataset_labels = ['BNCI_Healthy_2015', 'Spot single trial']
dims = ['jm_few', 'jm_standard', 'jm_numerous', 'sel_all']
# dim_label = ['$T_2$', '$T_5$', '$T_{10}$', '$T_{all}$']
for ds in r['dataset'].unique():
sub = r.loc[r['dataset'] == ds]
ref_pipeline_family = ['_rg_', '_kPCA']
ref_pipeline_family = ['ceat_rg_xdawncomps_5_xdawnclasses_Target', 'jm_numerous_kPCA(70)_skl_lsqr']
ref_pipes = []
for rpf in ref_pipeline_family:
ref_pipes.append(best_pipeline_of_family_for_dataset(sub, rpf, error_action='warn'))
for d_i, d_str in enumerate(dims):
d_pd = sub[sub['pipeline'].str.contains(f'{d_str}')]
d_pd = d_pd[~d_pd['pipeline'].str.contains(f'_kPCA')]
for rp in ref_pipes:
d_pd = pd.concat([d_pd, rp]) if rp is not None else d_pd
# # COMPLETE VERSION OF PLOT FUNCTION CALL
# plot_benchmark_results(d_pd, dataset=ds, jm_dim=d_str, save=True, dim_prefix=d_str, ylim='auto',
# output_dir=PLOTS_FOLDER, session_mean=True, out_prefix='')
# # PAPER VERSION OF PLOT FUNCTION CALL
# paper_pipes = ['changeblock_standard_cov_no_chshr', 'skl_lsqr', 'default_bbci', '_rg_', '_kPCA']
# d_pd = d_pd[d_pd['pipeline'].str.contains('|'.join(paper_pipes))]
plot_benchmark_results(d_pd, dataset=ds, jm_dim=d_str, save=True, dim_prefix=d_str, ylim='auto',
output_dir=PLOTS_FOLDER, session_mean=True, figsize=(4, 3), out_prefix='')
plt.close("all")
# %%
REF_PIPELINES = [
'jm_numerous_lda_p_cov',
'ceat_rg_xdawncomps_5_xdawnclasses_Target',
]
force_reload = False
r_temp = r
for REF_PIPELINE in REF_PIPELINES:
ref_path = PLOTS_FOLDER / f'REF_{REF_PIPELINE}.csv.gz'
if not ref_path.exists() or force_reload:
dataset_subject = r_temp[['dataset', 'subject', 'session']].drop_duplicates().reset_index(drop=True)
subsets = []
for row in dataset_subject.itertuples():
print(f'{row.Index} of {len(dataset_subject)} ({100*row.Index / len(dataset_subject):1.2f} %)')
subset = r_temp.loc[np.logical_and(r_temp['dataset'] == row.dataset, np.logical_and(
r_temp['subject'] == row.subject, r_temp['session'] == row.session))]
ref_score = float(subset.loc[subset['pipeline'] == REF_PIPELINE]['score'])
subset.loc[:, 'score'] -= ref_score
subsets.append(subset)
r_ref = | pd.concat(subsets) | pandas.concat |
#!/usr/bin/env python
# This script takes a task name (same name as in LAB KEY LIMS) and returns the
# input file paths and proposed output paths.
import sys
import os
import argparse
from shutil import copyfile
import sqlite_functions
import labkey as lk
import requests
import json
import pandas as pd
import hashlib
import time
import numpy as np
from datetime import datetime, time as dtime
from subprocess import check_output
import re
import argparse
# parse arguments
task_name=''
parser = argparse.ArgumentParser(description='This script prints the file paths (and output paths) for a given task, where a task is what needs to be converted (i.e. mzml -> h5).')
parser.add_argument("-c","--config",help="This is a json file that will be used to create the inputs.json file for the WDL. It's format should be like a regular \"inputs.json\".", type=str, required=True)
parser.add_argument("-o","--out",help="The name of the ouput json file that the WDL will use.", type=str, required=True)
parser.add_argument("-a","--api",help="The file that contains the api key, i.e. \"apikey|23ladsf9932riadifa\".", type=str, required=True)
parser.add_argument("-w","--wdl",help="The name as shown in the first line of the WDL file. This will be used in the inputs.json file.", type=str, required=True)
args = parser.parse_args()
labkey_server='metatlas-dev.nersc.gov'
project_name='/LIMS'
must_be = [
"mzml_to_hdf5",
"mzml_to_pactolus",
"mzml_to_spectralhits",
"raw_to_mzml"]
if not os.path.exists(args.config):
print("Please use one of the accepted task names: %s" % must_be)
sys.exit()
def tasks_to_do(api_key):
"""
Possible tasks are:
mzml_to_hdf5
mzml_to_pactolus
mzml_to_spectralhits
raw_to_mzml
"""
sql = """SELECT DISTINCT task FROM file_conversion_task;"""
con = lk.utils.create_server_context(labkey_server, project_name, use_ssl=True,api_key=api_key)
# base execute_sql
schema = 'lists'
sql_result = lk.query.execute_sql(con, schema, sql,max_rows=1e6)
if sql_result is None:
print('execute_sql: Failed to load results from ' + schema + '.' + table)
else:
df = pd.DataFrame(sql_result['rows'])
df = df[[c for c in df.columns if not c.startswith('_')]]
return list(df['task'])
def get_raw_files(task_name,api_key):
sql = "SELECT Key, file_conversion_task.input_file,file_conversion_task.output_file FROM file_conversion_task WHERE file_conversion_task.task='%s' AND file_conversion_task.status <> '09 error';" % (task_name)
con = lk.utils.create_server_context(labkey_server, project_name, use_ssl=True,api_key=api_key)
# base execute_sql
schema = 'lists'
sql_result = lk.query.execute_sql(con, schema, sql,max_rows=1e6)
if sql_result is None:
print('execute_sql: Failed to load results from ' + schema + '.' + table)
else:
df = pd.DataFrame(sql_result['rows'])
df = df[[c for c in df.columns if not c.startswith('_')]]
| pd.set_option('display.max_colwidth', -1) | pandas.set_option |
"""
Data structures for sparse float data. Life is made simpler by dealing only
with float64 data
"""
from __future__ import division
# pylint: disable=E1101,E1103,W0231,E0202
from numpy import nan
from pandas.compat import lmap
from pandas import compat
import numpy as np
from pandas.types.missing import isnull, notnull
from pandas.types.common import _ensure_platform_int
from pandas.core.common import _try_sort
from pandas.compat.numpy import function as nv
from pandas.core.index import Index, MultiIndex, _ensure_index
from pandas.core.series import Series
from pandas.core.frame import (DataFrame, extract_index, _prep_ndarray,
_default_index)
import pandas.core.algorithms as algos
from pandas.core.internals import (BlockManager,
create_block_manager_from_arrays)
from pandas.core.generic import NDFrame
from pandas.sparse.series import SparseSeries, SparseArray
from pandas.util.decorators import Appender
import pandas.core.ops as ops
class SparseDataFrame(DataFrame):
"""
DataFrame containing sparse floating point data in the form of SparseSeries
objects
Parameters
----------
data : same types as can be passed to DataFrame
index : array-like, optional
column : array-like, optional
default_kind : {'block', 'integer'}, default 'block'
Default sparse kind for converting Series to SparseSeries. Will not
override SparseSeries passed into constructor
default_fill_value : float
Default fill_value for converting Series to SparseSeries. Will not
override SparseSeries passed in
"""
_constructor_sliced = SparseSeries
_subtyp = 'sparse_frame'
def __init__(self, data=None, index=None, columns=None, default_kind=None,
default_fill_value=None, dtype=None, copy=False):
# pick up the defaults from the Sparse structures
if isinstance(data, SparseDataFrame):
if index is None:
index = data.index
if columns is None:
columns = data.columns
if default_fill_value is None:
default_fill_value = data.default_fill_value
if default_kind is None:
default_kind = data.default_kind
elif isinstance(data, (SparseSeries, SparseArray)):
if index is None:
index = data.index
if default_fill_value is None:
default_fill_value = data.fill_value
if columns is None and hasattr(data, 'name'):
columns = [data.name]
if columns is None:
raise Exception("cannot pass a series w/o a name or columns")
data = {columns[0]: data}
if default_fill_value is None:
default_fill_value = np.nan
if default_kind is None:
default_kind = 'block'
self._default_kind = default_kind
self._default_fill_value = default_fill_value
if isinstance(data, dict):
mgr = self._init_dict(data, index, columns)
if dtype is not None:
mgr = mgr.astype(dtype)
elif isinstance(data, (np.ndarray, list)):
mgr = self._init_matrix(data, index, columns)
if dtype is not None:
mgr = mgr.astype(dtype)
elif isinstance(data, SparseDataFrame):
mgr = self._init_mgr(data._data,
dict(index=index, columns=columns),
dtype=dtype, copy=copy)
elif isinstance(data, DataFrame):
mgr = self._init_dict(data, data.index, data.columns)
if dtype is not None:
mgr = mgr.astype(dtype)
elif isinstance(data, BlockManager):
mgr = self._init_mgr(data, axes=dict(index=index, columns=columns),
dtype=dtype, copy=copy)
elif data is None:
data = DataFrame()
if index is None:
index = Index([])
else:
index = _ensure_index(index)
if columns is None:
columns = Index([])
else:
for c in columns:
data[c] = SparseArray(np.nan, index=index,
kind=self._default_kind,
fill_value=self._default_fill_value)
mgr = to_manager(data, columns, index)
if dtype is not None:
mgr = mgr.astype(dtype)
| NDFrame.__init__(self, mgr) | pandas.core.generic.NDFrame.__init__ |
import sys
import numpy as np
from ArtSimCore import ArtSimCore
from rank_distance import tau, ndcg
import pandas as pd
import time
dblp_fcc = "../data/evaluation/dblp_fcc_varying_future_period_30percent_WITH_ZEROS.txt"
#dblp_fcc = "../data/evaluation/dblp_fcc_varying_future_period_30percent.txt"
if len(sys.argv) != 8 and len(sys.argv) != 9:
print("Usage: python3 artsim.py <paper_details_file> <scores_file> <sim_file_PAP> <sim_file_PTP> <connections_file_PV> <cold_start_year> <eval_method> <ndcg:k>")
sys.exit(-1)
paper_details = sys.argv[1]
scores_file = sys.argv[2]
sim_file_PA = sys.argv[3]
sim_file_PT = sys.argv[4]
con_file_PV = sys.argv[5]
cold_start_year = int(sys.argv[6])
eval_method = sys.argv[7]
k = -1
if eval_method == "ndcg":
if len(sys.argv) != 9:
print("Usage: python3 artsim.py <paper_details_file> <scores_file> <sim_file_PAP> <sim_file_PTP> <connections_file_PV> <cold_start_year> <eval_method> <ndcg:k>")
sys.exit(-1)
k = int(sys.argv[8])
artsim_core = ArtSimCore()
artsim_core.read_paper_ids(paper_details)
artsim_core.read_paper_scores(scores_file)
artsim_core.mark_cold_start_papers(cold_start_year)
artsim_core.read_similarities(sim_file_PA, 'PA')
artsim_core.read_similarities(sim_file_PT, 'PT')
artsim_core.read_connections(con_file_PV, 'PV')
ground_truth_df = pd.read_csv(dblp_fcc, sep='\t', header=None, names=['paper_id', 'truth_score'])
for precision in [1]:
splits = pow(10, precision) + 1
artsim_time = 0
tau_time = 0
for alpha in np.linspace(0, 1, splits):
for beta in np.linspace(0, 1, splits):
for delta in np.linspace(0, 1, splits):
alpha = round(alpha, precision)
beta = round(beta, precision)
gamma = 0
delta = round(delta, precision)
sum = alpha + beta + gamma + delta
if (round(sum, precision) != 1.0):
continue
start = time.time()
results = None
results = artsim_core.run(alpha, beta, gamma, delta)
artsim_time += (time.time() - start)
start = time.time()
result_df = | pd.DataFrame(results, columns=['paper_id', 'pred_score']) | pandas.DataFrame |
#########################################################################
#########################################################################
# Classes for handling genome-wide association input and output files, ##
# analysis and qc programs, and post-hoc analyses ##
#########################################################################
#########################################################################
import cgatcore.experiment as E
import cgatcore.iotools as iotools
import numpy as np
import pandas as pd
import pandas.io.sql as pdsql
import re
import random
import os
import subprocess
import rpy2.robjects as ro
from rpy2.robjects import r as R
from rpy2.robjects import pandas2ri as py2ri
# set matplotlib non-interactive backend to Agg to
# allow running on cluster
import collections
import sqlite3 as sql
from math import *
import scipy.stats as stats
class FileGroup(object):
'''
An object for holding, formatting and processing files for genome-wide
association analysis including compressed and binary files
File types supported:
* plink - .ped and .map files
* plink binary - .bim, .fam. and .bed files
* variant call format - .vcf and .bcf (including gzipped vcf)
* Oxford format - .gen or .bgen with matched sample text file (must
be .sample)
* GRM_binary - genetic relationship matrix calculated in an appropriate
program in binary format. File suffixes are *.grm.bin, *.grm.N.bin
      and *.grm.id
    * GRM_gz - previously calculated gzip compressed GRM, file suffixes
are *.grm.gz and *.grm.id
Phenotypes are assumed to be contained in the relevant files, if not
then an additional phenotypes files can be included using the
`phenotypes` argument. Covariate files (if different from the phenotypes
file) can also be included in the instantiation of a :FileGroup:
object using the `covarite_files` argument.
Only the `files` and `file_format` arguments are required.
Genotype data are assumed to be raw genotype calls. This can be modified
using the `genotype_format` argument upon instantiation. Values allowed
are:
* calls - standard bi-allelic genotype calls, i.e. AA, AB, BB
* imputed_call - discrete genotype calls from imputed data,
essentially treated the same as ``calls``
* genotype_prob - posterior probabilities for each genotype class,
i.e. 0.88 0.07 0.05 corresponding to homozygote
reference, heterozygote then homozygote rare allele.
'''
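    # A minimal usage sketch (file names below are hypothetical placeholders,
    # not files shipped with this module):
    #
    #   geno = FileGroup(files=["study.bed", "study.bim", "study.fam"],
    #                    file_format="plink_binary",
    #                    phenotypes="study_phenotypes.txt")
    #   geno.set_phenotype(pheno_file="study_phenotypes.txt", pheno=1)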
# Defaults for file formats
ped_file = None
map_file = None
bim_file = None
fam_file = None
bed_file = None
sample_file = None
gen_file = None
bgen_file = None
vcf_file = None
bcf_file = None
def __init__(self, files, file_format, phenotypes=None,
genotype_format="calls", covariate_files=None):
self.files = files
self.file_format = file_format
self.pheno_file = phenotypes
self.genotype_format = genotype_format
self.covariate_files = covariate_files
self.set_file_prefix(files)
def set_file_prefix(self, infiles):
'''Get file prefixes from input files. These are used across all
file formats, e.g. myfile.bed, myfile.bim, myfile.fam name=myfile.
Only use periods, '.' to denote file suffixes. use hyphens and
underscores for separating file names.
Set these to the appropriate attributes.
'''
file_prefixes = set()
for f in infiles:
# get all input file prefixes
if len(f.split("/")) > 1:
g = f.split("/")[-1]
fdir = f.split("/")[:-1]
fdir = "/".join(fdir)
ffile = fdir + "/" + g.split(".")[0]
file_prefixes.add(ffile)
else:
file_prefixes.add(f.split(".")[0])
# if only prefix then use this for all data files
if len(file_prefixes) == 1:
self.name = [xf for xf in file_prefixes][0]
else:
# if there are multiple prefixes then use separate
# flags for file inputs
self.name = None
# define file types by their suffix instead
if self.file_format == "plink":
self.ped_file = [pf for pf in infiles if re.search(".ped",
pf)][0]
self.map_file = [mf for mf in infiles if re.search(".map",
mf)][0]
# check files exist (i.e. are not the default None values)
try:
assert self.ped_file
except AssertionError:
raise ValueError(".ped file is missing, please "
"specify")
try:
assert self.map_file
except AssertionError:
raise ValueError(".map file is missing, please "
"specify")
elif self.file_format == "plink_binary":
self.fam_file = [ff for ff in infiles if re.search(".fam",
ff)][0]
self.bim_file = [fb for fb in infiles if re.search(".bim",
fb)][0]
self.bed_file = [bf for bf in infiles if re.search(".bed",
bf)][0]
# check files exist (i.e. are not the default None values)
try:
assert self.fam_file
except AssertionError:
raise ValueError(".fam file is missing, please "
"specify")
try:
assert self.bim_file
except AssertionError:
raise ValueError(".bim file is missing, please "
"specify")
try:
assert self.bed_file
except AssertionError:
raise ValueError(".bed file is missing, please "
"specify")
elif self.file_format == "oxford":
self.gen_file = [gf for gf in infiles if re.search(".gen",
gf)][0]
self.sample_file = [sf for sf in infiles if re.search(".sample",
sf)][0]
# check files exist (i.e. are not the default None values)
try:
assert self.gen_file
except AssertionError:
raise ValueError(".gen file missing, please "
"specify")
try:
assert self.sample_file
except AssertionError:
raise ValueError(".sample file missing, please "
"specify")
elif self.file_format == "oxford_binary":
self.bgen_file = [bg for bg in infiles if re.search(".bgen",
bg)][0]
self.sample_file = [sf for sf in infiles if re.search(".sample",
sf)][0]
# check files exist (i.e. are not the default None values)
try:
assert self.bgen_file
except AssertionError:
raise ValueError(".bgen file is missing, please "
"specify")
try:
assert self.sample_file
except AssertionError:
raise ValueError(".sample file is missing, please "
"specify")
elif self.file_format == "vcf":
self.vcf_file = [vf for vf in infiles if re.search(".vcf",
vf)][0]
# check files exist (i.e. are not the default None values)
try:
assert self.vcf_file
except AssertionError:
raise ValueError(".vcf file is missing, please "
"specify")
elif self.file_format == "bcf":
self.bcf_file = [bv for bv in infiles if re.search(".bcf",
bv)][0]
# check files exist (i.e. are not the default None values)
try:
assert self.bcf_file
except AssertionError:
raise ValueError(".bcf file is missing, please "
"specify")
elif self.file_format == "GRM_binary":
self.id_file = [ig for ig in infiles if re.search(".grm.id",
ig)][0]
self.n_file = [gn for gn in infiles if re.search(".grm.N.bin",
gn)][0]
self.bin_file = [gb for gb in infiles if re.search(".grm.bin",
gb)][0]
# check files exits
try:
assert self.id_file
except AssertionError:
raise ValueError("GRM ids file is missing, please "
"specify")
try:
assert self.n_file
except AssertionError:
raise ValueError("grm.N file is missing, please "
"specify")
try:
assert self.bin_file
except AssertionError:
                raise ValueError("GRM genotype file is missing, please "
                                 "specify")
elif self.file_format == "GRM_plink":
self.id_file = [ig for ig in infiles if re.search(".rel.id",
ig)][0]
self.rel_file = [gn for gn in infiles if re.search(".rel.N.bin",
gn)][0]
# check files exits
try:
assert self.id_file
except AssertionError:
raise ValueError("GRM ids file is missing, please "
"specify")
try:
assert self.rel_file
except AssertionError:
raise ValueError("rel.N file is missing, please "
"specify")
def set_phenotype(self, pheno_file=None, pheno=1):
'''
Set the phenotype for a set of individuals
using an external phenotypes file.
Default is to use the (n+2)th column, designated
as pheno 1.
'''
if type(pheno) == int:
pheno = str(pheno)
elif type(pheno) == str:
pass
else:
raise AttributeError("Type of pheno unknown. "
"Must be str or int.")
self.pheno_file = pheno_file
self.pheno = pheno
class GWASProgram(object):
'''
A base level object for programs designed to perform genome-wide
association analysis and operate on genome-wide genotyping data.
    Subclasses are expected to implement `program_call` to construct the
    program-specific command line from a :FileGroup: of input files, and
    `postprocess` to collect and format the program output.
'''
def __init__(self, executable=None, required_format=None):
self.executable = executable
self.require_format = required_format
def program_call(self, infiles, outfile):
'''build a statement to perform genome-wide
analysis using infiles
'''
return ""
def postprocess(self, infiles, outfile):
'''collect and process output files from
program - format for Result class objects'''
return ""
def build(self, infiles, outfile):
'''run analysis program'''
        cmd_program = self.program_call(infiles, outfile)
        cmd_postprocess = self.postprocess(infiles, outfile)
        if cmd_postprocess:
            # post-processing commands must be ";"-terminated so that
            # they can be chained onto the main program call
            assert cmd_postprocess.strip().endswith(";")
else:
pass
statement = " checkpoint; ".join((cmd_program,
cmd_postprocess))
return statement
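# The generic contract for subclasses: program_call() builds the main command
# line, postprocess() appends ";"-terminated output-formatting commands, and
# build() chains the two with a "checkpoint;" separator.  A sketch, assuming
# a hypothetical subclass MyProgram that implements both hooks:
#
#   prog = MyProgram()
#   statement = prog.build(infiles=my_filegroup, outfile="results/gwas_out")
#   # `statement` is then executed by the calling pipeline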
class GCTA(GWASProgram):
'''
GCTA is designed for computing genetic relationship matrices, linear
mixed model analyses and phenotype estimation/prediction.
It can also perform SNP-wise GWAS.
Files MUST be in Plink binary format
'''
def __init__(self, files, options=None, settings=None,
design=None):
self.infiles = files
self.options = options
self.settings = settings
self.design = design
self.executable = "gcta64"
self.statement = {}
self.filters = []
def program_call(self, infiles, outfile):
'''build GCTA call statement on infiles'''
statement = []
statement.append(self.executable)
if infiles.name:
inputs = self._build_single_file_input(infiles,
infiles.file_format)
statement.append(inputs)
else:
raise AttributeError("Files must be in binary plink format "
"or as a GRM to use GCTA. Please "
"convert and try again.")
if infiles.pheno_file:
statement.append(" --pheno %s --mpheno %s " % (infiles.pheno_file,
infiles.pheno))
else:
pass
self.statement["program"] = " ".join(statement)
def _build_single_file_input(self, infiles, file_format):
'''internal function only. Use it to construct the
file input flags with --file, --bfile or --data
'''
statement = None
if file_format == "plink":
statement = " --file %s " % infiles.name
elif file_format == "plink_binary":
statement = " --bfile %s " % infiles.name
elif file_format == "oxford" or file_format == "oxford_binary":
statement = " --data %s" % infiles.name
elif file_format == "GRM_binary" or file_format == "GRM_plink":
statement = " --grm %s " % infiles.name
else:
            raise AttributeError("file format is not defined or recognised. "
                                 "Please define the input correctly when "
"instantiating a FileGroup object")
return statement
def PCA(self, n_pcs="20"):
'''
        Perform PCA analysis on a previously generated GRM, output the first n
        principal components, default = 20
'''
self._run_tasks(pca=n_pcs)
def apply_filters(self, filter_type, filter_value):
'''
* chromosome - exclude all variants not on the specified chromosome(s).
[str/list]
* autosome_number - for non-human species, the number of chromosomes to
be considered autosomes
* exclude_snps - text file list of variant IDs to exclude from analysis
[file]
* extract - text file list of variant IDs to include in analysis,
ignores all others. [file]
* min_allele_frequency - only include SNPs with cohort/case allele
frequency above this threshold. [float]
* max_allele_frequency - include all SNPs with a MAF equal to or below
this value. [float]
'''
if filter_type == "chromosome":
self._construct_filters(chromosome=filter_value)
elif filter_type == "autosome_number":
self._construct_filters(autosome_number=filter_value)
elif filter_type == "exclude_snps":
            self._construct_filters(exclude=filter_value)
elif filter_type == "extract":
self._construct_filters(extract=filter_value)
elif filter_type == "min_allele_frequency":
self._construct_filters(min_allele_frequency=filter_value)
elif filter_type == "max_allele_frequency":
self._construct_filters(max_allele_frequency=filter_value)
elif filter_type == "keep":
self._construct_filters(keep=filter_value)
elif filter_type == "remove":
self._construct_filters(remove=filter_value)
def _construct_filters(self, **kwargs):
'''
Add filter to each GCTA run.
The filters accepted are defined below. These are input as keyword
arguments supported by this function.
* min_allele_frequency - only include SNPs with cohort/case allele
frequency above this threshold. [float]
* max_allele_frequency - include all SNPs with a MAF equal to or below
this value. [float]
* keep - keep individuals with matching individual and family IDs.
[file]
* remove - remove all individuals with matching individual and family
IDs. [file]
* extract - text file list of variant IDs to include in analysis,
ignores all others. [file]
* exclude - text file list of variant IDs to exclude from analysis.
[file]
* chromosome - exclude all variants not on the specified chromosome(s).
[str/list]
* autosome - exclude all non-place and non-autosomal variants.
[boolean]
* covariates_file - specify the covariates file with family and
individual IDs in the first two columns. Covariates are in the
(n+2)th column. Only used in conjunction with `covariate_filter`.
[file]
* covariate_filter - covariate columns value to filter on. Can be
used with non-numeric values to filter out individuals with
covariate =/= `covariate_filter` value. [str/int/float]
* covariate_column - column number to apply filtering to if more
than one covariate in the file. [int]
* update_gender - provide gender information in a separate text
file. [file]
* grm_threshold - remove one of a pair of individuals with
estimated relatedness greater than this value.
* ld_significance - p-value threshold for regression test
of LD significance
* genotype_call - GenCall score cut-off for calling raw
genotypes into Plink PED format
* meta_pval - p-value threshold cut-off for conditional
and joint genome-wide analysis
        * cojo_window - distance in kb beyond which SNPs this
distance apart are assumed to be in linkage equilibrium
* cojo_collinear - multiple regression R^2 on selected SNPs
value above which the testing SNP will not be selected.
* cojo_inflation - adjust COJO analysis test statistics
for genomic control. [boolean]
* reml_iterations - maximum number of iterations to use
during reml analysis. Default is 100. [int]
'''
statement = []
# map of keyword arguments recognised to Plink2 filtering flags
filter_map = {"min_allele_frequency": " --maf %s ",
"max_allele_frequency": " --max-maf %s ",
"keep": " --keep %s ",
"remove": " --remove %s ",
"extract": " --extract %s ",
"exclude": " --exclude %s ",
"chromosome": " --chr %s ",
"autosome": " --autosome ",
"autosome_number": " --autosome-num %s ",
"grm_threshold": " --grm-cutoff %s ",
"ld_significance": " --ls-sig %s ",
"genotype_call": " --gencall %s ",
"meta_pval": " --cojo-p %s ",
"cojo_window": " --cojo-wind %s ",
"cojo_collinear": " --cojo-collinear %s ",
"cojo_inflation": " --cojo-gc ",
"reml_iterations": " --reml-maxit %s "}
# compile all filters together, checking for dependencies.
# use a mapping dictionary to extract the relevant flags and
# combinations to use.
filters = []
filter_dict = {}
for key, value in kwargs.items():
filter_dict[key] = value
for each in filter_dict.keys():
try:
assert filter_map[each]
# check for data type <- behaviour is type dependent
                if type(filter_dict[each]) == bool:
filters.append(filter_map[each])
else:
filter_val = filter_dict[each]
filters.append(filter_map[each] % filter_val)
except KeyError:
E.warn("%s filter not recognised, please see "
"documentation for allowed filters" % each)
self.filters.append(" ".join(filters))
self.statement["filters"] = " ".join(self.filters)
def mixed_model(self, lmm_method, grm=None, qcovar=None,
dcovar=None):
'''
Run a linear mixed model with the GRM used to model
        random effects of the estimated genetic relationship
between individuals
'''
# add the mlm flag to the statement
self._run_tasks(lmm=lmm_method)
# construct the rest of mlm statement
statement = []
if qcovar:
statement.append(" --qcovar %s " % qcovar)
else:
pass
if dcovar:
statement.append(" --covar %s " % dcovar)
else:
pass
        if grm:
            statement.append(" --grm %s " % grm)
        else:
            E.warn("No GRM has been provided, the --grm flag "
                   "will not be set")
self.statement["mlm"] = " ".join(statement)
def reml_analysis(self, method, parameters, prevalence=None,
qcovariates=None, discrete_covar=None):
'''
Use REML to estimate the proportion of phenotypic variance
explained by the estimated genetic relationship between
individuals.
Arguments
---------
method: string
GCTA method to use for REML estimation of h2. Includes:
* snpBLUP - calculate the SNP BLUPs from the genotype
data and the estimated total genetic value/ breeding value
* fixed_cor -
* priors - provide initial priors for the variance components
estimation
* unconstrained - allow variance estimates to fall outside
of the normal parameter space, bounded [0, ).
* GxE - estimate the contribution of GxE with covariates
to the phenotype variance
* BLUP_EBV - output individual total genetic effect/breeding
values
'''
statement = []
try:
params = parameters.split(",")
if len(params) == 1:
params = params[0]
else:
pass
except AttributeError:
params = parameters
self._run_tasks(parameter=params,
greml=method)
if prevalence:
statement.append(" --prevalence %0.3f " % prevalence)
else:
pass
if qcovariates:
statement.append(" --qcovar %s " % qcovariates)
else:
pass
if discrete_covar:
statement.append(" --covar %s " % discrete_covar)
else:
pass
self.statement["reml"] = " ".join(statement)
def _run_tasks(self, parameter=None, **kwargs):
'''
The principal functions of GCTA revolve around GRM estimation
and variance components analysis, such as REML estimation of
heritability and variance components, BLUP and phenotype prediciton.
It can also be used to do PCA and conditional and joint GWAS.
Tasks
-----
* pca - perform principal components analysis on a GRM
* greml - perform restricted maximum likelihood analysis
for estimation of variance components
* estimate_ld - estimate the linkage disequilibrium structure
over the genomic regions specified
* simulate_gwas - simulate genome-wide association data based
on observed genotype data
* cojo - conditional and joint genome-wide association
analysis across SNPs and covariates
* bivariate_reml - perform GREML on two traits, either both
binary, both quantitative or one of each
* lmm - perform a linear mixed model based association analysis
'''
statement = []
# set up a dictionary of recognised tasks with key word argument
# values as further dictionaries. Use the parameter argument
# to pass arguments by value to string formatting
# put all of the other tasks as options in the calling function
task_map = {"pca": " --pca %s ",
"greml": {"standard": " --reml ",
"priors": " --reml --reml-priors %s ",
"reml_algorithm": " --reml --reml-alg %s ",
"unconstrained": " --reml --reml-no-constrain ",
"GxE": " --reml --gxe %s ",
"LRT": " --reml --reml-lrt %s ",
"BLUP_EBV": " --reml --reml-pred-rand ",
"snpBLUP": " --blup-snp %s "},
"estimate_ld": " --ld %s ",
"simulate_gwas": {"quantitative": " --simu-qt ",
"case_control": " --simu-cc %s %s "},
"cojo": {"stepwise": " --cojo-file %s --cojo-slct ",
"no_selection": " --cojo-file %s --cojo-joint ",
"snp_conditional": " --cojo-file %s --cojo-cond %s "},
"bivariate_reml": {"standard": " --reml-bivar %s ",
"no_residual": " --reml-bivar %s --reml-bivar-nocove ",
"fixed_cor": " --reml-bivar %s --reml-bivar-lrt-rg %s "},
"lmm": {"standard": " --mlma ",
"loco": " --mlma-loco ",
"no_covar": " --mlma-no-adj-covar "},
"remove_relations": {"cutoff": " --grm-cutoff %s "}}
for task, value in kwargs.items():
# check for PCA first as it is not nested in task_map
if task == "pca":
try:
state = task_map[task] % value
statement.append(state)
except TypeError:
statement.append(task_map[task])
# LD estimation is likewise not nested
elif task == "estimate_ld":
try:
state = task_map[task] % value
statement.append(state)
except TypeError:
raise IOError("no SNP file list detected")
elif task != "parameter":
try:
# sub_task is a nested dictionary
sub_task = task_map[task]
try:
assert sub_task[value]
try:
# some tasks do not contain task values for the
# parameter argument - catch these with the TypeError
# exception
statement.append(sub_task[value] % parameter)
# the default for parameter is None, check this is appropriate
if not parameter:
E.warn("Parameter value is set to NoneType. "
"Please check this is an appropriate value "
"to pass for this task")
else:
pass
except TypeError:
statement.append(sub_task[value])
except KeyError:
raise KeyError("% Task not recognised, see docs for details of "
"recognised tasks" % task)
except KeyError:
raise KeyError("Task not recognised, see docs for details of "
"recognised tasks")
else:
pass
self.statement["tasks"] = " ".join(statement)
def genetic_relationship_matrix(self, compression="binary", metric=None,
shape="square", options=None):
'''
Calculate the estimated genetic relationship matrix from
genotyping data
* estimate_grm - estimate the realized genetic relationship
matrix between individuals from genotyping data
'''
mapf = {"binary": " --make-grm-bin ",
"gzip": " --make-grm-gz ",
"no_compress": " --make-grm ",
"X_chr": " --make-grm-chr ",
"X_chr_gz": " --make-grm-gz ",
"inbreeding": " --ibc "}
if options == "X_chr":
if compression == "gz":
state = mapf["X_chr_gz"]
else:
state = mapf["X_chr"]
elif options == "inbreding":
state = mapf["inbreeding"]
else:
pass
# check compression is compatible
if compression == "gz":
state = mapf["gzip"]
elif compression == "bin":
state = mapf["binary"]
elif compression is None and not options:
state = mapf["no_compress"]
self.statement["matrix"] = state
def build_statement(self, infiles, outfile, threads=None,
memory=None, parallel=None):
'''
Build statement and execute from components
'''
statement = []
exec_state = self.executable
# calls to function add to the self.statement dictionary
try:
statement.append(self.statement["program"])
except KeyError:
raise AttributeError("Input files and format not detected")
try:
statement.append(self.statement["filters"])
except KeyError:
pass
try:
statement.append(self.statement["tasks"])
except KeyError:
pass
try:
statement.append(self.statement["matrix"])
except KeyError:
pass
try:
statement.append(self.statement["mlm"])
except KeyError:
pass
try:
statement.append(self.statement["reml"])
except KeyError:
pass
if threads:
statement.append(" --thread-num %i " % threads)
else:
pass
# add output flag
statement.append(" --out %s " % outfile)
os.system(" ".join(statement))
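# End-to-end sketch for GCTA (file names are hypothetical): compute a binary
# GRM from plink binary genotypes, then assemble and run the command with
# build_statement().
#
#   geno = FileGroup(files=["study.bed", "study.bim", "study.fam"],
#                    file_format="plink_binary")
#   gcta = GCTA(files=geno)
#   gcta.program_call(geno, "results/study_grm")
#   gcta.genetic_relationship_matrix(compression="bin")
#   gcta.build_statement(geno, outfile="results/study_grm", threads=8)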
class Plink2(GWASProgram):
'''
Run various Plink functions and analysis, including file processing, GRM
calculation, PCA and other GWA tasks
Require Plink v1.9 to be in the users PATH variable as ``plink2`` to
distinguish it from Plink v1.07.
'''
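    # A minimal usage sketch mirroring GCTA above (values illustrative); the
    # mapped flags accumulate in self.statement and self.filters for the
    # final command line.
    #
    #   plink = Plink2(files=geno)
    #   plink.program_call(geno, "results/study_qc")
    #   plink.apply_filters("genotype_rate", "0.05")
    #   plink.apply_filters("hwe", "1e-6")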
def __init__(self, files, options=None,
settings=None, design=None):
self.infiles = files
self.options = options
self.settings = settings
self.design = design
self.executable = "plink2"
self.statement = {}
self.filters = []
def program_call(self, infiles, outfile):
''' build Plink call statement on infiles'''
statement = []
statement.append(self.executable)
if infiles.name:
inputs = self. _build_single_file_input(infiles,
infiles.file_format)
statement.append(inputs)
else:
inputs = self._build_multiple_file_input(infiles,
infiles.file_format)
statement.append(inputs)
# check for the presence of an additional phenotypes file
try:
if infiles.pheno_file:
statement.append(" --pheno %s --mpheno %s " % (infiles.pheno_file,
infiles.pheno))
else:
pass
except AttributeError:
pass
self.statement["program"] = " ".join(statement)
def hamming_matrix(self, shape, compression, options):
'''
Calculate genomic pair-wise distance matrix between
individuals using Hamming distance across all variants
'''
# check shape is compatible
if not shape:
shape = "triangle"
elif shape in ["square", "square0", "triangle"]:
pass
else:
raise ValueError("matrix shape %s not recognised."
"Valid options are square, square0, "
"and triangle." % shape)
# check compression is compatible
if compression in ["gz", "bin", "bin4"]:
pass
else:
raise ValueError("compression %s not recognised. Accepted "
"formats are gz, bin and bin4." % compression)
if options:
state = self._matrices(matrix_type="hamming", shape=shape,
compression=compression, options=options)
else:
state = self._matrices(matrix_type="hamming", shape=shape,
compression=compression)
self.statement["matrix"] = state
def ibs_matrix(self, shape, compression, options):
'''
Calculate genomic pair-wise similarity matrix between
individuals using proportion of IBS alleles
'''
# check shape is compatible
if shape in ["square", "square0", "triangle"]:
pass
else:
raise ValueError("matrix shape %s not recognised."
"Valid options are square, square0, "
"and triangle." % shape)
# check compression is compatible
if compression in ["gz", "bin", "bin4"]:
pass
else:
raise ValueError("compression %s not recognised. Accepted "
"formats are gz, bin and bin4." % compression)
if options:
state = self._matrices(matrix_type="ibs", shape=shape,
compression=compression, options=options)
else:
state = self._matrices(matrix_type="ibs", shape=shape,
compression=compression)
self.statement["matrix"] = state
def genome_matrix(self, shape, compression, options):
'''
Calculate genomic pair-wise distance matrix between
individuals using 1 - proportion of IBS alleles
'''
# check shape is compatible
if shape in ["square", "square0", "triangle"]:
pass
else:
raise ValueError("matrix shape %s not recognised."
"Valid options are square, square0, "
"and triangle." % shape)
# check compression is compatible
if compression in ["gz", "bin", "bin4"]:
pass
else:
raise ValueError("compression %s not recognised. Accepted "
"formats are gz, bin and bin4." % compression)
if options:
state = self._matrices(matrix_type="genomic", shape=shape,
compression=compression, options=options)
else:
state = self._matrices(matrix_type="genomic", shape=shape,
compression=compression)
self.statement["matrix"] = state
def genetic_relationship_matrix(self, shape, compression, metric,
options=None):
'''
        Calculate a genetic relationship matrix between
        individuals from genotype data
Requires the use of the Plink2 parallelisation to run with large
cohorts of patients
'''
# check shape is compatible
if shape in ["square", "square0", "triangle"]:
pass
else:
raise ValueError("matrix shape %s not recognised."
"Valid options are square, square0, "
"and triangle." % shape)
# check compression is compatible
if compression in ["gz", "bin", "bin4"]:
pass
else:
raise ValueError("compression %s not recognised. Accepted "
"formats are gz, bin and bin4." % compression)
if metric in ["cov", "ibc2", "ibc3"]:
state = self._matrices(matrix_type="grm", shape=shape,
compression=compression, options=metric)
else:
E.info("%s metric not recognised. Running with default Fhat1" % metric)
state = self._matrices(matrix_type="grm", shape=shape,
compression=compression)
self.statement["matrix"] = state
def apply_filters(self, filter_type, filter_value):
'''
arguments supported by this function.
* genotype_rate - exclude SNPs with a genotyping rate below this
value. [float]
* min_allele_frequency - only include SNPs with cohort/case allele
frequency above this threshold. [float]
* max_allele_frequency - include all SNPs with a MAF equal to or below
this value. [float]
* exclude_snp - exclude this single variant
* exclude_snps - text file list of variant IDs to exclude from analysis.
[file]
* chromosome - exclude all variants not on the specified chromosome(s).
[str/list]
* exclude_chromosome - exclude all variants on the specified
chromosome(s). [str/list]
* autosome - exclude all non-place and non-autosomal variants.
[boolean]
* pseudo_autosome - include the pseudo-autosomal region of chromosome
X. [boolean]
* ignore_indels - remove all indels/multi-character allele coding
variants. [boolean]
* snp_bp_range - (from, to) range in bp of variants to include in
analysis. [tuple]
'''
if filter_type == "genotype_rate":
self._construct_filters(genotype_rate=filter_value)
elif filter_type == "hwe":
self._construct_filters(hwe=filter_value)
elif filter_type == "missingness":
self._construct_filters(missingness=filter_value)
elif filter_type == "min_allele_frequency":
self._construct_filters(min_allele_frequency=filter_value)
elif filter_type == "max_allele_frequency":
self._construct_filters(max_allele_frequency=filter_value)
elif filter_type == "exclude_snp":
self._construct_filters(exclude_snp=filter_value)
elif filter_type == "exclude":
self._construct_filters(exclude=filter_value)
elif filter_type == "extract":
self._construct_filters(extract=filter_value)
elif filter_type == "chromosome":
self._construct_filters(chromosome=filter_value)
elif filter_type == "exclude_chromosome":
            self._construct_filters(exclude_chromosome=filter_value)
elif filter_type == "autosome":
self._construct_filters(autosome=filter_value)
elif filter_type == "pseudo_autosome":
self._construct_filters(pseudo_autosome=filter_value)
elif filter_type == "ignore_indels":
self._construct_filters(ignore_indels=filter_value)
elif filter_type == "snp_bp_range":
self._construct_filters(snp_bp_range=filter_value)
elif filter_type == "conditional_snp":
self._construct_filters(conditional_snp=filter_value)
elif filter_type == "keep":
self._construct_filters(keep=filter_value)
elif filter_type == "remove":
self._construct_filters(remove=filter_value)
elif filter_type == "ignore_indels":
self._construct_filters(ignore_indels=filter_value)
def _build_multiple_file_input(self, infiles, file_format):
'''
internal function only. Use it to construct
the appropriate file input flags
'''
statement = None
if file_format == "oxford":
statement = " --gen %s --sample %s " % (infiles.gen_file,
infiles.sample_file)
elif file_format == "oxford_binary":
statement = " --bgen %s --sample %s " % (infiles.bgen_file,
infiles.sample_file)
elif file_format == "plink":
statement = " --ped %s --map %s " % (infiles.ped_file,
infiles.sample_file)
elif file_format == "plink_binary":
statement = " --bed %s --bim %s --fam %s " % (infiles.bed_file,
infiles.bim_file,
infiles.fam_file)
elif file_format == "vcf":
statement = " --vcf %s.vcf.gz " % infiles.vcf_file
elif file_format == "bcf":
statement = " --bcf %s " % infiles.vcf_file
elif file_format == "GRM_binary":
statement = " --grm-bin %s " % infiles.name
else:
raise AttributeError("file format is not defined. Please "
"define the input file formats when "
"instantiating a FileGroup object")
return statement
def _build_single_file_input(self, infiles, file_format):
'''internal function only. Use it to construct the
file input flags with --file, --bfile or --data
'''
statement = None
if file_format == "plink":
statement = " --file %s " % infiles.name
elif file_format == "plink_binary":
statement = " --bfile %s " % infiles.name
elif file_format == "oxford" or file_format == "oxford_binary":
statement = " --data %s" % infiles.name
elif file_format == "GRM_plink":
statement = " --grm.bin %s " % infiles.name
elif file_format == "GRM_binary":
statement = " --grm-bin %s " % infiles.name
elif file_format == "vcf":
statement = " --vcf %s.vcf.gz " % infiles.name
else:
            raise AttributeError("file format is not defined or recognised. "
                                 "Please define the input correctly when "
"instantiating a FileGroup object")
return statement
def _construct_filters(self, **kwargs):
'''
Add filter to each plink run. [data type]
The filters accepted are defined below. These are input as keyword
arguments supported by this function.
* genotype_rate - exclude SNPs with a genotyping rate below this
value. [float]
* missingness - exclude individuals with total genotype missingness
above this value. [float]
* hwe - p-value threshold for excluding SNPs deviating from
Hardy-Weinberg expectations. [float]
* min_allele_frequency - only include SNPs with cohort/case allele
frequency above this threshold. [float]
* max_allele_frequency - include all SNPs with a MAF equal to or below
this value. [float]
* mendelian_error - filter out samples/trios exceeding the error
threshold. [float]
* keep - keep individuals with matching individual and family IDs.
[file]
* remove - remove all individuals with matching individual and family
IDs. [file]
* quality_score_file - vcf file with variants and quality scores. Use
`qual_score_column` and `var_id_col` to specify which columns
correspond to the quality score and variant ID columns.
[file] <int> <int>
* min_qual_score - alters the lower bound of the quality score
threshold; default is 0.[int]
* max_qual_score - sets an upper limit on the quality scores;
default is Inf. [int]
* allow_no_sex - prevents phenotypes set to missing if there is no
gender information. [boolean]
* enforce_sex - force phenotype missing when using --make-bed, --recode
or --write-covar. [boolean]
* subset_filter - filter on a particular subset. Choices are: cases,
controls, males, females, founders, nonfounders. [str]
* extract - text file list of variant IDs to include in analysis,
ignores all others. [file]
* exclude - text file list of variant IDs to exclude from analysis.
[file]
* chromosome - exclude all variants not on the specified chromosome(s).
[str/list]
* exclude_chromosome - exclude all variants on the specified
chromosome(s). [str/list]
* autosome - exclude all non-place and non-autosomal variants.
[boolean]
* pseudo_autosome - include the pseudo-autosomal region of chromosome
X. [boolean]
* ignore_indels - remove all indels/multi-character allele coding
variants. [boolean]
* snp_bp_range - (from, to) range in bp of variants to include in
analysis. [tuple]
* specific_snp - only load the variant specified. [str]
* exclude_snp - exclude this single variant
* window_size - alters behaviour of `specific_snp` and `exclude_snp`
to include/exclude SNPs within +/- half of this distance (kb) are
also included. [float]
* range_resolution - sets the resolution of the (from, to) range.
Either bp, kb or mb. If set it will take the values from
`snp_bp_range`. [str/int/float]
* covariates_file - specify the covariates file with family and
individual IDs in the first two columns. Covariates are in the
(n+2)th column. Only used in conjunction with `covariate_filter`.
[file]
* covariate_filter - covariate columns value to filter on. Can be
used with non-numeric values to filter out individuals with
covariate =/= `covariate_filter` value. [str/int/float]
* covariate_column - column number to apply filtering to if more
than one covariate in the file. [int]
'''
statement = []
# map of keyword arguments recognised to Plink2 filtering flags
filter_map = {"genotype_rate": " --geno %s ",
"missingness": "--mind %s ",
"hwe": " --hwe %s ",
"min_allele_frequency": " --maf %s ",
"max_allele_frequency": " --max-maf %s ",
"mendelian_error": " --me %s ",
"keep": " --keep %s ",
"remove": " --remove %s ",
"quality_score_file": " --qual-scores %s ",
"qual_score_column": " %s ",
"var_id_col": " %s ",
"min_qual_score": " --qual-threshold %s ",
"max_qual_score": " --qual-max-threshold %s ",
"allow_no_sex": " --allow-no-sex ",
"enforce_sex": " --must-have-sex ",
"subset_filter": " --filter-%s ",
"extract": " --extract %s ",
"exclude": " --exclude %s ",
"chromosome": " --chr %s ",
"exclude_chromosome": " --not-chr %s ",
"autosome": " --autosome ",
"pseudo_autosome": " --autosome-xy ",
"ignore_indels": " --snps-only no-DI ",
"snp_id_range": " --from %s --to %s ",
"specific_snp": " --snp %s ",
"window_size": " --window %s ",
"exclude_snp": " --exclude-snp %s ",
"snp_bp_range": "--from-bp %s --to-bp %s ",
"covariates_file": " --filter %s ",
"covariate_filter": " %s ",
"covariate_column": " --mfilter %s ",
"missing_phenotype": " --prune ",
"conditional_snp": " --condition %s ",
"haplotype_size": " --blocks-max-kb %s ",
"haplotype_frequency": " --blocks-min-maf %s "}
# compile all filters together, checking for dependencies.
# use a mapping dictionary to extract the relevant flags and
# combinations to use.
filters = []
filter_dict = {}
for key, value in kwargs.items():
filter_dict[key] = value
# need to check for covariates and qual scores - these
# are more complex. Deal with these first and remove
# from dictionary once complete.
try:
assert filter_dict["quality_score_file"]
assert filter_dict["qual_score_column"]
assert filter_dict["var_id_col"]
quals = []
qual_file = filter_dict["quality_score_file"]
score_col = filter_dict["qual_score_column"]
id_col = filter_dict["var_id_col"]
quals.append(filter_map["quality_score_file"] % qual_file)
quals.append(filter_map["qual_score_column"] % score_col)
quals.append(filter_map["var_id_col"] % id_col)
# remove from dictionary
filter_dict.pop("qual_score_column", None)
filter_dict.pop("var_id_col", None)
filters.append(" ".join(quals))
except KeyError:
pass
try:
assert filter_dict["covariates_file"]
assert filter_dict["covariate_filter"]
covars = []
covar_file = filter_dict["covariates_file"]
covar_val = filter_dict["covariate_filter"]
covars.append(filter_map["covariates_file"] % covar_file)
covars.append(filter_map["covariate_filter"] % covar_val)
# check to filter on specific column number, default is 3rd file
# column, i.e. (n+2)th column
try:
assert filter_dict["covariate_column"]
covar_col = filter_dict["covariate_column"]
covars.append(filter_map["covariate_column"] % covar_col)
filter_dict.pop("covariate_column", None)
except KeyError:
pass
# remove from dictionary
filter_dict.pop("covariates_file", None)
filter_dict.pop("covariate_filter", None)
filters.append(" ".join(covars))
except KeyError:
pass
# range_resolution and snp_bp_range are used together
try:
assert filter_dict["snp_bp_range"]
flags = filter_map["snp_bp_range"]
from_pos = filter_dict["snp_bp_range"].split(",")[0]
to_pos = filter_dict["snp_bp_range"].split(",")[1]
filters.append(flags % (from_pos, to_pos))
# remove so they are not duplicated - source of bugs
filter_dict.pop("snp_bp_range", None)
except KeyError:
pass
for each in filter_dict.keys():
try:
assert filter_map[each]
# check for data type <- behaviour is type dependent
if type(filter_dict[each]) == bool:
filters.append(filter_map[each])
# handle multiple arguments in string format
elif len(filter_dict[each].split(",")) > 1:
vals = tuple(filter_dict[each].split(","))
filters.append(filter_map[each] % vals)
else:
filter_val = filter_dict[each]
filters.append(filter_map[each] % filter_val)
except KeyError:
E.warn("%s filter not recognised, please see "
"documentation for allowed filters" % each)
self.filters.append(" ".join(filters))
self.statement["filters"] = " ".join(self.filters)
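# Illustrative usage sketch (not part of the original pipeline); the
# constructor call and file names below are assumptions for the example.
# gwas = PlinkDev(files="cohort")
# gwas._construct_filters(genotype_rate=0.05, missingness=0.05,
#                         hwe=1e-6, min_allele_frequency=0.01,
#                         autosome=True)
# self.statement["filters"] would then hold flags roughly equivalent to
# "--geno 0.05 --mind 0.05 --hwe 1e-06 --maf 0.01 --autosome"
# (the flag order follows dictionary iteration order).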
def calc_ld(self, ld_statistic, ld_threshold,
ld_shape="table"):
'''
Calculate linkage disequilibrium between all SNP
pairs.
Arguments
---------
ld_statistic: string
The LD statistic to report, either correlation or squared correlation
of inter-variant allele counts
ld_threshold: float
minimum value to report for pair-wise LD
ld_window: int
max distance (in kb) between SNPs for calculating LD (not
currently exposed as an argument of this method)
ld_shape: string
shape to use for reporting LD, either a table or a matrix. If a
matrix then either square, square with diagonal (square0) or
triangular. Square matrices are symmetric.
'''
statement = []
ld_map = {"r": " --r %s dprime ",
"r2": "--r2 %s dprime "}
shape_map = {"table": "inter-chr gz",
"square": "square gz",
"square0": "square0 gz",
"triangle": "triangle gz"}
try:
statement.append(ld_map[ld_statistic] % shape_map[ld_shape])
except KeyError:
raise ValueError("%s LD statistic not recognised. Please "
"use either 'r' or 'r2'" % ld_statistic)
if type(ld_threshold) == float:
statement.append(" --ld-window-r2 %0.3f " % ld_threshold)
else:
E.warn("threshold type not recognised, setting to default "
"value of 0.2")
self.statement["tasks"] = " ".join(statement)
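# Illustrative usage sketch (assumed values): report squared correlations
# above 0.2 for all SNP pairs as a table.
# gwas.calc_ld(ld_statistic="r2", ld_threshold=0.2, ld_shape="table")
# stores "--r2 inter-chr gz dprime  --ld-window-r2 0.200" in
# self.statement["tasks"].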
def _run_tasks(self, parameter=None, **kwargs):
'''
Plink2 is capable of much more than just running basic association
analyses.
These include file processing, reformatting, filtering, data summaries,
PCA, clustering, GRM calculation (slow and memory intensive), etc.
Multiple tasks can be added by separate calls to this function.
For instance, adding phenotype and gender information using the
update_samples task whilst changing the file format.
Tasks
-----
* change_format - convert from input format to an alternative format
after applying filters.
* change_missing_values - alters the genotype or phenotype missing
value into the value supplied.
* update_variants - use this to fill in missing variant IDs, useful
for data from exome or whole-genome sequencing that have
non-standard IDs.
* update_samples - update phenotype and sample information
* flip_strands - flip the strand for alleles, swaps A for T and
C for G.
* flip_scan - use the LD-based scan to check SNPs have not had
incorrect strand assignment. Particularly useful if cases and
controls were genotyped separately, or the cohort was genotyped
in different batches.
* sort - sort files by individual and/or family IDs
* merge - merge new filesets with reference fileset.
* merge_mode - handling of missing values and overwriting values
* find_duplicates - find and output duplicate variants based on bp position,
or variant ID. Useful to output for the --exclude filtering flag.
* remove_relations - remove one of a pair of individuals with IBS >=
a threshold. Recommended minimum is 3rd cousins (IBS >= 0.03125).
* check_gender - check imputed gender from non-pseudoautosomal X
chromosome genotypes against self-reported gender
* estimate_haplotypes - assign SNPs to haplotype blocks and get
positional information
'''
statement = []
# set up a dictionary of recognised tasks with key word argument
# values as further dictionaries. Use the parameter argument
# to pass arguments by value to string formatting
task_map = {'change_format': {"plink_binary": " --make-bed ",
"plink": " --recode ",
"oxford": " --recode oxford ",
"oxford_binary": " --recode oxford gen-gz ",
"raw": " --recode A tabx "},
"change_missing_values": {"genotype": " --missing-genotype %s ",
"phenotype": " --missing-phenotype %s "},
"update_variants": {"variant_ids": " --set-missing-var-ids %s ",
"missing_id": " --missing-var-code %s ",
"chromosome": " --update-chr %s ",
"centimorgan": " --update-cm %s ",
"name": " --update-name %s ",
"alleles": " --update-alleles %s ",
"map": " --update-map %s "},
"update_samples": {"sample_ids": " --update-ids %s ",
"parents": " --update-parents %s ",
"gender": " --update-sex %s %s "},
"flip_strands": {"all_samples": " --flip %s ",
"subset": " --flip-subset %s "},
"flip_scan": {"default": " --flip-scan verbose ",
"window": "--flip-scan --flip-scan-window %s ",
"kb": " --flip-scan --flip-scan-window-kb %s ",
"threshold": " --flip-scan --flip-scan-threshold %s "},
"sort": {"none": " --indiv-sort %s ",
"natural": " --indiv-sort %s ",
"ascii": " --indiv-sort %s ",
"file": " --indiv-sort %s "},
"merge": {"plink": " --merge %s ",
"binary_plink": " --bmerge %s "},
"merge_mode": {"default": " --merge-mode 1 ",
"orginal_missing": " --merge-mode 2 ",
"new_nonmissing": " --merge-mode 3 ",
"no_overwrite": " --merge-mode 4 ",
"force": " --merge-mode 5 ",
"report_all": " --merge-mode 6 ",
"report_nonmissing": " --merge-mode 7"},
"find_duplicates": {"same_ref": " --list-duplicate-vars require-same-ref ",
"id_match": " --list-duplicate-vars ids-only ",
"suppress_first": " --list-duplicate-vars suppress-first"},
"remove_relations": {"cutoff": " --rel-cutoff %s "},
"check_gender": " --check-sex ",
"pca": " --pca %s ",
"estimate_haplotypes": " --blocks "}
for task, value in kwargs.items():
# check for PCA first as it is not nested in task_map
if task == "pca":
try:
state = task_map[task] % value
statement.append(state)
except TypeError:
statement.append(task_map[task])
elif task == "check_gender":
statement.append(task_map[task])
elif task == "estimate_haplotypes":
statement.append(task_map[task])
elif task != "parameter":
try:
# sub_task is a nested dictionary
sub_task = task_map[task]
try:
assert sub_task[value]
try:
# gender has two string formats
if value == "gender":
gcol = 1
statement.append(sub_task[value] % (parameter,
gcol))
else:
# some tasks do not contain task values for the
# parameter argument - catch these with the TypeError
# exception
statement.append(sub_task[value] % parameter)
# the default for parameter is None, check this is appropriate
if not parameter:
E.warn("Parameter value is set to NoneType. "
"Please check this is an appropriate value "
"to pass for this task")
else:
pass
except TypeError:
statement.append(sub_task[value])
except KeyError:
raise KeyError("No sub task found, see docs for details of "
"recognised tasks")
except KeyError:
raise KeyError("Task not recognised, see docs for details of "
"recognised tasks")
else:
pass
# handle multiple tasks for a single run
try:
curr_tasks = self.statement["tasks"]
new_tasks = " ".join(statement)
self.statement["tasks"] = " ".join([curr_tasks, new_tasks])
except KeyError:
self.statement["tasks"] = " ".join(statement)
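# Illustrative usage sketch (the gender file name is an assumption):
# convert the fileset to binary plink format and update sample gender.
# gwas._run_tasks(change_format="plink_binary")
# gwas._run_tasks(update_samples="gender", parameter="gender_info.txt")
# Successive calls append to self.statement["tasks"], so both
# " --make-bed " and " --update-sex gender_info.txt 1 " end up in the run.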
def _output_statistics(self, **kwargs):
'''
Summary statistics are written to specific files dictated by the
type of statistic
Statistics
----------
* allele_frequency - writes out MAF to `plink`.frq, this can be
modified with specific keywords.
* missing_data - generates a report of data missingness, can be subset
into within family and/or cluster reports
* hardy_weinberg - calculates all HWE p-values using exact test
statistics. For case/control studies reports are written for case,
controls and combined.
* mendel_errors - generates a Mendelian error report across all trios.
There are 10 different codes responding to different Mendelian error
scenarios.
* inbreeding - calculate observed and expected homozygosity across
individuals and F statistics. If the sample size is small then a
file of MAFs is required. Inbreeding coefficients can also be
reported on request using inbreeding_coef.
* gender_checker - checks gender assignment against X chromosome
genotypes. Gender values can also be imputed based on genotype
information using gender_impute.
* wrights_fst - calculate Wright's Fst statistic given a set of
subpopulations for each autosomal diploid variant. Used in
conjunction with the --within flag.
'''
stats_map = {"allele_frequency": " --freq %s ",
"missing_data": " --missing %s ",
"hardy_weinberg": " --hardy midp ",
"mendel_errors": " --mendel %s ",
"inbreeding": " --het %s ",
"inbreeding_coef": " --ibc ",
"gender_checker": " --check-sex ",
"gender_impute": " --impute-sex ",
"wrights_fst": " --fst --within %s ",
"case_control_fst": "--fst %s "}
statement = []
for key, value in kwargs.items():
if value:
try:
assert stats_map[key]
statement.append(stats_map[key] % value)
except KeyError:
raise KeyError("statistic not recognised. Please "
"consult the documentation for allowed "
"options.")
else:
try:
assert stats_map[key]
flag = stats_map[key].rstrip("%s ")
statement.append(flag)
except KeyError:
raise KeyError("statistic not recognised. Please "
"consult the documentation for allowed "
"options.")
self.statement["stats"] = " ".join(statement)
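# Illustrative usage sketch (modifier values are assumptions): write out
# gzipped allele frequencies and a plain missingness report.
# gwas._output_statistics(allele_frequency="gz", missing_data=False)
# Truthy values are substituted into the flag (" --freq gz "), whereas
# falsy values strip the placeholder and emit the bare flag (" --missing").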
def run_association(self, association=None, model=None,
run_options=None,
permutation=False, n_perms=None,
random_seed=None, permutation_options=None,
covariates_file=None, covariates=None):
'''
Construct a statement for a plink2 association analysis.
QC filters are constructed from input during instantiation.
run options include redirecting logging output, using parallelisation,
defining number of threads to use, etc
The default association uses the --assoc flag. Plink will check
phenotype coding, if it is not case/control it assumes
it is a continuous trait and uses linear regression.
Alternative regression models that include covariates can be used,
i.e. logistic and linear regression.
key
***
{CC} - applies to case/control analysis only
{quant} - applies to quantitative trait only
{CC/quant} - applies to both
run_options
-----------
``--assoc``:
* `fisher | fisher-midp` - uses Fisher's exact test to calculate
association p-values or applies Lancaster's mid-p adjustment. {CC}
* `counts` - causes --assoc to report allele counts instead of
frequencies. {CC}
* `set-test` - implements and tests the significance of variant
sets. See documentation below. {CC/quant}
* `qt-means` - generates a .qassoc.means file reporting trait means
and standard deviations by genotype. {quant}
* `lin` - reports the Lin et al (2006) statistic to be reported. If
multiple testing adjustments and/or permutation is also used, they
will be based on this statistic. {quant}
``--model``:
* `fisher | fisher-midp | trend-only` - uses Fisher's exact test
to calculate association p-values or applies Lancaster's mid-p
adjustment. trend-only forces only a trend test to be performed.
{CC}
* `dom | rec | gen | trend` - use the specified test as the basis
for the model permutation. If none are defined the result with the
smallest p-value is reported. {CC}
* --cell - sets the minimum number of observations per cell in the
2x3 contingency table. The default is 0 with the Fisher and
Fisher-midp test, otherwise 5. {CC}
``--linear/logistic``:
* `set-test` - implements and tests the significance of variant
sets. See documentation below. {CC/quant}
* `hide-covar` - removes the covariate specific sections from the
results output. {CC/quant}
* `sex | no-x-sex` - `sex` adds sex as covariate to all models,
whilst `no-x-sex` does not include gender in X-chromosome SNP
models. {CC/quant}
* `interaction` - adds in genotype X covariate interaction terms
into the model. Can only be used with permutation is ``--tests``
is also specified. {CC/quant}
* `beta` - reports the beta coefficients instead of the OR in a
logistic model. {CC}
* `standard-beta` - standardizes the phenotype and all predictor
variables to zero mean and unit variance prior to regression
(separate for each variant analysed). {quant}
* `intercept` - includes the intercept in the output results.
{quant}
model
-----
* `recessive` - `recessive` specifies the model assuming the A1 allele
as recessive. {CC/quant}
* `dominant` - `dominant` specifies the model assuming the A1 allele is
dominant. {CC/quant}
* `genotype` - `genotype` adds an additive effect/dominance deviation
2df joint test with two genotype variables in the test (coded 0/1/2
and 0/1/0). {CC/quant}
* `trend` - forces a trend test to be performed. {CC/quant}
* `hethom` - `hethom` uses 0/0/1 and 0/1/0 instead of the genotype
coding. With permutation it will be based on the joint test instead
of just the additive effects. This can be overridden using the
`--tests` flag. {CC/quant}
* `no-snp` - `no-snp` defines a regression of phenotype on covariates
without reference to genotype data, except where `--condition{-list}`
is specified. If used with permutation, test results will be reported
for every covariate. {CC/quant}
permutation
-----------
If permutation is True, run an adaptive Monte Carlo permutation test.
If n_perms is set, this will run a max(T) permutation test with the n
replications. A random seed will need to be provided.
* `perm-count` - this alters the permutation output report to include
counts instead of frequencies
covariates
----------
These should be provided in a separate file. Specifying which
covariates to include can be done as either a comma-separated list
of covariate names or numbers. These numbers will correspond to the
(n+2)th covariate file column as per the plink documentation.
'''
# model map maps common option effects onto specific syntax
model_map = {"--logistic": {"recessive": "recessive",
"dominant": "dominant",
"genotype": "genotypic"},
"--linear": {"recessive": "recessive",
"dominant": "dominant",
"genotype": "genotypic"},
"--model": {"recessive": "rec",
"dominant": "dom",
"genotype": "gen"}}
statement = []
# construct analysis flags
# add model, i.e. additive, recessive, dominant, etc.
# see docstring for details. Make sure correct modifier is used
# with a mapping dictionary
if association == "logistic":
statement.append(" --logistic ")
m_map = model_map["--logistic"]
if model:
statement.append(m_map[model])
else:
pass
elif association == "linear":
statement.append(" --linear ")
m_map = model_map["--linear"]
if model:
statement.append(m_map[model])
else:
pass
elif association == "model":
statement.append(" --model ")
m_map = model_map["--model"]
statement.append(m_map[model])
else:
statement.append(" --assoc ")
# add in run options. These need to be in their correct
# format already
if run_options:
modifiers = " ".join(run_options)
statement.append(modifiers)
else:
pass
# permutation should have a random seed set by the user. Allow
# this to set it's own seed if one not provided, but report it in
# the log file
if permutation:
try:
assert random_seed
except AssertionError:
random_seed = random.randint(0, 100000000)
E.warn("No seed is provided for the permutation test. "
"Setting seed to %s. Record this for future "
"replicability" % random_seed)
if n_perms:
statement.append(" mperm=%i --seed %s " % (n_perms,
random_seed))
else:
statement.append(" perm --seed %s " % (random_seed))
else:
pass
# if using linear or logistic, covariates can be added into the model
# to adjust for their effects - assumes fixed effects of covariates
# mixed models are not yet implemented in Plink2.
if covariates:
covars = covariates.split(",")
# str.split always returns strings, so decide between covariate
# names and column numbers by checking whether the supplied
# values are all numeric
if all([cx.isdigit() for cx in covars]):
m_covar = " --covar-number %s " % covariates
elif all([not cx.isdigit() for cx in covars]):
m_covar = " --covar-name %s " % covariates
else:
# if a mixture of names and numbers is specified then don't
# adjust the model for any covariates and log a warning
E.warn("Covariate headers or numbers are not recognised. "
"No covariates will be included in the model. Please "
"specify them exactly")
covariates = None
covariates_file = None
if covariates and covariates_file:
statement.append(" --covar %s %s " % (covariates_file,
m_covar))
elif covariates and not covariates_file:
E.warn("No covariate file specified. None included in model.")
elif covariates_file and not covariates:
E.warn("No covariates specified to include in the model."
"None included")
else:
pass
self.statement["assoc"] = " ".join(statement)
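# Illustrative usage sketch (file names and covariate labels are
# assumptions): a logistic GWAS under a dominant model with 10,000 max(T)
# permutations, adjusted for age and sex.
# gwas.run_association(association="logistic", model="dominant",
#                      permutation=True, n_perms=10000, random_seed=42,
#                      covariates_file="covariates.txt",
#                      covariates="age,sex")
# self.statement["assoc"] then contains roughly
# " --logistic dominant mperm=10000 --seed 42
#   --covar covariates.txt --covar-name age,sex ".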
def PCA(self, n_pcs="20"):
'''
Perform PCA analysis on a previously generated GRM, output the first n
principal components, default = 20
'''
self._run_tasks(pca=n_pcs)
def _dimension_reduction(self, **kwargs):
'''
Use PCA to perform dimensionality reduction on
input samples. A PCA can be calculated using
a subset of samples which can then be projected on
to other samples.
'''
# FINISH ME!!!!
def _detect_interactions(self, method=None, modifier=None,
set_file=None, set_mode=None,
report_threshold=None,
sig_threshold=None,
covariates_file=None, covariates=None):
'''
Detect epistatic interactions between SNPs using either an inaccurate
scan (fast-epistasis) or a fully saturated linear model
Methods
-------
fast_epistasis - uses an "imprecise but fast" scan of all 3x3 joint genotype
count tables to test for interactions. Can be modified to use a likelihood
ration test `boost` or a joint-effects test `joint-effects`. Default is
`joint-effects`.
epistasis - uses a linear model to test for interactions between additive
effects after main effects. Logistic regression for case/control and
linear regression for quantitative traits.
two_locus - tests a single interaction between two variants using joint genotype
counts and frequencies.
adjusted - allows adjustment for covariates in the interaction test, and also adjusts
for main effects from both the test and target SNP. Requires an R plugin script.
'''
interact_map = {"fast_epistasis": " --fast-epistasis %s ",
"epistasis": " --epistasis %s ",
"two_locus": " --twolocus %s ",
"adjusted": " --R %s "}
statement = []
if modifier:
statement.append(interact_map[method] % modifier)
else:
modifier = ""
statement.append(interact_map[method] % modifier)
if covariates_file:
statement.append("--covar %s --covar-name %s " % (covariates_file,
covariates))
else:
pass
if set_mode and set_file:
# does not work with two-locus test
if method == "two_locus" and set_mode:
E.warn("Two locus test cannot be used in conjunction "
"with a set-based test.")
elif set_mode:
statement.append(" %s --set %s " % (set_mode, set_file))
else:
pass
else:
pass
# alter reporting of significant interactions and significance
# level of interactions
if report_threshold:
statement.append(" --epi1 %0.3f " % float(report_threshold))
else:
pass
if sig_threshold:
statement.append(" --epi2 %0.3f " % float(sig_threshold))
else:
pass
self.statement["epistasis"] = " ".join(statement)
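# Illustrative usage sketch (threshold value is an assumption): a fast
# epistasis scan with the joint-effects test, reporting pairs at p < 0.001.
# gwas._detect_interactions(method="fast_epistasis",
#                           modifier="joint-effects",
#                           report_threshold=0.001)
# stores " --fast-epistasis joint-effects  --epi1 0.001 " in
# self.statement["epistasis"].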
def _matrices(self, matrix_type, shape="triangle", compression=None, options=None):
'''
Calculate a number of different distance matrices:
realised genetic relationship matrix
relationship covariance matrix
identity by descent/state matrix
hamming distance matrix
* matrix_type - matrix to compute. Can be either IBS, 1 - IBS,
Hamming, GRM
'''
statement = []
if matrix_type == "hamming":
flag = " --distance "
elif matrix_type == "ibs":
flag = " --distance ibs "
elif matrix_type == "genomic":
flag = " --distance 1-ibs "
elif matrix_type == "grm":
flag = " --make-grm-bin "
if options:
statement.append(" ".join([flag, shape, compression, options]))
elif matrix_type == "grm":
statement.append(flag)
else:
statement.append(" ".join([flag, shape, compression]))
return " ".join(statement)
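# Illustrative usage sketch: the return value is a flag string that a
# caller would place in self.statement["matrix"] so that build_statement()
# picks it up (an assumption based on the keys build_statement reads).
# gwas.statement["matrix"] = gwas._matrices(matrix_type="grm")
# gwas.statement["matrix"] = gwas._matrices(matrix_type="ibs",
#                                           shape="square",
#                                           compression="gz")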
def _qc_methods(self, parameter=None, **kwargs):
'''
Perform QC on genotyping data, SNP-wise and sample-wise.
All arguments are passed as key word arguments, except
cases detailed in `Parameters` where they are passed with
the ``parameter`` argument.
Methods
-------
* ld_prune - generate a list of SNPs in linkage equilibrium by
pruning SNPs on either an LD statistic threshold, i.e. r^2,
or use a variance inflation factor (VIF) threshold
* heterozygosity - calculate average heterozygosity from each
individual across a set of SNPs, threshold on individuals
with deviation from expected proportions
* ibd - calculate the genetic relationship of individuals to
infer relatedness between individuals, threshold on given
degree of relatedness, e.g. IBD > 0.03125, 3rd cousins
* genetic_gender - estimate the gender of an individual
from the X chromosome genotypes - correlate with reported
gender and output discrepancies
* ethnicity_pca - perform PCA using a subset of independent
SNPs to infer genetic ancestry. Compare and contrast this
to individuals reported ancestry. Report discrepancies
and individuals greater than a threshold distance away
from a reference population.
* homozygosity - identifies sets of runs of homozygosity
within individuals. These may be indicative of inbreeding,
systematic genotyping errors or regions under selection.
Parameters
----------
Method parameters can also be passed through this function
as keyword=value pairs.
* ld_prune:
`kb` - this modifier changes the window resolution to kb
rather than bp.
`r2` - the r^2 threshold above which SNPs are to be removed
`vif` - the VIF threshold over which SNPs will be removed
`window` - window size to calculate pair-wise LD over
`step` - step size to advance window by
'''
qc_dict = {"ld_prune": {"R2": " --indep-pairwise %s %s %s ",
"VIF": " --indep %s %s %s "},
"heterozygosity": {"gz": " --het gz",
"raw": " --het "},
"ibd": {"relatives": " --genome gz rel-check ",
"full": " --genome gz full ",
"norm": " --genome gz "},
"genetic_gender": "none",
"ethnicity_pca": "none",
"homozygosity": {"min_snp": " --homozyg-snp %s ",
"min_kb": " --homozyg-kb %s ",
"default": " --homozyg ",
"density": " --homozyg-density ",
"set_gap": " --homozyg-gap ",
"snp_window": " --homozyg-window-snp %s ",
"het_max": " --homozyg-het %s "}}
task_dict = {}
state = []
# put everything in an accessible dictionary first
for task, value in kwargs.items():
task_dict[task] = value
# LD pruning can be passed multiple parameters,
# handle this separately
try:
sub_task = task_dict["ld_prune"]
ld_prune_task = qc_dict["ld_prune"]
try:
step = task_dict["step"]
except KeyError:
raise AttributeError("No step size found, please "
"pass a step size to advance the "
"window by")
try:
window = task_dict["window"]
try:
task_dict["kb"]
window = "".join([window, "kb"])
task_dict.pop("kb", None)
except KeyError:
pass
except KeyError:
raise AttributeError("No window size found. Please input "
"a window size to prune over")
try:
threshold = task_dict["threshold"]
except KeyError:
raise AttributeError("No threshold value, please input "
"a value to LD prune SNPs on")
# add in the kb if it is passed as an argument
state.append(ld_prune_task[sub_task] % (window, step, threshold))
task_dict.pop("threshold", None)
task_dict.pop("ld_prune", None)
task_dict.pop("window", None)
task_dict.pop("step", None)
except KeyError:
pass
for task, value in task_dict.items():
try:
sub_task = qc_dict[task]
try:
state.append(sub_task[value] % parameter)
except TypeError:
state.append(sub_task[value])
except KeyError:
raise AttributeError("Task not found, please see "
"documentation for available features")
self.statement["QC"] = " ".join(state)
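# Illustrative usage sketch (window/step/threshold values are assumptions):
# LD-prune on r^2 within a 50kb window, advancing 5 variants at a time.
# gwas._qc_methods(ld_prune="R2", kb=True, window="50",
#                  step="5", threshold="0.2")
# produces " --indep-pairwise 50kb 5 0.2 " in self.statement["QC"]; the
# values are passed as strings because "kb" is concatenated onto window.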
def build_statement(self, infiles, outfile, threads=None,
memory="60G", parallel=None):
'''
Build statement and execute from components
'''
statement = []
exec_state = self.executable
# calls to function add to the self.statement dictionary
try:
statement.append(self.statement["program"])
except KeyError:
raise AttributeError("Input files and format not detected")
try:
statement.append(self.statement["QC"])
except KeyError:
pass
try:
statement.append(self.statement["filters"])
except KeyError:
pass
try:
statement.append(self.statement["tasks"])
except KeyError:
pass
try:
statement.append(self.statement["stats"])
except KeyError:
pass
try:
statement.append(self.statement["assoc"])
except KeyError:
pass
try:
statement.append(self.statement["matrix"])
except KeyError:
pass
try:
statement.append(self.statement["epistasis"])
except KeyError:
pass
if threads:
statement.append(" --threads %i " % threads)
else:
pass
if not memory:
pass
elif memory != "60G":
memory = int(memory.strip("G")) * 1000
statement.append(" --memory %i " % memory)
else:
statement.append(" --memory 60000 ")
# add output flag
# outfile needs to be complete path for Plink to save
# results properly - check if it starts with '/',
# if so is already a full path
if not parallel:
if os.path.isabs(outfile):
statement.append(" --out %s " % outfile)
else:
outpath = "/".join([os.getcwd(), outfile])
statement.append(" --out %s " % outpath)
os.system(" ".join(statement))
else:
# parallelisation only really applies to GRM calculation
# at the moment <- need to generalise
# if parallelisation is used, invoke temp files
# then agglomerate files
statements = []
if os.path.isabs(outfile):
outpath = outfile
else:
outpath = "/".join([os.getcwd(), outfile])
for i in range(1, parallel+1):
# copy list, assigning just makes a pointer
p_state = statement[:]
p_state.append(" --parallel %i %i " % (i, parallel))
p_state.append(" --out %s.%i " % (outpath, i))
statements.append(" ".join(p_state))
os.system(";".join(statements))
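# Illustrative end-to-end sketch (all file names are assumptions, and it
# presumes self.statement["program"] has been populated by the class's
# input-format handling elsewhere):
# gwas = PlinkDev(files="cohort")
# gwas._construct_filters(min_allele_frequency=0.01, hwe=1e-6)
# gwas.run_association(association="logistic")
# gwas.build_statement(infiles=gwas.infiles, outfile="cohort_gwas",
#                      threads=4, memory="16G")
# build_statement() concatenates the "program", "QC", "filters", "tasks",
# "stats", "assoc", "matrix" and "epistasis" components stored in
# self.statement and executes the command with os.system.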
class PlinkDev(Plink2):
'''
Run various Plink functions and analysis, including file processing, GRM
calculation, PCA and other GWA tasks
Requires Plink v1.9_devel to be in the user's PATH variable as ``plinkdev`` to
distinguish it from Plink v1.07 and v1.9.
Currently uses Nov 11 development build.
'''
def __init__(self, files, options=None,
settings=None, design=None):
self.infiles = files
self.options = options
self.settings = settings
self.design = design
self.executable = "plinkdev"
self.statement = {}
self.filters = []
class GWASResults(object):
'''
A class for handling the results from a GWA, used for plotting
and post-analysis QC
'''
def __init__(self, assoc_file, **kwargs):
# if the assoc_file is a list of multiple files,
# then merge them into a single file
if type(assoc_file) == list and len(assoc_file) > 1:
E.info("multiple results files detected")
self.infiles = assoc_file
self.infile = None
self.results = self.parse_genome_wide(assoc_file)
else:
E.info("single results file detected")
self.infile = assoc_file
self.infiles = None
# results is a pandas dataframe to operate on
self.results = self.get_results(assoc_file, **kwargs)
def parse_genome_wide(self, association_files):
'''
Accept a list of results files, merge them together
and output as a single dataframe
Will this take a lot of memory??
'''
file0 = association_files.pop(0)
df = self.get_results(file0)
for afile in association_files:
_df = self.get_results(afile)
df = df.append(_df)
df["CHR"] = df["CHR"].astype(np.int64)
df.sort_values(by=["CHR", "BP"], inplace=True)
return df
def get_results(self, association_file,
epistasis=False,
file_format="plink"):
'''
Parse a GWA or epistasis results file and return the table
'''
# use Pandas for now - try something different later
# SQLite DB maybe?
# inconsistent number of white spaces between
# fields means Pandas parsing breaks down
# fields need to be the correct data type,
# i.e. BP = int, P = float, SNP = str, etc
# if the file has already been parsed and processed
# just assign it instead
# epistasis results don't have a header
try:
peek = pd.read_table(association_file, nrows=5,
sep="\s*", header=0,
index_col=None,
engine='python')
except StopIteration:
peek = pd.read_table(association_file, nrows=5,
sep="\t", header=0,
index_col=None)
if epistasis:
try:
results_frame = pd.read_table(association_file,
sep="\s*", header=0,
index_col=None,
engine='python')
except StopIteration:
results_frame = pd.read_table(association_file,
sep="\t", header=0,
index_col=None)
# results from fast epistasis are different to others
if file_format == "cassi_covar":
if results_frame.shape[1] == 12:
results_frame.columns = ["SNP1", "CHR1", "ID1", "BP1",
"SNP2", "CHR2", "ID2", "BP2",
"OR", "SE", "STAT", "P"]
elif results_frame.shape[1] == 14:
results_frame.columns = ["SNP1", "CHR1", "ID1", "BP1",
"SNP2", "CHR2", "ID2", "BP2",
"OR", "SE", "STAT", "P",
"CASE_RSQ", "CTRL_RSQ"]
elif results_frame.shape[1] == 16:
results_frame.columns = ["SNP1", "CHR1", "ID1", "BP1",
"SNP2", "CHR2", "ID2", "BP2",
"OR", "SE", "STAT", "P",
"CASE_RSQ", "CTRL_RSQ",
"CASE_DPRIME", "CTRL_DPRIME"]
results_frame.loc[:, "BP1"] = pd.to_numeric(results_frame["BP1"],
errors="coerce")
elif file_format == "cassi":
pass
elif file_format == "plink":
if results_frame.shape[1] == 7:
results_frame.columns = ["CHR1", "SNP1", "CHR",
"SNP", "OR", "STAT", "P"]
elif results_frame.shape[1] == 9:
results_frame.columns = ["CHR", "SNP", "BP", "A1", "NMISS",
"OR", "SE", "STAT", "P"]
else:
results_frame.columns = ["CHR", "SNP", "BP", "A1", "OR",
"SE", "STAT", "P"]
results_frame.loc[:, "BP"] = pd.to_numeric(results_frame["BP"],
errors="coerce")
results_frame.loc[:, "P"] = pd.to_numeric(results_frame["P"],
errors="coerce")
return results_frame
else:
try:
assert peek["log10P"].any()
results_frame = pd.read_table(association_file,
sep="\t", header=0,
index_col=None,
dtype={"BP": np.int64,
"NMISS": np.int64})
return results_frame
except KeyError:
pass
l_count = 0
E.info("parsing file: %s" % association_file)
with open(association_file, "r") as ifile:
for line in ifile:
# check if spacing is whitespace or tab
if len(line.split(" ")) > 1:
parsed = line.split(" ")
elif len(line.split("\t")) > 1:
parsed = line.split("\t")
else:
raise IOError("file separator not recognised. "
"Must be whitespace or tab")
# remove multiple blank spaces
for i in range(parsed.count('')):
parsed.remove('')
# get rid of the newline
try:
parsed.remove('\n')
except ValueError:
parsed = [(px).rstrip("\n") for px in parsed]
if l_count == 0:
header = [iy.upper() for ix, iy in enumerate(parsed)]
head_idx = [ix for ix, iy in enumerate(parsed)]
map_dict = dict(zip(head_idx, header))
res_dict = dict(zip(header, [[] for each in header]))
l_count += 1
else:
col_idx = [lx for lx, ly in enumerate(parsed)]
col = [ly for lx, ly in enumerate(parsed)]
for i in col_idx:
res_dict[map_dict[i]].append(col[i])
l_count += 1
# subtract one from the index for the header column
df_idx = range(l_count-1)
results_frame = pd.DataFrame(res_dict, index=df_idx)
results_frame.fillna(value=1.0, inplace=True)
try:
results_frame = results_frame[results_frame["TEST"] == "ADD"]
except KeyError:
pass
# need to handle NA as strings
results_frame["P"][results_frame["P"] == "NA"] = 1.0
results_frame["BP"] = [int(bx) for bx in results_frame["BP"]]
results_frame["P"] = [np.float64(fx) for fx in results_frame["P"]]
try:
results_frame["STAT"][results_frame["STAT"] == "NA"] = 1.0
results_frame["STAT"] = [np.float64(sx) for sx in results_frame["STAT"]]
except KeyError:
try:
results_frame["CHISQ"][results_frame["CHISQ"] == "NA"] = 1.0
results_frame["CHISQ"] = [np.float64(sx) for sx in results_frame["CHISQ"]]
except KeyError:
try:
results_frame["T"][results_frame["T"] == "NA"] = 1.0
results_frame["T"] = [np.float64(sx) for sx in results_frame["T"]]
except KeyError:
pass
try:
results_frame["F_U"][results_frame["F_U"] == "NA"] = 0.0
results_frame["F_U"] = [np.float64(ux) for ux in results_frame["F_U"]]
except KeyError:
pass
try:
results_frame["F_A"][results_frame["F_A"] == "NA"] = 0.0
results_frame["F_A"] = [np.float64(ax) for ax in results_frame["F_A"]]
except KeyError:
pass
try:
results_frame["FREQ"][results_frame["FREQ"] == "NA"] = 0.0
results_frame["FREQ"] = [np.float64(fx) for fx in results_frame["FREQ"]]
except KeyError:
pass
try:
results_frame["OR"][results_frame["OR"] == "NA"] = 1.0
results_frame["OR"] = [np.float64(ox) for ox in results_frame["OR"]]
except KeyError:
try:
results_frame["BETA"][results_frame["BETA"] == "NA"] = 1.0
results_frame["BETA"] = [np.float64(ox) for ox in results_frame["BETA"]]
except KeyError:
results_frame["B"][results_frame["B"] == "NA"] = 0.0
results_frame["B"] = [np.float64(ox) for ox in results_frame["B"]]
return results_frame
def plotManhattan(self, save_path, resolution="chromosome",
write_merged=True, sig_level=8):
'''
Generate a basic manhattan plot of the association results
Just deal with chromosome-by-chromosome for now.
'''
# use the python ggplot plotting package
# need to calculate -log10P values separately
self.results["log10P"] = np.log10(self.results["P"])
# or using rpy2
py2ri.activate()
R('''suppressPackageStartupMessages(library(ggplot2))''')
R('''suppressPackageStartupMessages(library(scales))''')
R('''suppressPackageStartupMessages(library(qqman))''')
R('''sink(file="sink.text")''')
r_df = py2ri.py2ri_pandasdataframe(self.results)
R.assign("assoc.df", r_df)
if resolution == "chromosome":
R('''assoc.df$CHR <- factor(assoc.df$CHR, '''
'''levels=levels(ordered(unique(assoc.df$CHR))),'''
'''labels=unique(paste0("chr", assoc.df$CHR)))''')
R('''nchrom <- length(unique(assoc.df$CHR))''')
R('''myCols <- rep(c("#ca0020", "#404040"), nchrom)[1:nchrom]''')
R('''names(myCols) <- sort(unique(assoc.df$CHR))''')
R('''colScale <- scale_colour_manual(name = "CHR", values=myCols)''')
R('''bp_indx <- seq_len(nrow(assoc.df))''')
R('''assoc.df$BPI <- bp_indx''')
R('''p <- ggplot(assoc.df, aes(x=BPI, y=-log10(P), colour=CHR)) + '''
'''geom_point(size=1) + colScale + '''
'''geom_hline(yintercept=6, linetype="dashed", colour="blue") + '''
'''theme_bw() + labs(x="Chromosome position (bp)", '''
'''y="-log10 P-value") + facet_grid(~CHR, scale="free_x") + '''
'''theme(axis.text.x = element_text(size=8))''')
R('''png("%s", res=90, unit="in", height=8, width=12)''' % save_path)
R('''print(p)''')
R('''dev.off()''')
elif resolution == "genome_wide":
R('''nchroms <- length(unique(assoc.df$CHR))''')
R('''png("%s", width=720, height=540)''' % save_path)
R('''p <- manhattan(assoc.df, main="Manhattan plot",'''
'''ylim=c(0, 50), cex=0.9, suggestiveline=T,'''
'''genomewideline=-log10(5e-8), chrlabs=c(1:nchroms), '''
'''col=c("#8B1A1A","#8470FF"))''')
R('''print(p)''')
R('''dev.off()''')
R('''sink(file=NULL)''')
if write_merged:
return self.results
else:
return False
def plotQQ(self, save_path, resolution="chromosome"):
'''
Generate a QQ-plot of expected vs. observed
test statistics
'''
self.results["log10P"] = np.log10(self.results["P"])
py2ri.activate()
R('''suppressPackageStartupMessages(library(ggplot2))''')
R('''suppressPackageStartupMessages(library(scales))''')
R('''suppressPackageStartupMessages(library(qqman))''')
r_df = py2ri.py2ri_pandasdataframe(self.results)
R.assign("assoc.df", r_df)
R('''png("%s", width=720, height=540)''' % save_path)
R('''qq(assoc.df$P)''')
R('''dev.off()''')
def plotEpistasis(self, save_path, resolution="chromosome"):
'''
Generate both manhattan plot of the SNPs tested for
epistasis with their target SNP, and a QQplot
of the association test p-values
'''
# plot QQplot
qq_save = "_".join([save_path, "qqplot.png"])
self.plotQQ(qq_save)
manhattan_save = "_".join([save_path, "manhattan.png"])
self.plotManhattan(manhattan_save,
resolution=resolution,
sig_level=6,
write_merged=False)
def getHits(self, threshold=0.00000005):
'''
Pull out regions of association by selecting
all SNPs with association p-values less than
a certain threshold. Default is genome-wide
significance, p < 5x10^-8.
Then select region +/- 1.5Mb of the index SNP.
'''
hits_df = self.results[self.results["P"] <= threshold]
# find the range of SNPs within 3Mb of each index SNP
contig_group = hits_df.groupby(["CHR"])
# there may be multiple independent hits on a given
# chromosome. Need to identify independent regions.
# Independent regions are defined by their statistical
# independence, not distance. Just take all SNPs
# in 3Mb of the lead SNP for each signal
# this will create overlaps of association signals
for contig, region in contig_group:
region.index = region["BP"]
chr_df = self.results[self.results["CHR"] == contig]
chr_df.index = chr_df["BP"]
# find independent regions and output consecutively
# if only a single SNP above threshold then there is
# only one independent region!!
if len(region) > 1:
independents = self.findIndependentRegions(region)
indi_group = independents.groupby("Group")
else:
region["Group"] = 1
indi_group = region.groupby("Group")
for group, locus in indi_group:
# if there is only a single variant should
# the region be kept? Likely a false
# positive
if min(locus["BP"]) == max(locus["BP"]):
pass
else:
try:
try:
locus.loc[:, "STAT"] = abs(locus["STAT"])
locus.sort_values(by="STAT", inplace=True)
except KeyError:
locus.loc[:, "T"] = abs(locus["T"])
locus.sort_values(by="T", inplace=True)
except KeyError:
locus.sort_values(by="CHISQ", inplace=True)
index_bp = locus.iloc[0]["BP"]
E.info("Lead SNP for regions is: {}".format(locus.iloc[0]["SNP"]))
left_end = min(chr_df.loc[chr_df.index >= index_bp - 1500000, "BP"])
right_end = max(chr_df.loc[chr_df.index <= index_bp + 1500000, "BP"])
range_df = chr_df.loc[left_end: right_end, :]
max_stat = max(abs(range_df["STAT"]))
yield contig, range_df
def extractSNPs(self, snp_ids):
'''
Extract a specific set of SNP results
Arguments
---------
snp_ids: list
a list of SNP IDs to extract from the
GWAS results
Returns
-------
snp_results: pandas.DataFrame
'''
self.results.index = self.results["SNP"]
snp_results = self.results.loc[snp_ids]
return snp_results
def findIndependentRegions(self, dataframe):
'''
Find the number of independent regions on
a chromosome. Uses R distance and tree
cutting functions
'''
# move the dataframe into R
py2ri.activate()
r_df = py2ri.py2ri_pandasdataframe(dataframe)
R.assign("rdf", r_df)
R('''mat <- as.matrix(rdf$BP)''')
# get distances then cluster, chop tree at 1x10^6 bp
R('''dist.mat <- dist(mat, method="euclidean")''')
R('''clusts <- hclust(dist.mat, "average")''')
R('''cut <- cutree(clusts, h=1e6)''')
R('''out.df <- rdf''')
R('''out.df$Group <- cut''')
# need to handle changes in pandas2ri API
try:
regions_df = pd.DataFrame(py2ri.ri2py(R["out.df"]))
except NotImplementedError:
regions_df = pd.DataFrame(R["out.df"])
return regions_df
def mergeFrequencyResults(self, freq_dir, file_regex):
'''
Merge GWAS results with frequency information,
and format for GCTA joint analysis input
'''
# create a dummy regex to compare
# file_regex type against
test_re = re.compile("A")
if type(file_regex) == str:
file_regex = re.compile(file_regex)
elif type(file_regex) == type(test_re):
pass
else:
raise TypeError("Regex type not recognised. Must "
"be a string or re.SRE_Pattern")
all_files = os.listdir(freq_dir)
freq_files = [fx for fx in all_files if re.search(file_regex, fx)]
gwas_df = self.results
df_container = []
for freq in freq_files:
freq_file = os.path.join(freq_dir, freq)
E.info("Adding information from {}".format(freq_file))
# files may or may not be tab-delimited
try:
_df = pd.read_table(freq_file,
sep="\s*", header=0,
index_col=None,
engine='python')
except StopIteration:
_df = pd.read_table(freq_file,
sep="\t", header=0,
index_col=None)
merge_df = pd.merge(self.results, _df,
left_on=["CHR", "SNP"],
right_on=["CHR", "SNP"],
how='left')
df_container.append(merge_df)
count = 0
for df in df_container:
if not count:
gwas_df = df
count += 1
else:
gwas_df = gwas_df.append(df)
E.info("Calculating Z scores and SEs")
# convert P-values to Z scores with the Altman & Bland (BMJ 2011)
# approximation: z = -0.862 + sqrt(0.743 - 2.404*ln(P))
z_scores = -0.862 + np.sqrt(0.743 - 2.404 *
np.log(gwas_df.loc[:, "P"]))
se = np.log(gwas_df.loc[:, "OR"])/z_scores
gwas_df.loc[:, "Z"] = z_scores
gwas_df.loc[:, "SE"] = se
gwas_df.loc[:, "logOR"] = np.log(gwas_df.loc[:, "OR"])
out_cols = ["SNP", "A1_x", "A2", "MAF", "logOR", "SE", "P", "NMISS"]
out_df = gwas_df[out_cols]
# need to remove duplicates, especially those
# that contain NaN for A2 and MAF
out_df = out_df.loc[~np.isnan(out_df["MAF"])]
return out_df
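# Worked sketch of the P-to-Z conversion used above (numbers are
# illustrative): for P = 1e-4, z = -0.862 + sqrt(0.743 - 2.404*ln(1e-4))
# ~= 3.9, and with OR = 1.5 the derived SE is ln(1.5)/3.9 ~= 0.10,
# following the Altman & Bland (BMJ 2011) approximation.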
##########################################################
# unbound methods that work on files and data structures #
##########################################################
def plotMapPhenotype(data, coords, coord_id_col, lat_col,
long_col, save_path, xvar, var_type,
xlabels=None, level=None):
'''
Generate a map of the UK, with phenotype data overlaid
'''
# merge co-ordinate data with phenotype data
merged_df = pd.merge(left=coords, right=data, left_on=coord_id_col,
right_on=coord_id_col, how='inner')
# pheno column and set level of categorical variable
if xlabels and var_type == "categorical":
# convert to string type as a categorical variable
# drop NA observations from the merged data frame
na_mask = pd.isnull(merged_df.loc[:, xvar])
merged_df = merged_df[~na_mask]
rvar = merged_df.loc[:, xvar].copy()
nvar = pd.Series(np.nan_to_num(rvar), dtype=str)
var = [v for v in set(nvar)]
var.sort()
# recode the variables according to the input labels
xlabs = xlabels.split(",")
lbls = [str(xlabs[ix]) for ix in range(len(var))]
for xv in range(len(var)):
nvar[nvar == var[xv]] = lbls[xv]
merged_df.loc[:, "cat_var"] = nvar
else:
pass
if level:
lvar = merged_df.loc[:, "cat_var"].copy()
mask = lvar.isin([level])
lvar[mask] = 1
lvar[~mask] = 0
lvar = lvar.fillna(0)
merged_df.loc[:, "dichot_var"] = lvar
else:
pass
# push the df into the R env
py2ri.activate()
r_df = py2ri.py2ri_pandasdataframe(merged_df)
R.assign("pheno.df", r_df)
# setup the map and plot the points
R('''suppressPackageStartupMessages(library(maps))''')
R('''suppressPackageStartupMessages(library(mapdata))''')
R('''uk_map <- map("worldHires", c("UK", "Isle of Wight",'''
'''"Ireland", "Isle of Man", "Wales:Anglesey"), '''
'''xlim=c(-11, 3), ylim=c(50, 60.9), plot=F)''')
# colour by reference, or a colour for each discrete value
if level:
R('''red <- rep("#FF0000", '''
'''times=length(pheno.df$dichot_var[pheno.df$dichot_var == 1]))''')
R('''black <- rep("#000000", '''
'''times=length(pheno.df$dichot_var[pheno.df$dichot_var == 0]))''')
R('''png("%(save_path)s", width=540, height=540, res=90)''' % locals())
R('''map(uk_map)''')
R('''points((pheno.df[,"%(long_col)s"])[pheno.df$dichot_var == 1], '''
'''(pheno.df[,"%(lat_col)s"])[pheno.df$dichot_var == 1], pch=".", col=red)''' % locals())
R('''points((pheno.df[,"%(long_col)s"])[pheno.df$dichot_var == 0], '''
'''(pheno.df[,"%(lat_col)s"])[pheno.df$dichot_var == 0], pch=".", col=black)''' % locals())
R('''legend('topleft', legend=c("not-%(level)s", "%(level)s"),'''
'''fill=c("#000000", "#FF0000"))''' % locals())
R('''dev.off()''')
else:
R('''png("%(save_path)s", width=540, height=540, res=90)''' % locals())
R('''map(uk_map)''')
R('''points(pheno.df[,"%(long_col)s"], pheno.df[,"%(lat_col)s"], pch=".", '''
'''col=factor(pheno.df$cat_var))''' % locals())
R('''legend('topleft', legend=unique(pheno.df$cat_var),'''
'''fill=unique(pheno.df$cat_var))''' % locals())
R('''dev.off()''')
def plotPhenotype(data, plot_type, x, y=None, group=None,
save_path=None, labels=None, xlabels=None,
ylabels=None, glabels=None, var_type="continuous"):
'''
Generate plots of phenotypes using ggplot
'''
# change data format if necessary and convert nan/NA to missing
if not y and var_type == "categorical":
var = np.nan_to_num(data.loc[:, x].copy())
data.loc[:, x] = pd.Series(var, dtype=str)
if group:
gvar = np.nan_to_num(data.loc[:, group].copy())
data.loc[:, group] = pd.Series(gvar, dtype=str)
else:
pass
elif not y and var_type == "integer":
var = np.nan_to_num(data.loc[:, x].copy())
data.loc[:, x] = pd.Series(var, dtype=np.int64)
if group:
gvar = np.nan_to_num(data.loc[:, group].copy())
data.loc[:, group] = pd.Series(gvar, dtype=str)
else:
pass
elif not y and var_type == "continuous":
var = data.loc[:, x].copy()
data.loc[:, x] = pd.Series(var, dtype=np.float64)
if group:
gvar = np.nan_to_num(data.loc[:, group].copy())
data.loc[:, group] = pd.Series(gvar, dtype=str)
else:
pass
elif y and var_type == "categorical":
xvar = np.nan_to_num(data.loc[:, x].copy())
yvar = np.nan_to_num(data.loc[:, y].copy())
data.loc[:, x] = pd.Series(xvar, dtype=str)
data.loc[:, y] = pd.Series(yvar, dtype=str)
if group:
gvar = np.nan_to_num(data.loc[:, group].copy())
data.loc[:, group] = pd.Series(gvar, dtype=str)
else:
pass
elif y and var_type == "integer":
xvar = np.nan_to_num(data.loc[:, x].copy())
yvar = np.nan_to_num(data.loc[:, y].copy())
data.loc[:, x] = pd.Series(xvar, dtype=np.int64)
data.loc[:, y] = pd.Series(yvar, dtype=np.int64)
if group:
gvar = np.nan_to_num(data.loc[:, group].copy())
data.loc[:, group] = pd.Series(gvar, dtype=str)
else:
pass
elif y and var_type == "continuous":
# NAs and NaNs should be handled by ggplot
xvar = data.loc[:, x].copy()
yvar = data.loc[:, y].copy()
data.loc[:, x] = pd.Series(xvar, dtype=np.float64)
data.loc[:, y] = pd.Series(yvar, dtype=np.float64)
if group:
gvar = np.nan_to_num(data.loc[:, group].copy())
data.loc[:, group] = pd.Series(gvar, dtype=str)
else:
pass
R('''suppressPackageStartupMessages(library(ggplot2))''')
# put the pandas dataframe in to R with rpy2
py2ri.activate()
r_df = py2ri.py2ri_pandasdataframe(data)
R.assign("data_f", r_df)
# plotting parameters, including grouping variables and labels
# axis labels
try:
labs = labels.split(",")
except AttributeError:
labs = []
# if variable labels have been provided then assume they are
# categorical/factor variables.
# assume variable labels are input in the correct order
if xlabels:
try:
unique_obs = len(set(data.loc[:, x]))
xfact = len(xlabels.split(","))
if xfact == unique_obs:
R('''lvls <- unique(data_f[,"%(x)s"])''' % locals())
lbls = ro.StrVector([ri for ri in xlabels.split(",")])
R.assign("lbls", lbls)
R('''lvls <- lvls[order(lvls, decreasing=F)]''')
R('''data_f[,"%(x)s"] <- ordered(data_f[,"%(x)s"], '''
'''levels=lvls, labels=lbls)''' % locals())
else:
E.warn("the number of labels does not match the "
"number of unique observations, labels not "
"used.")
except AttributeError:
xlabels = None
else:
pass
if glabels:
unique_obs = len(set(data.loc[:, group]))
gfact = len(glabels.split(","))
if gfact == unique_obs:
R('''lvls <- unique(data_f[, "%(group)s"])''' % locals())
lbls = ro.StrVector([rg for rg in glabels.split(",")])
R.assign("lbls", lbls)
R('''lvls <- lvls[order(lvls, decreasing=F)]''')
R('''data_f[,"%(group)s"] <- ordered(data_f[,"%(group)s"], '''
'''levels=lvls, labels=lbls)''' % locals())
else:
E.warn("the number of labels does not match the "
"number of unique observations, labels not "
"used.")
# start constructing the plot
# if X and Y are specified, assume Y is a variable to colour
# observations by, unless group is also set.
# If Y and group then colour by group and split by Y
if y:
R('''p <- ggplot(aes(x=%s, y=%s), data=data_f)''' % (x, y))
if plot_type == "histogram":
if group:
R('''p <- p + geom_histogram(aes(colour=%(group)s)) + '''
'''facet_grid(. ~ %(y)s)''' % locals())
else:
R('''p <- p + geom_histogram(aes(colour=%(y)s))''' % locals())
elif plot_type == "barplot":
if group:
R('''p <- p + geom_bar(aes(colour=%(group)s)) + '''
'''facet_grid(. ~ %(y)s)''' % locals())
else:
R('''p <- p + geom_bar(aes(colour=%(y)s))''' % locals())
elif plot_type == "density":
if group:
R('''p <- p + geom_density(aes(colour=%(group)s)) + '''
'''facet_grid(. ~ %(y)s)''' % locals())
else:
R('''p <- p + geom_density(aes(colour=%(y)s))''' % locals())
elif plot_type == "boxplot":
if group:
R('''p <- p + geom_boxplot(group=%(group)s,'''
'''aes(x=factor(%(x)s), y=%(y)s, fill=%(group)s))''' % locals())
else:
R('''p <- p + geom_boxplot(aes(colour=%(x)s))''' % locals())
elif plot_type == "scatter":
if group:
R('''p <- p + geom_point(size=1, aes(colour=%(group)s))''' % locals())
else:
R('''p <- p + geom_point(size=1)''')
if len(labs) == 1:
xlab = labs[0]
R('''p <- p + labs(x="%s")''' % xlab)
elif len(labs) == 2:
xlab = labs[0]
ylab = labs[1]
R('''p <- p + labs(x="%(xlab)s", y="%(ylab)s")''' % locals())
elif len(labs) == 3:
xlab = labs[0]
ylab = labs[1]
title = labs[2]
R('''p <- p + labs(x="%(xlab)s", y="%(ylab)s", '''
'''title="%(title)s")''' % locals())
elif len(labs) == 4:
xlab = labs[0]
ylab = labs[1]
glab = labs[2]
title = labs[3]
R('''p <- p + labs(x="%(xlab)s", y="%(ylab)s",'''
'''title="%(title)s")''' % locals())
# need to add in guide/legend title
else:
R('''p <- ggplot(data=data_f)''')
if plot_type == "histogram":
if group:
R('''p <- p + geom_histogram(aes(%(x)s)) + '''
'''facet_grid(. ~ %(group)s)''' % locals())
else:
R('''p <- p + geom_histogram(aes(%s))''' % x)
elif plot_type == "barplot":
if group:
R('''p <- p + geom_bar(aes(%(x)s)) + '''
'''facet_grid(. ~ %(group)s)''' % locals())
else:
R('''p <- p + geom_bar(aes(%s))''' % x)
elif plot_type == "density":
if group:
R('''p <- p + geom_density(aes(%(x)s)) + '''
'''facet_grid(. ~ %(group)s)''' % locals())
else:
R('''p <- p + geom_density(aes(%s))''' % x)
elif plot_type == "boxplot":
if group:
R('''p <- p + geom_boxplot(aes(y=%(x)s, '''
'''x=factor(%(group)s)))''' % locals())
else:
raise AttributeError("Y or group variable is missing")
if len(labs) == 1:
xlab = labs[0]
R('''p <- p + labs(x="%s")''' % xlab)
elif len(labs) == 2:
xlab = labs[0]
title = labs[1]
R('''p <- p + labs(x="%(xlab)s", '''
'''title="%(title)s")''' % locals())
elif len(labs) == 3:
if group:
xlab = labs[0]
glab = labs[1]
title = labs[2]
R('''p <- p + labs(x="%(glab)s", y="%(xlab)s",'''
'''title="%(title)s")''' % locals())
else:
E.warn("too many labels provided, assume first is X, "
"and second is plot title")
xlab = labs[0]
title = labs[1]
R('''p <- p + labs(x="%(xlab)s", '''
'''title="%(title)s")''' % locals())
# the default theme is bw
R('''p <- p + theme_bw()''')
R('''png("%(save_path)s")''' % locals())
R('''print(p)''')
R('''dev.off()''')
def parseFlashPCA(pcs_file, fam_file):
'''
Parse the principal components file from FlashPCA
and match with individual identifiers. This
assumes the output order of FlashPCA is the same
as the input order in the .fam file
'''
try:
pc_df = pd.read_table(pcs_file, sep="\s*",
header=None, index_col=None,
engine='python')
except StopIteration:
pc_df = pd.read_table(pcs_file, sep="\t",
header=None, index_col=None)
# add a header to the pc_df file
headers = ["PC%i" % (n + 1) for n,
m in enumerate(pc_df.columns)]
pc_df.columns = headers
fam_df = pd.read_table(fam_file, sep="\t",
header=None, index_col=None)
fam_df.columns = ["FID", "IID", "PAR", "MAT", "GENDER",
"PHENO"]
pc_df[["FID", "IID"]] = fam_df.iloc[:, :2]
return pc_df
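# Illustrative usage sketch (file names are assumptions):
# pc_df = parseFlashPCA("flashpca_pcs.txt", "cohort.fam")
# pc_df.columns -> PC1 ... PCn plus FID and IID; the identifiers are taken
# row-for-row from the .fam file, so it must be in the same order as the
# genotype input given to FlashPCA.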
def plotPCA(data, nPCs, point_labels, save_path,
headers, metadata=None, multiplot=False):
'''
Plot N principal components from a PCA either as
a single plot of the first 2 PCs, a grid plot of
N PCs.
Arguments
---------
data: pandas.DataFrame
dataframe containing the principal components, e.g. the
output of parseFlashPCA
nPCs: int
number of principal components to plot. If this
value is > 2, then multiplot will be enabled
automatically
point_labels: vector
a vector of labels of length corresponding to
the number of rows in the data file. These are
used to colour the points in the plot with relevant
metadata. Alternatively, can be the column header
in the metadata file that corresponds to annotations
save_path: string
An absolute PATH to save the plot(s) to
headers: boolean
whether the `data` file contains header delineating the
columns
metadata: string
file containing metadata to annotate plot with, includes
point_labels data
multiplot: boolean
If True, generate a grid of scatter plots with successive
PCs plotted against each other
Returns
-------
None
'''
py2ri.activate()
if metadata:
meta_df = pd.read_table(metadata, sep="\t", header=0,
index_col=None)
else:
raise ValueError("a metadata file containing the %s column "
"is required to annotate the plot" % point_labels)
labels = meta_df[["FID", "IID", point_labels]]
merged = pd.merge(data, labels, left_on="FID",
right_on="FID", how='inner')
# TO DO: enable multiplotting of many PCs
r_df = py2ri.py2ri_pandasdataframe(merged)
R.assign("pc.df", r_df)
R('''suppressPackageStartupMessages(library(ggplot2))''')
R('''pc.df[["%(point_labels)s"]] <- as.factor(pc.df[["%(point_labels)s"]])''' % locals())
R('''p_pcs <- ggplot(pc.df, aes(x=PC1, y=PC2, colour=%s)) + '''
'''geom_point(size=1) + theme_bw() + '''
'''labs(x="PC1", y="PC2", title="PC1 vs. PC2 LD trimmed genotypes")''' % point_labels)
R('''png("%s")''' % save_path)
R('''print(p_pcs)''')
R('''dev.off()''')
def countByVariantAllele(ped_file, map_file):
'''
Count the number of individuals carrying the variant allele
for each SNP.
Count the number of occurrences of each allele with the variant
allele of each other SNP.
Requires ped file genotyping to be in format A1(minor)=1, A2=2
'''
# parse the ped file - get the variant column headers from
# the map file - no headers with these files
# variant order in the map file matters, use an ordered dict
variants = collections.OrderedDict()
with open(map_file, "r") as mfile:
for snp in mfile.readlines():
attrs = snp.split("\t")
snpid = attrs[1]
variants[snpid] = {"chr": attrs[0],
"pos": attrs[-1].strip("\n")}
variant_ids = variants.keys()
# store genotype matrix as an array
# rows and columns are variant IDs
homA1 = np.zeros((len(variant_ids), len(variant_ids)),
dtype=np.int64)
homA2 = np.zeros((len(variant_ids), len(variant_ids)),
dtype=np.int64)
het = np.zeros((len(variant_ids), len(variant_ids)),
dtype=np.int64)
tcount = 0
with open(ped_file, "r") as pfile:
for indiv in pfile.readlines():
indiv = indiv.strip("\n")
indiv_split = indiv.split("\t")
fid = indiv_split[0]
iid = indiv_split[1]
mid = indiv_split[2]
pid = indiv_split[3]
gender = indiv_split[4]
phen = indiv_split[5]
genos = indiv_split[6:]
tcount += 1
# get genotype counts
for i in range(len(genos)):
# missing genotypes are coded '00' in plink format
if genos[i] == "00":
pass
elif genos[i] == "11":
homA1[i, i] += 1
elif genos[i] == "12":
het[i, i] += 1
else:
homA2[i, i] += 1
allele_counts = ((2 * homA2) + het)/float(2 * tcount)
mafs = 1 - allele_counts.diagonal()
maf_df = pd.DataFrame(zip(variant_ids, mafs), columns=["SNP", "MAF"],
index=[x for x, y in enumerate(variant_ids)])
maf_df["A2_HOMS"] = (2 * homA1).diagonal()
maf_df["A2_HETS"] = het.diagonal()
maf_df.index = maf_df["SNP"]
maf_df.drop(["SNP"], axis=1, inplace=True)
E.info("allele frequencies calculated over %i SNPs and "
"%i individuals" % (len(genos), tcount))
return maf_df
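# Illustrative sketch (not part of the original pipeline): the MAF
# calculation above reduces to simple allele counting per SNP. Given
# counts of A2 homozygotes and heterozygotes among N individuals, the A2
# allele frequency is (2*homA2 + het) / (2*N) and the reported MAF is
# 1 minus that value. The helper below only documents that arithmetic.
def _sketch_maf_from_counts(hom_a2, het, n_indiv):
    '''Return the minor allele frequency implied by genotype counts.'''
    a2_freq = ((2 * hom_a2) + het) / float(2 * n_indiv)
    return 1 - a2_freq
# example: 40 A2 homozygotes and 20 heterozygotes in 100 individuals
# give an A2 frequency of (80 + 20)/200 = 0.5 and hence a MAF of 0.5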
def calcMaxAlleleFreqDiff(ped_file, map_file, group_file,
test=None, ref=None):
'''
Calculate the allele frequency difference between
two groups of individuals based upon some prior
assignment.
Arguments
---------
ped_file: string
plink text format .ped file - see Plink documentation
for details (https://www.cog-genomics.org/plink2/input#ped)
map_file: string
plink test format .map file - see Plink documentation
for details (https://www.cog-genomics.org/plink2/input#ped)
group_file: string
a file containing grouping information, must be in standard
Plink format with IID, FID, GROUP as the columns
test: string
group label to use as the test case for calculating
allele frequency differences. If this isn't set, then
the first non-ref value encountered will be set as test
ref: string
group label to use as the reference case for calculating
allele frequency differences. If not set, then the first
value encountered will be used as the reference.
Returns
-------
freq_diffs: pandas.Core.DataFrame
dataframe of SNP information and allele frequency difference
between group labels
'''
# group labels need to be of the same type, convert all
# group values to string
group_df = pd.read_table(group_file, sep="\t", header=0,
index_col=None,
converters={"GROUP": str,
"FID": str,
"IID": str})
group_df["GROUP"] = [str(xg) for xg in group_df["GROUP"]]
try:
assert ref
E.info("Reference label set to %s" % ref)
except AssertionError:
# sets cannot be indexed directly - sort for a deterministic choice
ref = sorted(set(group_df["GROUP"]))[0]
E.info("Reference label not provided. Setting "
"reference label to %s" % ref)
try:
assert test
E.info("Test label set to %s" % test)
except AssertionError:
# take the first group label that is not the reference label
test = [tx for tx in set(group_df["GROUP"]) if tx != ref][0]
E.info("Test label not provided, setting test "
"label to %s." % test)
# parse the ped file - get the variant column headers from
# the map file - no headers with these files
# variant order in the map file matters, use an ordered dict
variants = collections.OrderedDict()
with open(map_file, "r") as mfile:
for snp in mfile.readlines():
attrs = snp.split("\t")
snpid = attrs[1]
variants[snpid] = {"chr": attrs[0],
"pos": attrs[-1].strip("\n")}
variant_ids = variants.keys()
# store genotype matrix as an array
# rows and columns are variant IDs
ref_homA1 = np.zeros((len(variant_ids), len(variant_ids)),
dtype=np.int64)
ref_homA2 = np.zeros((len(variant_ids), len(variant_ids)),
dtype=np.int64)
ref_het = np.zeros((len(variant_ids), len(variant_ids)),
dtype=np.int64)
test_homA1 = np.zeros((len(variant_ids), len(variant_ids)),
dtype=np.int64)
test_homA2 = np.zeros((len(variant_ids), len(variant_ids)),
dtype=np.int64)
test_het = np.zeros((len(variant_ids), len(variant_ids)),
dtype=np.int64)
tcount = 0
rcount = 0
ncount = 0
ref_ids = group_df["IID"][group_df["GROUP"] == ref].values
test_ids = group_df["IID"][group_df["GROUP"] == test].values
total = len(group_df)
with open(ped_file, "r") as pfile:
for indiv in pfile.readlines():
indiv = indiv.strip("\n")
indiv_split = indiv.split("\t")
fid = indiv_split[0]
iid = indiv_split[1]
mid = indiv_split[2]
pid = indiv_split[3]
gender = indiv_split[4]
phen = indiv_split[5]
genos = indiv_split[6:]
# check for ref and test conditions
# ignore individuals in neither camp
if iid in test_ids:
tcount += 1
# get genotype counts
for i in range(len(genos)):
# missing genotypes are coded '00' in plink format
if genos[i] == "00":
pass
elif genos[i] == "11":
test_homA1[i, i] += 1
elif genos[i] == "12":
test_het[i, i] += 1
else:
test_homA2[i, i] += 1
elif iid in ref_ids:
rcount += 1
# get genotype counts
for i in range(len(genos)):
# missing genotypes are coded '00' in plink format
if genos[i] == "00":
pass
elif genos[i] == "11":
ref_homA1[i, i] += 1
elif genos[i] == "12":
ref_het[i, i] += 1
else:
ref_homA2[i, i] += 1
else:
ncount += 1
# report progress at approximate quartiles of samples processed
progress = round((tcount + rcount + ncount)/float(total), 2)
if progress == 0.25:
E.info("%i samples counted. "
"Approximately 25%% of samples counted" % (tcount + rcount + ncount))
elif progress == 0.50:
E.info("%i samples counted. "
"Approximately 50%% of samples counted" % (tcount + rcount + ncount))
elif progress == 0.75:
E.info("%i samples counted. "
"Approximately 75%% of samples counted" % (tcount + rcount + ncount))
E.info("Counted alleles for %i test cases, %i ref cases,"
" %i neither reference nor test." % (tcount, rcount,
ncount))
ref_allele_counts = ((2 * ref_homA2) + ref_het)/float(2 * rcount)
test_allele_counts = ((2 * test_homA2) + test_het)/float(2 * tcount)
ref_mafs = 1 - ref_allele_counts.diagonal()
test_mafs = 1 - test_allele_counts.diagonal()
ref_maf_df = pd.DataFrame(zip(variant_ids, ref_mafs),
columns=["SNP", "ref_MAF"],
index=[x for x, y in enumerate(variant_ids)])
ref_maf_df["ref_A2_HOMS"] = (2 * ref_homA1).diagonal()
ref_maf_df["ref_A2_HETS"] = ref_het.diagonal()
ref_maf_df.index = ref_maf_df["SNP"]
ref_maf_df.drop(["SNP"], axis=1, inplace=True)
test_maf_df = pd.DataFrame(zip(variant_ids, test_mafs),
columns=["SNP", "test_MAF"],
index=[x for x, y in enumerate(variant_ids)])
test_maf_df["test_A2_HOMS"] = (2 * test_homA1).diagonal()
test_maf_df["test_A2_HETS"] = test_het.diagonal()
test_maf_df.index = test_maf_df["SNP"]
test_maf_df.drop(["SNP"], axis=1, inplace=True)
freq_diffs = pd.merge(ref_maf_df, test_maf_df,
left_index=True, right_index=True,
how='inner')
freq_diffs["MAF_diff"] = freq_diffs["ref_MAF"] - freq_diffs["test_MAF"]
E.info("allele frequencies calculated over %i SNPs and "
"%i individuals" % (len(genos), tcount + rcount))
return freq_diffs
def calcPenetrance(ped_file, map_file, mafs=None,
subset=None, snpset=None):
'''
Calculate the proportion of times an allele is observed
in the phenotype subset vs. its allele frequency.
This is the penetrance of the allele
i.e. if observed in 100% of affected individuals and 0%
of controls, then penetrance is 100%
Generates a table of penetrances for each variants/allele
and a plot of MAF vs # cases carrying the allele
Generates a heatmap of compound heterozygotes, and homozygotes
with penetrances.
Outputs a table of SNPs, homozygote and heterozygote counts
among subset individuals and proportion of subset individual
phenotype explained by homozygotes and heterozygotes
Requires alleles are coded A1(minor)=1, A2=2
'''
# check subset is set, if not then throw an error
# cannot calculate penetrance without a phenotype
if not subset:
raise ValueError("Cannot calculate penetrance of alleles "
"without a phenotype to subset in")
else:
pass
# parse the ped file - get the variant column headers from
# the map file - no headers with these files
# variant order in the map file matters, use an ordered dict
variants = collections.OrderedDict()
with open(map_file, "r") as mfile:
for snp in mfile.readlines():
attrs = snp.split("\t")
snpid = attrs[1]
variants[snpid] = {"chr": attrs[0],
"pos": attrs[-1].strip("\n")}
if snpset:
with iotools.open_file(snpset, "r") as sfile:
snps = sfile.readlines()
snps = [sx.rstrip("\n") for sx in snps]
variant_ids = [ks for ks in variants.keys() if ks in snps]
else:
variant_ids = variants.keys()
# indices of the selected variants within the full .map ordering,
# so that the correct genotype columns are pulled from the ped file
keep_vars = set(variant_ids)
var_idx = [si for si, sj in enumerate(variants.keys()) if sj in keep_vars]
case_mat = np.zeros((len(variant_ids), len(variant_ids)),
dtype=np.float64)
all_mat = np.zeros((len(variant_ids), len(variant_ids)),
dtype=np.float64)
tcount = 0
ncases = 0
# missing phenotype individuals must be ignored, else
# they will cause the number of individuals explained
# to be underestimated
with open(ped_file, "r") as pfile:
for indiv in pfile.readlines():
indiv = indiv.strip("\n")
indiv_split = indiv.split("\t")
fid = indiv_split[0]
iid = indiv_split[1]
mid = indiv_split[2]
pid = indiv_split[3]
gender = int(indiv_split[4])
phen = int(indiv_split[5])
if phen != -9:
if subset == "cases":
select = phen
elif subset == "gender":
select = gender
else:
select = None
genos = np.array(indiv_split[6:])
genos = genos[var_idx]
tcount += 1
het = np.zeros(len(genos), dtype=np.float64)
hom = np.zeros(len(genos), dtype=np.float64)
for i in range(len(genos)):
# missing values are coded '00' in plink format
# A2 homs are coded '11' in plink format
if genos[i] == "11":
hom[i] += 1
elif genos[i] == "12":
het[i] += 1
else:
pass
hom_mat = np.outer(hom, hom)
het_mat = np.outer(het, het)
homs = hom_mat.diagonal()
het_mat[np.diag_indices(len(genos))] = homs
gen_mat = het_mat
# separate matrix for subset
# reference is always level 2 for plink files,
# either cases or females
if select == 2:
case_mat += gen_mat
all_mat += gen_mat
ncases += 1
else:
all_mat += gen_mat
else:
pass
E.info("alleles counted over %i SNPs "
"and %i individuals, of which %i are "
"in the %s subset" % (len(genos), tcount, ncases, subset))
penetrance = np.divide(case_mat, all_mat)
# round for the sake of aesthetics
penetrance = np.round(penetrance, decimals=5)
pen_df = pd.DataFrame(penetrance, columns=variant_ids,
index=variant_ids)
pen_df = pen_df.fillna(0.0)
case_df = pd.DataFrame(case_mat, columns=variant_ids,
index=variant_ids)
all_df = pd.DataFrame(all_mat, columns=variant_ids,
index=variant_ids)
# plot heatmap of penetrances as percentages
indf = pen_df * 100
py2ri.activate()
# only plot penetrances > 0%
r_pen = py2ri.py2ri_pandasdataframe(indf)
r_cases = py2ri.py2ri_pandasdataframe(case_df)
r_all = py2ri.py2ri_pandasdataframe(all_df)
R.assign("pen.df", r_pen)
R.assign("case.df", r_cases)
R.assign("all.df", r_all)
R('''suppressPackageStartupMessages(library(gplots))''')
R('''suppressPackageStartupMessages(library(RColorBrewer))''')
# penetrances
E.info("plotting penetrance matrix")
R('''hmcol <- colorRampPalette(brewer.pal(9, "BuGn"))(100)''')
R('''rowpen <- pen.df[rowSums(pen.df) > 0,]''')
R('''colpen <- rowpen[,colSums(rowpen) > 0]''')
R('''png("%s/penetrance-matrix.png", width=720, height=720)''' % os.getcwd())
R('''heatmap.2(as.matrix(colpen), trace="none", col=hmcol,'''
'''dendrogram="none", Colv=colnames(colpen), key=FALSE, '''
'''Rowv=rownames(colpen), margins=c(10,10), cellnote=round(colpen),'''
'''notecol="white")''')
R('''dev.off()''')
E.info("plotting case counts matrix")
R('''rowcase <- case.df[rowSums(case.df) > 0,]''')
R('''colcase <- rowcase[,colSums(rowcase) > 0]''')
R('''png("%s/cases-matrix.png", width=720, height=720)''' % os.getcwd())
R('''heatmap.2(as.matrix(colcase), trace="none", col=rep("#F0F8FF", 100),'''
'''dendrogram="none", Colv=colnames(colcase), key=FALSE, '''
'''colsep=seq(1:length(colnames(colcase))), '''
'''rowsep=seq(1:length(rownames(colcase))),'''
'''Rowv=rownames(colcase), margins=c(10,10), cellnote=round(colcase),'''
'''notecol="black")''')
R('''dev.off()''')
E.info("plotting all individuals matrix")
R('''rowall <- all.df[rownames(colcase),]''')
R('''colall <- rowall[,colnames(colcase)]''')
R('''png("%s/all-matrix.png", width=720, height=720)''' % os.getcwd())
R('''heatmap.2(as.matrix(colall), trace="none", col=rep("#F0F8FF", 100),'''
'''dendrogram="none", Colv=colnames(colall), key=FALSE, '''
'''colsep=seq(1:length(colnames(colall))), '''
'''rowsep=seq(1:length(rownames(colall))), '''
'''Rowv=rownames(colall), margins=c(10,10), cellnote=round(colall),'''
'''notecol="black")''')
R('''dev.off()''')
# plot MAF vs homozygosity
maf_df = pd.read_table(mafs, sep="\t", header=0, index_col=0)
plot_df = pd.DataFrame(columns=["MAF"],
index=maf_df.index)
plot_df["MAF"] = maf_df["MAF"]
homs = case_mat.diagonal()
hom_series = pd.Series({x: y for x, y in zip(variant_ids,
homs)})
plot_df["explained_by_homozygotes"] = hom_series
plot_df["SNP"] = plot_df.index
plot_df.index = [ix for ix, iy in enumerate(plot_df.index)]
plotPenetrances(plotting_df=plot_df)
out_df = summaryPenetrance(maf_df=maf_df,
case_counts=case_mat,
variants=variant_ids,
n_cases=ncases,
n_total=tcount)
return out_df, pen_df
def summaryPenetrance(maf_df, case_counts,
variants, n_cases, n_total):
'''
Summarise genotype counts and proportion of cases explained
by the observed homozygotes and compound heterozygotes.
This is a function of the total population size and
population allele frequency - does this assume 100%
penetrance of each allele?
'''
# homozygous individuals are on the
# diagonal of the case_counts array
homozyg_cases = case_counts.diagonal()
homozyg_series = pd.Series({x: y for x, y in zip(variants,
homozyg_cases)})
# heterozygotes are on the off-diagonal elements
# get all off diagonal elements by setting diagonals to zero
# matrix is diagonal symmetric
np.fill_diagonal(case_counts, 0)
het_counts = np.sum(case_counts, axis=0)
het_series = pd.Series({x: y for x, y in zip(variants,
het_counts)})
out_df = pd.DataFrame(columns=["homozygote_cases",
"heterozygote_cases"],
index=maf_df.index)
out_df["MAF"] = maf_df["MAF"]
out_df["homozygote_cases"] = np.round(homozyg_series, 1)
out_df["expected_cases"] = np.round(((out_df["MAF"] ** 2) * n_total), 3)
out_df["heterozygote_cases"] = het_series
out_df["hom_prop_explained"] = np.round(homozyg_series/float(n_cases), 3)
out_df["het_prop_explained"] = np.round(het_series/float(n_cases), 3)
return out_df
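# Note (illustrative, and an interpretation on my part): the
# `expected_cases` column above is MAF^2 * n_total, i.e. the
# Hardy-Weinberg expectation for the number of minor-allele homozygotes
# in the whole population. A minimal worked form of that expectation:
def _sketch_expected_homozygotes(maf, n_total):
    '''Expected count of minor-allele homozygotes under Hardy-Weinberg.'''
    return (maf ** 2) * n_total
# e.g. a MAF of 0.01 in 10,000 individuals gives an expectation of
# (0.01 ** 2) * 10000 = 1 homozygote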
def plotPenetrances(plotting_df):
'''
Plot the proportion of cases/phenotype explained by
individuals carrying allele vs. population allele frequency.
Generate final output summary table (should be in separate function)
'''
# only need to plot variants with MAF >= 0.01
low_frq = plotting_df["MAF"] < 0.01
hi_df = plotting_df[~low_frq]
# get into R and use ggplot for MAF vs homozygosity amongs cases
r_plot = py2ri.py2ri_pandasdataframe(hi_df)
R.assign("hom.df", r_plot)
R('''suppressPackageStartupMessages(library(ggplot2))''')
R('''png("%s/penetrance-plot.png", height=720, width=720)''' % os.getcwd())
R('''pen_p <- ggplot(hom.df, aes(x=explained_by_homozygotes, y=MAF, colour=SNP)) + '''
'''geom_point(size=4) + theme_bw() + '''
'''geom_text(aes(label=explained_by_homozygotes),'''
'''colour="black",vjust=0.5, hjust=0.5) + '''
'''labs(x="Number of Red haired homozygotes", y="MAF") + '''
'''theme(axis.title=element_text(size=10, colour="black"))''')
R('''print(pen_p)''')
R('''dev.off()''')
def findDuplicateVariants(bim_file, take_last=False):
'''
identify variants with duplicate position and reference
alleles
'''
# count the number of lines first to get
# the necessary array sizes
E.info("getting number of variants")
lines = 0
with open(bim_file, "r") as bfile:
for line in bfile.readlines():
lines += 1
E.info("%i variants found" % lines)
# setup index arrays
var_array = np.empty(lines, dtype=object)
ref_alleles = np.empty(lines, dtype=object)
pos_array = np.zeros(lines, dtype=np.int64)
minor_alleles = np.empty(lines, dtype=object)
idx = 0
# find duplicates on position
with open(bim_file, "r") as bfile:
for line in bfile.readlines():
line = line.rstrip("\n")
varline = line.split("\t")
var = varline[1]
pos = int(varline[3])
ref_allele = varline[-1]
minor_allele = varline[-2]
var_array[idx] = var
ref_alleles[idx] = ref_allele
minor_alleles[idx] = minor_allele
pos_array[idx] = pos
idx += 1
# find duplicates using pandas series
pos_series = pd.Series(pos_array)
dup_last = pos_series[pos_series.duplicated(keep="last")]
dup_first = pos_series[pos_series.duplicated(keep="first")]
var_series = pd.Series(var_array)
ref_series = pd.Series(ref_alleles)
alt_series = pd.Series(minor_alleles)
# a few variants have duplicate IDs - count these as duplicates
# and add to the exclusion list - these won't be identified
# based on shared position necessarily - force add them
id_first = var_series[var_series.duplicated(keep="first")]
id_last = var_series[var_series.duplicated(keep="last")]
ref_dups = set(id_first.index).union(id_last.index)
# union of take first and take last
dup_all = set(dup_last.index).union(set(dup_first.index))
dup_complete = dup_all.union(ref_dups)
dup_idx = np.array([sx for sx in dup_complete])
dup_idx.sort()
# make a dataframe to hold all triallelic and duplicate variants
dup_dict = {"SNP": var_series[dup_idx],
"BP": pos_series[dup_idx],
"REF": ref_series[dup_idx],
"VAR": alt_series[dup_idx]}
dup_df = pd.DataFrame(dup_dict)
# some variants may have more than one ID/entry
# step through using pandas groupby - group on position
E.info("looking for duplicates and triallelic variants")
tri_alleles = []
dups_alleles = []
overlap_vars = []
for names, groups in dup_df.groupby(["BP"]):
# if there is only one reference allele, indicates a
# triallelic variant, otherwise it's probably a duplicate
# or overlapping INDEL and SNV
var_lens = groups["VAR"].apply(len)
if groups.shape[0] == 1:
pass
elif np.mean(var_lens) > 1:
# probably overlapping variants, exclude, but report
# separately
over_vars = groups["SNP"].values.tolist()
for ovs in over_vars:
overlap_vars.append(ovs)
elif len(set(groups["REF"])) == 1:
tri_vars = groups["SNP"].values.tolist()
for tri in tri_vars:
tri_alleles.append(tri)
else:
dup_vars = groups["SNP"].values.tolist()
for dup in dup_vars:
dups_alleles.append(dup)
E.info("%i triallelic variants found" % len(tri_alleles))
E.info("%i duplicate position variants found" % len(dups_alleles))
E.info("%i overlapping SNVs and INDELs found" % len(overlap_vars))
return dups_alleles, tri_alleles, overlap_vars
def flagExcessHets(hets_file, plot=True, plot_path=None):
'''
Take output from Plink 1.9 --het command
calculate heterozygosity rate and flag individuals
with heterozygosity > 3 s.d. from the mean
value.
This assumes all individuals are from the same
population, and thus form a homogenous cluster,
with only outliers at the extremes.
Visualise the data; if there are multiple apparent
clusters then filter for ethnicity/ancestry first.
'''
if hets_file.endswith("gz"):
compression = "gzip"
else:
compression = None
het_df = pd.read_table(hets_file, header=0, index_col=None,
sep="\t", compression=compression)
nmiss = pd.Series(het_df.loc[:, "N(NM)"], dtype=np.float64)
nhoms = het_df.loc[:, "O(HOM)"]
het_df["het_rate"] = (nmiss - nhoms) / nmiss
# get mean value and std, set upper and lower thresholds
mean_het = np.mean(het_df.loc[:, "het_rate"].values)
sd_het = np.std(het_df.loc[:, "het_rate"].values)
upper = mean_het + (3 * sd_het)
lower = mean_het - (3 * sd_het)
hi_hets = het_df[het_df["het_rate"] > upper].copy()
lo_hets = het_df[het_df["het_rate"] < lower].copy()
E.info("%i individuals with high heterozygosity" % len(hi_hets))
E.info("%i individuals with low heterozygosity" % len(lo_hets))
hi_hets["exclude"] = "high_heterozygosity"
lo_hets["exclude"] = "low_heterozygosity"
all_flags = pd.concat([lo_hets, hi_hets], axis=0)
if plot:
E.info("plotting heterozygosity rate distribution")
py2ri.activate()
r_df = py2ri.py2ri_pandasdataframe(het_df)
R.assign("het.df", r_df)
R('''suppressPackageStartupMessages(library(ggplot2))''')
R('''p <- ggplot(het.df, aes(het_rate)) + '''
'''geom_histogram() + '''
'''labs(title="Distribution of heterozygosity rate") + '''
'''theme_bw() + '''
'''geom_vline(xintercept=c(%0.3f, %0.3f), '''
'''linetype=2, col="#838B83")''' % (lower, upper))
R('''png("%s/het_rate-hist.png")''' % plot_path)
R('''print(p)''')
R('''dev.off()''')
return all_flags
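# Illustrative sketch of the thresholding used in flagExcessHets: the
# heterozygosity rate is (N(NM) - O(HOM)) / N(NM) and individuals are
# flagged when they fall more than 3 standard deviations either side of
# the cohort mean. The helper below assumes an array-like of rates.
def _sketch_het_bounds(het_rates):
    '''Return the (lower, upper) 3 s.d. bounds for heterozygosity rates.'''
    import numpy as np
    mean_het = np.mean(het_rates)
    sd_het = np.std(het_rates)
    return mean_het - (3 * sd_het), mean_het + (3 * sd_het)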
def flagGender(gender_file, plot=True, plot_path=None):
'''
Parse the .sexcheck output report from Plink
--sex-check and flag gender discordant individuals.
Arguments
---------
gender_file: string
the .sexcheck output report file from Plink --sex-check
plot: boolean
generate a histogram of F values distributions showing male and
female clusters, split by reported gender
plot_path: string
PATH to save F coefficient histogram
Returns
-------
discords: pandas.Core.DataFrame
a pandas dataframe of individuals that are gender discordant
'''
gender_df = pd.read_table(gender_file, header=0,
index_col=None, sep=None)
genders = lambda x: "male" if x == 1 else "female"
gender_df["GENDER"] = gender_df["PEDSEX"].apply(genders)
E.info("checking individuals for discordance")
discords = gender_df[gender_df["STATUS"] != "OK"]
discords.drop(labels=["PEDSEX", "SNPSEX", "STATUS", "F",
"GENDER"],
axis=1, inplace=True)
E.info("%i individuals with discordant gender" % len(discords))
if plot:
E.info("plotting F gender coefficient distributions")
py2ri.activate()
r_df = py2ri.py2ri_pandasdataframe(gender_df)
R.assign("gender.df", r_df)
R('''suppressPackageStartupMessages(library(ggplot2))''')
R('''p <- ggplot(gender.df, aes(F, fill=GENDER)) + '''
'''geom_histogram() + '''
'''labs(title="F coefficient distributions for gender") + '''
'''theme_bw() + facet_grid(. ~ GENDER)''')
R('''png("%s/gender_check-hist.png")''' % plot_path)
R('''print(p)''')
R('''dev.off()''')
else:
pass
return discords
def _compare_ibds(ibd_entry, threshold=0.03125):
'''
Just for internal use in `flagRelated` function.
To compare IBD estimates and flag up related
individuals
Arguments
---------
ibd_entry: pandas.Core.Series
a single line entry from an IBD estimates
file
threshold: float
the threshold at which to flag an individual as related
Returns
-------
flag: boolean
True if related, else false
'''
if ibd_entry["PI_HAT"] < threshold:
return False
else:
return True
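# Usage sketch for _compare_ibds (illustrative only): applied row-wise to
# a Plink .genome style table it yields a boolean mask over pairs, which
# can then be used to subset the related pairs. The column names assumed
# here follow the Plink output used elsewhere in this module.
def _sketch_flag_related_pairs(ibd_df, threshold=0.03125):
    '''Return the subset of pairs with PI_HAT at or above `threshold`.'''
    relate_mask = ibd_df.apply(_compare_ibds, axis=1, threshold=threshold)
    return ibd_df[relate_mask]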
def flagRelated(ibd_file, chunk_size=None,
threshold=0.03125, plot=True,
plotting_path=None):
'''
Use IBD estimates (PI_HAT) to find pairs of related individuals
above a threshold.
This will also flag up the number of duplicated/monozygotic
twin pairs (matrix diagonals).
Arguments
---------
ibd_file: string
file containing IBD estimates between pairs from Plink
or GCTA.
chunk_size: int
the file chunk size to read in at a time, should correspond
to the number of individuals. If not set, the whole file
is read in. Not recommended for large (>2GB) files.
threshold: float
IBD (PI_HAT) threshold, above which pairs will be flagged
as related. Default is 3rd cousins.
plot: boolean
generate a histogram of the distribution of IBS values.
Default = True
plotting_path: string
PATH to plot histogram to
Returns
-------
flagged: list
a list of pandas dataframes of related pairs above the
threshold, with their estimated IBD sharing (PI_HAT).
'''
# need to make this faster
# sequentially add new IDs only
related_list = []
ibds = []
if ibd_file.endswith("gz"):
comp = "gzip"
else:
comp = None
if chunk_size:
E.info("reading file in chunks of %i lines" % chunk_size)
# read in and operate on chunks
df_iter = pd.read_table(ibd_file, header=0, index_col=None,
delim_whitespace=True, compression=comp,
chunksize=chunk_size)
count = 0
for chunk in df_iter:
count += 1
entrys = chunk[["FID1", "IID1",
"FID2", "IID2",
"PI_HAT"]]
ibds.append(entrys)
relate_mask = entrys.apply(_compare_ibds, axis=1)
related = entrys[relate_mask]
E.info("%i relations found" % len(related))
related_list.append(related)
else:
# no chunk size set - read the whole file in one go
E.info("reading the whole file into memory")
whole_df = pd.read_table(ibd_file, header=0, index_col=None,
delim_whitespace=True, compression=comp)
entrys = whole_df[["FID1", "IID1",
"FID2", "IID2",
"PI_HAT"]]
ibds.append(entrys)
relate_mask = entrys.apply(_compare_ibds, axis=1)
related = entrys[relate_mask]
E.info("%i relations found" % len(related))
related_list.append(related)
df = pd.concat(ibds, axis=0, keys=None)
if plot:
# for lots of observations, plot log counts
E.info("plotting pair-wise IBD distribution")
py2ri.activate()
r_df = py2ri.py2ri_pandasdataframe(df)
R.assign("relate.df", r_df)
R('''suppressPackageStartupMessages(library(ggplot2))''')
R('''p <- ggplot(relate.df, aes(PI_HAT+0.5)) + '''
'''geom_histogram(binwidth=0.01) + '''
'''labs(title="Proportion of IBD shared distribution") + '''
'''theme_bw() + scale_y_log10() + '''
'''geom_vline(xintercept=%(threshold)f, '''
'''linetype=4, colour="#838B83")''' % locals())
R('''png("%s/IBD-hist.png")''' % plotting_path)
R('''print(p)''')
R('''dev.off()''')
else:
pass
return related_list
def flagInbred(inbred_file, inbreeding_coefficient,
ibc_threshold=0.05,
plot=True, plot_path=None):
'''
Use Plink or GCTA's estimate of F, inbreeding coefficient
to flag individuals that are highly inbred.
Arguments
---------
inbred_file: string
file containing estimates of F
inbreeding_coefficient: string
coefficient to use to identify inbred individuals. This name
should correspond to one of the columns in `inbred_file`.
ibc_threshold: float
the threshold above which individuals will be flagged as inbred
plot: boolean
generate a histogram of the distribution of F coefficients
plotting_path: string
PATH to directory for plotting F coefficient distribution
Returns
-------
inbreds: pandas.Core.DataFrame
dataframe of inbred individuals to exclude from analysis
'''
inbreed_df = pd.read_table(inbred_file, header=0,
index_col=None, sep="\t")
E.info("Identifying individuals with inbreeding coefficient"
" greater than %0.3f" % ibc_threshold)
inbreds = inbreed_df[inbreed_df[inbreeding_coefficient] > ibc_threshold]
inbreds = inbreds[["FID", "IID"]]
E.info("%i individuals with high inbreeding "
"coefficient" % len(inbreds))
if plot:
E.info("plotting F coefficient distributions")
py2ri.activate()
r_df = py2ri.py2ri_pandasdataframe(inbreed_df)
R.assign("inbreed.df", r_df)
R('''suppressPackageStartupMessages(library(ggplot2))''')
R('''p <- ggplot(inbreed.df, aes(%(inbreeding_coefficient)s)) + '''
'''geom_histogram(binwidth=0.01) + '''
'''labs(title="Inbreeding coefficient, %(inbreeding_coefficient)s,'''
'''distribution") + theme_bw() + '''
'''geom_vline(xintercept=%(ibc_threshold)0.3f, '''
'''linetype=4, colour="#838B83")''' % locals())
R('''png("%s/inbreeding-hist.png")''' % plot_path)
R('''print(p)''')
R('''dev.off()''')
else:
pass
return inbreds
def mergeQcExclusions(hets_file=None, inbred_file=None,
related_file=None, gender_file=None,
mask_file=None):
'''
Merge sets of excluded individuals into a single file for
downstream analysis, processing, etc
Arguments
---------
hets_file: string
file containing individuals to remove due to excessive or
reduced heterozygosity
inbred_file: string
file of individuals highly related to themselves for
exclusion
related_file: string
file of IDs of individuals pruned due to greater relatedness
than an arbitrary threshold
gender_file: string
individuals with discordant reported vs. genetic gender
mask_file: string
individuals to be excluded from analyses for reasons
unrelated to QC (i.e. to mask out a category of individuals)
Returns
-------
exclusions: pandas.Core.DataFrame
A dataframe of FID and IIDs of the unique set of excluded
individuals
'''
if hets_file:
hets_df = pd.read_table(hets_file, sep="\t",
header=0, index_col=None)
E.info("%i exclusions due to "
"heterozygosity deviation" % len(hets_df))
else:
hets_df = None
E.warn("No heterozygosity exclusion file")
if inbred_file:
inbred_df = pd.read_table(inbred_file, sep="\t",
header=0, index_col=None)
E.info("%i exclusions due "
"to consanguinuity" % len(inbred_df))
else:
inbred_df = None
E.warn("No inbred exclusions")
if related_file:
related_df = pd.read_table(related_file, delim_whitespace=True,
header=None, index_col=None)
related_df.columns = ["FID", "IID"]
E.info("%i individuals excluded due "
"to high relatedness" % len(related_df))
else:
related_df = None
E.warn("No individuals excluded on relatedness")
if gender_file:
gender_df = pd.read_table(gender_file, sep="\t",
header=0, index_col=None)
E.info("%i individuals with discordant "
"gender recorded" % len(gender_df))
else:
gender_df = None
E.warn("No individuals excluded with "
"discordant gender")
if mask_file:
mask_df = pd.read_table(mask_file, sep="\t",
header=None, index_col=None)
E.info("%i individuals to be excluded "
"for additional reasons" % len(mask_df))
mask_df.columns = ["FID", "IID"]
else:
mask_df = None
df_list = [hets_df, inbred_df, related_df, gender_df,
mask_df]
# raise an error if none of the QC exclusion tables were provided
if all([x is None for x in df_list]):
raise ValueError("no QC files detected - do some QC!!")
else:
pass
# assume all df have FID and IID columns
real_df = [x for x in df_list if x is not None]
real_df = [x[["FID", "IID"]] for x in real_df]
full_df = pd.concat(real_df, keys=None, axis=0)
exclusions = full_df.drop_duplicates(subset=["FID",
"IID"],
keep="last",
inplace=False)
return exclusions
def selectLdFromTabix(ld_dir, chromosome, snp_pos,
ld_threshold=0.01):
'''
Select all LD values from a tabix indexed BGZIP
file of LD. Assumes Plink format.
Arguments
---------
ld_dir: string
path to directory containing LD data
chromosome: string
chromosome of SNP to pull out LD values
assumes chrN format
snp_pos: int
bp mapping position of the SNP on the same
genome build as the LD was calculated
ld_threshold: float
minimum LD value to return
Returns
-------
ld_df: pandas.Core.DataFrame
Pandas dataframe containing LD values over
target range.
'''
tab_dir = [td for td in os.listdir(ld_dir) if re.search(".bgz$", td)]
contig = int(chromosome.lstrip("chr"))
start = snp_pos
end = snp_pos
tab_query = """
tabix %(ld_dir)s/%(tab_indx)s %(contig)i:%(start)i-%(end)i |
awk '{if($7 >= %(ld_threshold)s) print $0}'"""
tab_indx = [tx for tx in tab_dir if re.search(chromosome,
tx)][-1]
E.info("Retrieving LD values at bp: %i" % snp_pos)
proc = subprocess.Popen(tab_query % locals(),
shell=True,
stdout=subprocess.PIPE)
ld_dict = {}
count = 0
for line in proc.stdout:
# subprocess pipes return bytes under Python 3
line = line.decode("utf-8")
snp_dict = {}
parse = line.split("\t")
snp_dict["CHR_A"] = int(parse[0])
snp_dict["BP_A"] = int(parse[1])
snp_dict["SNP_A"] = parse[2]
snp_dict["CHR_B"] = int(parse[3])
snp_dict["BP_B"] = int(parse[4])
snp_dict["SNP_B"] = parse[5]
snp_dict["R2"] = float(parse[6])
snp_dict["DP"] = float(parse[7])
count += 1
ld_dict[count] = snp_dict
ld_df = pd.DataFrame(ld_dict).T
# ld Dataframe may be empty, return
# empty dataframe
try:
ld_df.index = ld_df["SNP_B"]
ld_df.drop_duplicates(subset="SNP_B",
keep="last",
inplace=True)
except KeyError:
E.info("No SNPs detected in LD "
"with r^2 > {}".format(ld_threshold))
ld_df = pd.DataFrame(0.0,
index=[snp_pos],
columns=["SNP_A",
"DP",
"R2"])
return ld_df
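# For reference (illustrative values only): with ld_dir='/data/ld',
# chromosome='chr2', snp_pos=1234 and ld_threshold=0.01, the command
# built above expands to something like:
#   tabix /data/ld/chr2.ld.bgz 2:1234-1234 | awk '{if($7 >= 0.01) print $0}'
# where the .bgz file name is whichever tabix-indexed file in ld_dir
# matches the chromosome.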
def selectLdFromDB(database, table_name,
index_snp,
index_label=None,
ld_threshold=None):
'''
Select LD values from an SQL table over
a specific range. Large regions will consume
large memory and queries may take several
minutes to complete.
Arguments
---------
database: sql.connection
An SQL database connection to the DB
containing the LD values
table_name: string
The table to query containing LD information
index_snp: string
SNP ID to select LD values from the SQL
database on
index_label: str
Column label in SQL database to use as the
index in the output dataframe
ld_threshold: float
minimum LD value to return
Returns
-------
ld_df: pandas.Core.DataFrame
Pandas dataframe containing LD values over
target range.
'''
# UTF-8 codec struggles to decode ';' in some columns
database.text_factory = str
if ld_threshold:
state = '''
select SNP_A,SNP_B,R2 FROM %s where %s = "%s" AND
R2 > %0.3f;
''' % (table_name, index_label,
index_snp, ld_threshold)
else:
state = '''
select SNP_A,SNP_B,R2 FROM %s where %s = "%s";
''' % (table_name, index_label, index_snp)
ld_df = pdsql.read_sql(sql=state, con=database,
index_col=index_label)
return ld_df
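# Note (a suggestion, not the original behaviour): building the statement
# via string substitution works for trusted inputs, but a parameterised
# query leaves quoting to the driver. A sketch using the sqlite3-style
# paramstyle, with a hypothetical table name 'ld_table':
#   state = 'select SNP_A,SNP_B,R2 FROM ld_table where SNP_B = ? AND R2 > ?'
#   ld_df = pdsql.read_sql(sql=state, con=database,
#                          params=(index_snp, ld_threshold))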
def calcLdScores(ld_table, snps,
scale=False, metric="R2",
snp_list=None):
'''
Calculate the LD scores for SNPs across a chromosome,
stored in a SQL database.
Arguments
---------
ld_table: pandas.Core.DataFrame
Pandas dataframe in table format containing LD
values between SNPs. Columns are `SNP_A`, `SNP_B`
and `R2`.
snps: list
the snps over which to calculate LD scores
scale: bool
Whether to scale LD score by the number of SNPs
used to calculate the score. Useful if used
as a weighting for other SNP scores.
metric: string
Use either R^2 or D' as the LD metric
snp_list: list
A list of SNP IDs to restrict the
LD score calculation to
Returns
-------
ld_scores: float
LD scores for each SNP
'''
if len(ld_table) > 0:
if snp_list:
try:
# use np.sum to handle NaN values
ld_table = ld_table.loc[snp_list]
if metric == "R2":
ld_score = np.sum(ld_table["R2"])
elif metric == "DP":
ld_score = np.sum(ld_table["DP"])
except KeyError:
E.warn("None of the SNPs are in LD")
ld_score = 0
else:
if metric == "R2":
ld_score = np.sum(ld_table["R2"])
elif metric == "DP":
ld_score = np.sum(ld_table["DP"])
else:
ld_score = 0
if scale and len(ld_table):
ld_scores = ld_score/len(ld_table)
else:
ld_scores = ld_score
return ld_scores
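# Minimal worked sketch of the LD score defined above (assuming one row
# per SNP in LD with the index SNP): the unscaled score is the sum of the
# chosen LD metric, and the scaled score divides by the number of SNPs.
def _sketch_ld_score(r2_values, scale=False):
    '''Sum r^2 values, optionally scaling by the number of SNPs.'''
    import numpy as np
    score = np.sum(r2_values)
    if scale and len(r2_values):
        return score / float(len(r2_values))
    return score
# e.g. r2 values [0.9, 0.5, 0.1] give an LD score of 1.5, or 0.5 scaled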
def calcWeightedEffects(gwas_results, snps, calc_se=True,
scale=False):
'''
Calculate the standard error weighted effect sizes score
for each SNP:
score = sum(ln(OR) * se)
Arguments
---------
gwas_results: pandas.Core.DataFrame
A dataframe of the results from a genome_wide association
study. Assumes SNP IDs are the index column.
snps: list
the snps over which to calculate the total weighted
effect size score.
calc_se: boolean
Calculate the standard error from the p-values and
effect sizes:
SE = ln(OR)/Z
Z = -0.862 + sqrt(0.743 - 2.404 * ln(P))
scale: boolean
Scale the sum of standard error weighted effect sizes
by the number of SNPs
Returns
-------
es_score: float
sum of SE weighted effect sizes
'''
# calculate standard error of effect size based on
# p-value and effect size
if calc_se:
# p-values of exactly 0 are set to the smallest positive
# floating point value instead
gwas_results.loc[gwas_results["P"] == 0, "P"] = np.finfo(np.float64).tiny
z_func = lambda x: - 0.862 + sqrt(0.743 - 2.404 * np.log(x))
gwas_results["Z"] = gwas_results["P"].apply(z_func)
gwas_results["SE"] = abs(np.log(gwas_results["OR"])/gwas_results["Z"])
else:
E.warn("Standard errors have not been calculated, please "
"make sure they exist in this results table")
es_score = sum((abs(np.log(gwas_results["OR"])) * gwas_results["SE"]).fillna(0))
if scale and len(gwas_results):
return es_score/len(gwas_results)
else:
return es_score
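# Worked sketch of the standard error approximation used above; this only
# illustrates the arithmetic quoted in the docstring and is not meant to
# replace calcWeightedEffects.
def _sketch_se_from_summary(p_value, odds_ratio):
    '''Approximate the SE of ln(OR) from a p-value and odds ratio.'''
    import numpy as np
    z = -0.862 + np.sqrt(0.743 - 2.404 * np.log(p_value))
    return abs(np.log(odds_ratio) / z)
# e.g. p=0.05 and OR=1.5 gives z ~= 1.96 and SE ~= 0.21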
def snpPriorityScore(gwas_results, chromosome, ld_dir=None,
clean=True, database=None, table_name=None):
'''
Generate SNP scores based on the amount of genetic variation
they capture and the sum of the weighted effect sizes for
the trait of interest.
This score can then be integrated with a score based on
the overlap with functional annotation features
of interest.
Arguments
---------
gwas_results: string
Results from a GWAS, assumed to be in Plink format.
ld_dir: string
directory containing tabix index LD files from Plink
database: string
Path to an SQL database containing LD values in
table format
table_name: string
Specific table, often referring to a specific
chromosome, that contains LD values with columns
SNP_A, SNP_B, BP_A, BP_B and R2.
chromosome: string
A chromosome to select from the gwas_results
file.
clean: boolean
Whether the results table has been pre-cleaned to
remove results not relevant to SNPs. e.g. if
covariates had been included in the regression
model these should be removed.
Returns
-------
SNP_scores: pd.Core.DataFrame
A pandas dataframe of LDscores, weight effect size
scores and SNP priority score.
'''
E.info("Reading association results from %s" % gwas_results)
if clean:
gwas_df = pd.read_table(gwas_results, index_col=None,
sep="\t", header=0)
else:
gwas_df = pd.read_table(gwas_results, index_col=None,
sep="\s*", header=0)
gwas_df = gwas_df[gwas_df["TEST"] == "ADD"]
gwas_df.index = gwas_df["SNP"]
# in order to reduce the computational load it is
# necessary to break up the SNPs into regions.
# The logical way would be to use recombination
# hotspots, however, this will still leave
# some very large windows
# Use a moving window over the chromosome of
# ~250Kb, with 25kb overlap.
chr_df = gwas_df[gwas_df["CHR"] == int(chromosome)].copy()
# duplicates cause selection of individual SNPs
# to break - why are there still duplicates??
chr_df.drop_duplicates(subset="BP", keep="last",
inplace=True)
priority_list = []
ld_scores = {}
es_scores = {}
priority_scores = {}
snp_set = chr_df.index
if database:
dbh = sql.connect(database)
else:
# an LD source is required - either an SQL database or a tabix directory
assert ld_dir, "either `database` or `ld_dir` must be provided"
# iterate over SNPs
for snp in snp_set:
if database:
ld_values = selectLdFromDB(dbh,
table_name=table_name,
index_snp=snp,
index_label="SNP_B")
elif ld_dir:
snp_pos = int(chr_df.loc[snp, "BP"])
ld_values = selectLdFromTabix(ld_dir=ld_dir,
chromosome=chromosome,
snp_pos=snp_pos)
ldsnps = ld_values.loc[:, "SNP_A"].values
ldsnps = {sx for sx in ldsnps}
ldscore = calcLdScores(ld_table=ld_values,
snps=ldsnps,
scale=False)
ld_scores[snp] = ldscore
try:
gwas_results = chr_df.loc[ldsnps]
escore = calcWeightedEffects(gwas_results=gwas_results,
snps=ldsnps,
calc_se=True,
scale=True)
except KeyError:
gwas_results = chr_df.loc[snp]
if gwas_results["P"] == 0:
gwas_results["P"] = np.finfo(np.float64).tiny
else:
pass
z_func = lambda x: - 0.862 + sqrt(0.743 - 2.404 * np.log(x))
gwas_results["Z"] = z_func(gwas_results["P"])
gwas_results["SE"] = abs(np.log(gwas_results["OR"])/gwas_results["Z"])
escore = gwas_results["SE"] * abs(np.log(gwas_results["OR"]))
es_scores[snp] = escore
weight = escore * ldscore
priority_scores[snp] = weight
SNP_scores = pd.DataFrame([ | pd.Series(ld_scores) | pandas.Series |
import pandas as pd
from .datastore import merge_postcodes
from .types import ErrorDefinition
from .utils import add_col_to_tables_CONTINUOUSLY_LOOKED_AFTER as add_CLA_column # Check 'Episodes' present before use!
def validate_165():
error = ErrorDefinition(
code = '165',
description = 'Data entry for mother status is invalid.',
affected_fields = ['MOTHER', 'SEX', 'ACTIV', 'ACCOM', 'IN_TOUCH', 'DECOM']
)
def _validate(dfs):
if 'Header' not in dfs or 'Episodes' not in dfs or 'OC3' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
oc3 = dfs['OC3']
collection_start = dfs['metadata']['collection_start']
collection_end = dfs['metadata']['collection_end']
valid_values = ['0','1']
# prepare to merge
oc3.reset_index(inplace=True)
header.reset_index(inplace=True)
episodes.reset_index(inplace=True)
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes['EPS'] = (episodes['DECOM']>=collection_start) & (episodes['DECOM']<=collection_end)
episodes['EPS_COUNT'] = episodes.groupby('CHILD')['EPS'].transform('sum')
merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er']).merge(oc3, on='CHILD', how='left')
# Raise error if provided <MOTHER> is not a valid value.
value_validity = merged['MOTHER'].notna() & (~merged['MOTHER'].isin(valid_values))
# If not provided
female = (merged['SEX']=='1')
eps_in_year = (merged['EPS_COUNT']>0)
none_provided = (merged['ACTIV'].isna()& merged['ACCOM'].isna()& merged['IN_TOUCH'].isna())
# If provided <MOTHER> must be a valid value. If not provided <MOTHER> then either <GENDER> is male or no episode record for current year and any of <IN_TOUCH>, <ACTIV> or <ACCOM> have been provided
mask = value_validity | (merged['MOTHER'].isna() & (female & (eps_in_year | none_provided)))
# That is, if value not provided and child is a female with eps in current year or no values of IN_TOUCH, ACTIV and ACCOM, then raise error.
error_locs_eps = merged.loc[mask, 'index_eps']
error_locs_header = merged.loc[mask, 'index_er']
error_locs_oc3 = merged.loc[mask, 'index']
return {'Header':error_locs_header.dropna().unique().tolist(),
'OC3':error_locs_oc3.dropna().unique().tolist()}
return error, _validate
def validate_1014():
error = ErrorDefinition(
code='1014',
description='UASC information is not required for care leavers',
affected_fields=['ACTIV', 'ACCOM', 'IN_TOUCH', 'DECOM']
)
def _validate(dfs):
if 'UASC' not in dfs or 'Episodes' not in dfs or 'OC3' not in dfs:
return {}
else:
uasc = dfs['UASC']
episodes = dfs['Episodes']
oc3 = dfs['OC3']
collection_start = dfs['metadata']['collection_start']
collection_end = dfs['metadata']['collection_end']
# prepare to merge
oc3.reset_index(inplace=True)
uasc.reset_index(inplace=True)
episodes.reset_index(inplace=True)
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
date_check = (
((episodes['DECOM'] >= collection_start) & (episodes['DECOM'] <= collection_end))
| ((episodes['DEC'] >= collection_start) & (episodes['DEC'] <= collection_end))
| ((episodes['DECOM'] <= collection_start) & episodes['DEC'].isna())
)
episodes['EPS'] = date_check
episodes['EPS_COUNT'] = episodes.groupby('CHILD')['EPS'].transform('sum')
# inner merge to take only episodes of children which are also found on the uasc table
merged = episodes.merge(uasc, on='CHILD', how='inner', suffixes=['_eps', '_sc']).merge(oc3, on='CHILD',
how='left')
# adding suffixes with the secondary merge here does not go so well yet.
some_provided = (merged['ACTIV'].notna() | merged['ACCOM'].notna() | merged['IN_TOUCH'].notna())
mask = (merged['EPS_COUNT'] == 0) & some_provided
error_locs_uasc = merged.loc[mask, 'index_sc']
error_locs_oc3 = merged.loc[mask, 'index']
return {'UASC': error_locs_uasc.unique().tolist(), 'OC3': error_locs_oc3.unique().tolist()}
return error, _validate
# !# not sure what this rule is actually supposed to be getting at - description is confusing
def validate_197B():
error = ErrorDefinition(
code='197B',
description="SDQ score or reason for no SDQ should be reported for 4- or 17-year-olds.",
affected_fields=['SDQ_REASON', 'DOB'],
)
def _validate(dfs):
if 'OC2' not in dfs or 'Episodes' not in dfs:
return {}
oc2 = add_CLA_column(dfs, 'OC2')
start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce')
endo = pd.to_datetime(dfs['metadata']['collection_end'], format='%d/%m/%Y', errors='coerce')
oc2['DOB'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce')
ERRRR = (
(
(oc2['DOB'] + pd.DateOffset(years=4) == start) # ???
| (oc2['DOB'] + pd.DateOffset(years=17) == start)
)
& oc2['CONTINUOUSLY_LOOKED_AFTER']
& oc2['SDQ_SCORE'].isna()
& oc2['SDQ_REASON'].isna()
)
return {'OC2': oc2[ERRRR].index.to_list()}
return error, _validate
def validate_157():
error = ErrorDefinition(
code='157',
description="Child is aged 4 years or over at the beginning of the year or 16 years or under at the end of the "
"year and Strengths and Difficulties Questionnaire (SDQ) 1 has been recorded as the reason for no "
"Strengths and Difficulties Questionnaire (SDQ) score.",
affected_fields=['SDQ_REASON', 'DOB'],
)
def _validate(dfs):
if 'OC2' not in dfs or 'Episodes' not in dfs:
return {}
oc2 = add_CLA_column(dfs, 'OC2')
start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce')
endo = pd.to_datetime(dfs['metadata']['collection_end'], format='%d/%m/%Y', errors='coerce')
oc2['DOB'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce')
ERRRR = (
oc2['CONTINUOUSLY_LOOKED_AFTER']
& (oc2['DOB'] + pd.DateOffset(years=4) <= start)
& (oc2['DOB'] + pd.DateOffset(years=16) >= endo)
& oc2['SDQ_SCORE'].isna()
& (oc2['SDQ_REASON'] == 'SDQ1')
)
return {'OC2': oc2[ERRRR].index.to_list()}
return error, _validate
def validate_357():
error = ErrorDefinition(
code='357',
description='If this is the first episode ever for this child, reason for new episode must be S. '
'Check whether there is an episode immediately preceding this one, which has been left out. '
'If not the reason for new episode code must be amended to S.',
affected_fields=['RNE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
eps = dfs['Episodes']
eps['DECOM'] = pd.to_datetime(eps['DECOM'], format='%d/%m/%Y', errors='coerce')
eps = eps.loc[eps['DECOM'].notnull()]
first_eps = eps.loc[eps.groupby('CHILD')['DECOM'].idxmin()]
errs = first_eps[first_eps['RNE'] != 'S'].index.to_list()
return {'Episodes': errs}
return error, _validate
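# Usage sketch (illustrative, hypothetical data): each validate_* factory
# returns an (ErrorDefinition, validator) pair; the validator takes a dict
# of dataframes keyed by table name and returns the failing row indices.
# For the rule above, a minimal invocation might look like:
#   error, _validate = validate_357()
#   fake_eps = pd.DataFrame({'CHILD': ['1', '1'],
#                            'DECOM': ['01/04/2020', '01/06/2020'],
#                            'RNE': ['T', 'P']})
#   _validate({'Episodes': fake_eps})   # -> {'Episodes': [0]}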
def validate_117():
error = ErrorDefinition(
code='117',
description='Date of decision that a child should/should no longer be placed for adoption is beyond the current collection year or after the child ceased to be looked after.',
affected_fields=['DATE_PLACED_CEASED', 'DATE_PLACED', 'DEC', 'REC', 'DECOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'PlacedAdoption' not in dfs:
return {}
else:
episodes = dfs['Episodes']
placed_adoption = dfs['PlacedAdoption']
collection_end = dfs['metadata']['collection_end']
# datetime
placed_adoption['DATE_PLACED_CEASED'] = pd.to_datetime(placed_adoption['DATE_PLACED_CEASED'],
format='%d/%m/%Y', errors='coerce')
placed_adoption['DATE_PLACED'] = pd.to_datetime(placed_adoption['DATE_PLACED'], format='%d/%m/%Y',
errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
# Drop nans and continuing episodes
episodes = episodes.dropna(subset=['DECOM'])
episodes = episodes[episodes['REC'] != 'X1']
episodes = episodes.loc[episodes.groupby('CHILD')['DECOM'].idxmax()]
# prepare to merge
placed_adoption.reset_index(inplace=True)
episodes.reset_index(inplace=True)
p4a_cols = ['DATE_PLACED', 'DATE_PLACED_CEASED']
# latest episodes
merged = episodes.merge(placed_adoption, on='CHILD', how='left', suffixes=['_eps', '_pa'])
mask = (
(merged['DATE_PLACED'] > collection_end)
| (merged['DATE_PLACED'] > merged['DEC'])
| (merged['DATE_PLACED_CEASED'] > collection_end)
| (merged['DATE_PLACED_CEASED'] > merged['DEC'])
)
# If provided <DATE_PLACED> and/or <DATE_PLACED_CEASED> must not be > <COLLECTION_END_DATE> or <DEC> of latest episode where <REC> not = 'X1'
pa_error_locs = merged.loc[mask, 'index_pa']
eps_error_locs = merged.loc[mask, 'index_eps']
return {'Episodes': eps_error_locs.tolist(), 'PlacedAdoption': pa_error_locs.unique().tolist()}
return error, _validate
def validate_118():
error = ErrorDefinition(
code='118',
description='Date of decision that a child should no longer be placed for adoption is before the current collection year or before the date the child started to be looked after.',
affected_fields=['DECOM', 'DECOM', 'LS']
)
def _validate(dfs):
if ('PlacedAdoption' not in dfs) or ('Episodes' not in dfs):
return {}
else:
placed_adoption = dfs['PlacedAdoption']
episodes = dfs['Episodes']
collection_start = dfs['metadata']['collection_start']
code_list = ['V3', 'V4']
# datetime
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
placed_adoption['DATE_PLACED_CEASED'] = pd.to_datetime(placed_adoption['DATE_PLACED_CEASED'],
format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
# <DECOM> of the earliest episode with an <LS> not = 'V3' or 'V4'
filter_by_ls = episodes[~(episodes['LS'].isin(code_list))]
earliest_episode_idxs = filter_by_ls.groupby('CHILD')['DECOM'].idxmin()
earliest_episodes = episodes[episodes.index.isin(earliest_episode_idxs)]
# prepare to merge
placed_adoption.reset_index(inplace=True)
earliest_episodes.reset_index(inplace=True)
# merge
merged = earliest_episodes.merge(placed_adoption, on='CHILD', how='left', suffixes=['_eps', '_pa'])
# drop rows where DATE_PLACED_CEASED is not provided
merged = merged.dropna(subset=['DATE_PLACED_CEASED'])
# If provided <DATE_PLACED_CEASED> must not be prior to <COLLECTION_START_DATE> or <DECOM> of the earliest episode with an <LS> not = 'V3' or 'V4'
mask = (merged['DATE_PLACED_CEASED'] < merged['DECOM']) | (merged['DATE_PLACED_CEASED'] < collection_start)
# error locations
pa_error_locs = merged.loc[mask, 'index_pa']
eps_error_locs = merged.loc[mask, 'index_eps']
return {'Episodes': eps_error_locs.tolist(), 'PlacedAdoption': pa_error_locs.unique().tolist()}
return error, _validate
def validate_352():
error = ErrorDefinition(
code='352',
description='Child who started to be looked after was aged 18 or over.',
affected_fields=['DECOM', 'RNE'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
if 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
header['DOB18'] = header['DOB'] + pd.DateOffset(years=18)
episodes_merged = episodes.reset_index().merge(header, how='left', on=['CHILD'], suffixes=('', '_header'),
indicator=True).set_index('index')
care_start = episodes_merged['RNE'].str.upper().astype(str).isin(['S'])
started_over_18 = episodes_merged['DOB18'] <= episodes_merged['DECOM']
error_mask = care_start & started_over_18
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_209():
error = ErrorDefinition(
code='209',
description='Child looked after is of school age and should not have an unknown Unique Pupil Number (UPN) code of UN1.',
affected_fields=['UPN', 'DOB']
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
else:
header = dfs['Header']
collection_start = dfs['metadata']['collection_start']
# convert to datetime
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
yr = collection_start.year - 1
reference_date = pd.to_datetime('31/08/' + str(yr), format='%d/%m/%Y', errors='coerce')
# If <DOB> >= 4 years prior to 31/08/YYYY then <UPN> should not be 'UN1' Note: YYYY in this instance refers to the year prior to the collection start (for collection year 2019-2020, it would be looking at the 31/08/2018).
mask = (reference_date >= (header['DOB'] + pd.offsets.DateOffset(years=4))) & (header['UPN'] == 'UN1')
# error locations
error_locs_header = header.index[mask]
return {'Header': error_locs_header.tolist()}
return error, _validate
def validate_198():
error = ErrorDefinition(
code='198',
description="Child has not been looked after continuously for at least 12 months at 31 March but a reason "
"for no Strengths and Difficulties (SDQ) score has been completed. ",
affected_fields=['SDQ_REASON'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'OC2' not in dfs:
return {}
oc2 = add_CLA_column(dfs, 'OC2')
error_mask = oc2['SDQ_REASON'].notna() & ~oc2['CONTINUOUSLY_LOOKED_AFTER']
error_locs = oc2.index[error_mask].to_list()
return {'OC2': error_locs}
return error, _validate
def validate_185():
error = ErrorDefinition(
code='185',
description="Child has not been looked after continuously for at least 12 months at " +
"31 March but a Strengths and Difficulties (SDQ) score has been completed.",
affected_fields=['SDQ_SCORE'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'OC2' not in dfs:
return {}
oc2 = add_CLA_column(dfs, 'OC2')
error_mask = oc2['SDQ_SCORE'].notna() & ~oc2['CONTINUOUSLY_LOOKED_AFTER']
error_locs = oc2.index[error_mask].to_list()
return {'OC2': error_locs}
return error, _validate
def validate_186():
error = ErrorDefinition(
code='186',
description="Children aged 4 or over at the start of the year and children aged under 17 at the " +
"end of the year and who have been looked after for at least 12 months continuously " +
"should have a Strengths and Difficulties (SDQ) score completed.",
affected_fields=['SDQ_SCORE'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'OC2' not in dfs:
return {}
oc2 = dfs['OC2']
collection_start_str = dfs['metadata']['collection_start']
collection_end_str = dfs['metadata']['collection_end']
collection_start = pd.to_datetime(collection_start_str, format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end_str, format='%d/%m/%Y', errors='coerce')
oc2['DOB_dt'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce')
oc2 = add_CLA_column(dfs, 'OC2')
oc2['4th_bday'] = oc2['DOB_dt'] + pd.DateOffset(years=4)
oc2['17th_bday'] = oc2['DOB_dt'] + pd.DateOffset(years=17)
error_mask = (
(oc2['4th_bday'] <= collection_start)
& (oc2['17th_bday'] > collection_end)
& oc2['CONTINUOUSLY_LOOKED_AFTER']
& oc2['SDQ_SCORE'].isna()
)
oc2_errors = oc2.loc[error_mask].index.to_list()
return {'OC2': oc2_errors}
return error, _validate
def validate_187():
error = ErrorDefinition(
code='187',
description="Child cannot be looked after continuously for 12 months at " +
"31 March (OC2) and have any of adoption or care leavers returns completed.",
affected_fields=['DATE_INT', 'DATE_MATCH', 'FOSTER_CARE', 'NB_ADOPTR', 'SEX_ADOPTR', 'LS_ADOPTR', # OC3
'IN_TOUCH', 'ACTIV', 'ACCOM'], # AD1
)
def _validate(dfs):
if (
'OC3' not in dfs
or 'AD1' not in dfs
or 'Episodes' not in dfs
):
return {}
# add 'CONTINUOUSLY_LOOKED_AFTER' column
ad1, oc3 = add_CLA_column(dfs, ['AD1', 'OC3'])
# OC3
should_be_blank = ['IN_TOUCH', 'ACTIV', 'ACCOM']
oc3_mask = oc3['CONTINUOUSLY_LOOKED_AFTER'] & oc3[should_be_blank].notna().any(axis=1)
oc3_error_locs = oc3[oc3_mask].index.to_list()
# AD1
should_be_blank = ['DATE_INT', 'DATE_MATCH', 'FOSTER_CARE', 'NB_ADOPTR', 'SEX_ADOPTR', 'LS_ADOPTR']
ad1_mask = ad1['CONTINUOUSLY_LOOKED_AFTER'] & ad1[should_be_blank].notna().any(axis=1)
ad1_error_locs = ad1[ad1_mask].index.to_list()
return {'AD1': ad1_error_locs,
'OC3': oc3_error_locs}
return error, _validate
def validate_188():
error = ErrorDefinition(
code='188',
description="Child is aged under 4 years at the end of the year, "
"but a Strengths and Difficulties (SDQ) score or a reason "
"for no SDQ score has been completed. ",
affected_fields=['SDQ_SCORE', 'SDQ_REASON'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
oc2 = dfs['OC2']
collection_end_str = dfs['metadata']['collection_end']
collection_end = pd.to_datetime(collection_end_str, format='%d/%m/%Y', errors='coerce')
oc2['DOB_dt'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce')
oc2['4th_bday'] = oc2['DOB_dt'] + pd.DateOffset(years=4)
error_mask = (
(oc2['4th_bday'] > collection_end)
& oc2[['SDQ_SCORE', 'SDQ_REASON']].notna().any(axis=1)
)
oc2_errors = oc2.loc[error_mask].index.to_list()
return {'OC2': oc2_errors}
return error, _validate
def validate_190():
error = ErrorDefinition(
code='190',
description="Child has not been looked after continuously for at least 12 months at 31 March but one or more "
"data items relating to children looked after for 12 months have been completed.",
affected_fields=['CONVICTED', 'HEALTH_CHECK', 'IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT',
'SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED']
, # AD1
)
def _validate(dfs):
if (
'OC2' not in dfs
or 'Episodes' not in dfs
):
return {}
# add 'CONTINUOUSLY_LOOKED_AFTER' column
oc2 = add_CLA_column(dfs, 'OC2')
should_be_blank = ['CONVICTED', 'HEALTH_CHECK', 'IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT',
'SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED']
mask = ~oc2['CONTINUOUSLY_LOOKED_AFTER'] & oc2[should_be_blank].notna().any(axis=1)
error_locs = oc2[mask].index.to_list()
return {'OC2': error_locs}
return error, _validate
def validate_191():
error = ErrorDefinition(
code='191',
description="Child has been looked after continuously for at least 12 months at 31 March but one or more "
"data items relating to children looked after for 12 months have been left blank.",
affected_fields=['IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE'], # OC2
)
def _validate(dfs):
if (
'OC2' not in dfs
or 'Episodes' not in dfs
):
return {}
# add 'CONTINUOUSLY_LOOKED_AFTER' column
oc2 = add_CLA_column(dfs, 'OC2')
should_be_present = ['IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE']
mask = oc2['CONTINUOUSLY_LOOKED_AFTER'] & oc2[should_be_present].isna().any(axis=1)
error_locs = oc2[mask].index.to_list()
return {'OC2': error_locs}
return error, _validate
def validate_607():
error = ErrorDefinition(
code='607',
description='Child ceased to be looked after in the year, but mother field has not been completed.',
affected_fields=['DEC', 'REC', 'MOTHER', 'LS', 'SEX']
)
def _validate(dfs):
if 'Header' not in dfs or 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
collection_start = dfs['metadata']['collection_start']
collection_end = dfs['metadata']['collection_end']
code_list = ['V3', 'V4']
# convert to datetiime format
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
# prepare to merge
episodes.reset_index(inplace=True)
header.reset_index(inplace=True)
merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er'])
# CEASED_TO_BE_LOOKED_AFTER = DEC is not null and REC is filled but not equal to X1
CEASED_TO_BE_LOOKED_AFTER = merged['DEC'].notna() & ((merged['REC'] != 'X1') & merged['REC'].notna())
# and <LS> not = ‘V3’ or ‘V4’
check_LS = ~(merged['LS'].isin(code_list))
# and <DEC> is in <CURRENT_COLLECTION_YEAR
check_DEC = (collection_start <= merged['DEC']) & (merged['DEC'] <= collection_end)
# Where <CEASED_TO_BE_LOOKED_AFTER> = ‘Y’, and <LS> not = ‘V3’ or ‘V4’ and <DEC> is in <CURRENT_COLLECTION_YEAR> and <SEX> = ‘2’ then <MOTHER> should be provided.
mask = CEASED_TO_BE_LOOKED_AFTER & check_LS & check_DEC & (merged['SEX'] == '2') & (merged['MOTHER'].isna())
header_error_locs = merged.loc[mask, 'index_er']
eps_error_locs = merged.loc[mask, 'index_eps']
return {'Episodes': eps_error_locs.tolist(), 'Header': header_error_locs.unique().tolist()}
return error, _validate
def validate_210():
error = ErrorDefinition(
code='210',
description='Children looked after for more than a week at 31 March should not have an unknown Unique Pupil Number (UPN) code of UN4.',
affected_fields=['UPN', 'DECOM']
)
def _validate(dfs):
if 'Header' not in dfs or 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
collection_end = dfs['metadata']['collection_end']
# convert to datetime
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
yr = collection_end.year
            reference_date = pd.to_datetime('24/03/' + str(yr), format='%d/%m/%Y', errors='coerce')
# prepare to merge
episodes.reset_index(inplace=True)
header.reset_index(inplace=True)
            # a left merge from Header (which holds UPN) would be one-to-many; instead we keep the many-to-one merge of Episodes onto Header used elsewhere.
merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er'])
            # If <UPN> = 'UN4' then no episode <DECOM> must be >= 24/03/YYYY. Note: YYYY refers to the current collection year.
mask = (merged['UPN'] == 'UN4') & (merged['DECOM'] >= reference_date)
# error locations
error_locs_header = merged.loc[mask, 'index_er']
error_locs_eps = merged.loc[mask, 'index_eps']
return {'Episodes': error_locs_eps.tolist(), 'Header': error_locs_header.unique().tolist()}
return error, _validate
def validate_1010():
error = ErrorDefinition(
code='1010',
description='This child has no episodes loaded for current year even though there was an open episode of '
+ 'care at the end of the previous year, and care leaver data has been entered.',
affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'Episodes_last' not in dfs or 'OC3' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes_last = dfs['Episodes_last']
oc3 = dfs['OC3']
            # convert DECOM to datetime, drop missing/invalid values, then sort by CHILD and DECOM
episodes_last['DECOM'] = pd.to_datetime(episodes_last['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes_last = episodes_last.dropna(subset=['DECOM']).sort_values(['CHILD', 'DECOM'], ascending=True)
# Keep only the final episode for each child (ie where the following row has a different CHILD value)
episodes_last = episodes_last[
episodes_last['CHILD'].shift(-1) != episodes_last['CHILD']
]
# Keep only the final episodes that were still open
episodes_last = episodes_last[episodes_last['DEC'].isna()]
# The remaining children ought to have episode data in the current year if they are in OC3
has_current_episodes = oc3['CHILD'].isin(episodes['CHILD'])
has_open_episode_last = oc3['CHILD'].isin(episodes_last['CHILD'])
error_mask = ~has_current_episodes & has_open_episode_last
validation_error_locations = oc3.index[error_mask]
return {'OC3': validation_error_locations.tolist()}
return error, _validate
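
# A hypothetical, illustrative sketch (not one of the official rules): after sorting by
# CHILD and DECOM, a row is a child's final episode exactly when the next row belongs to
# a different child, which is what the shift(-1) comparison in rule 1010 relies on.
# The function name and example data are made up for demonstration only.
def _sketch_final_episode_per_child():
    import pandas as pd

    eps = pd.DataFrame({
        'CHILD': ['A', 'A', 'B'],
        'DECOM': pd.to_datetime(['01/05/2020', '01/07/2020', '01/06/2020'], format='%d/%m/%Y'),
    }).sort_values(['CHILD', 'DECOM'])
    # keep rows whose following row is for a different child (or is the end of the frame)
    final_eps = eps[eps['CHILD'].shift(-1) != eps['CHILD']]
    return final_eps['DECOM'].dt.strftime('%d/%m/%Y').tolist()  # ['01/07/2020', '01/06/2020']
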
def validate_525():
error = ErrorDefinition(
code='525',
description='A child for whom the decision to be placed for adoption has been reversed cannot be adopted during the year.',
affected_fields=['DATE_PLACED_CEASED', 'DATE_INT', 'DATE_MATCH', 'FOSTER_CARE', 'NB_ADOPTR', 'SEX_ADOPTR',
'LS_ADOPTR']
)
def _validate(dfs):
if 'PlacedAdoption' not in dfs or 'AD1' not in dfs:
return {}
else:
placed_adoption = dfs['PlacedAdoption']
ad1 = dfs['AD1']
# prepare to merge
placed_adoption.reset_index(inplace=True)
ad1.reset_index(inplace=True)
merged = placed_adoption.merge(ad1, on='CHILD', how='left', suffixes=['_placed', '_ad1'])
# If <DATE_PLACED_CEASED> not Null, then <DATE_INT>; <DATE_MATCH>; <FOSTER_CARE>; <NB_ADOPTR>; <SEX_ADOPTR>; and <LS_ADOPTR> should not be provided
mask = merged['DATE_PLACED_CEASED'].notna() & (
merged['DATE_INT'].notna() | merged['DATE_MATCH'].notna() | merged['FOSTER_CARE'].notna() |
merged['NB_ADOPTR'].notna() | merged['SEX_ADOPTR'].notna() | merged['LS_ADOPTR'].notna())
# error locations
pa_error_locs = merged.loc[mask, 'index_placed']
ad_error_locs = merged.loc[mask, 'index_ad1']
# return result
return {'PlacedAdoption': pa_error_locs.tolist(), 'AD1': ad_error_locs.tolist()}
return error, _validate
def validate_335():
error = ErrorDefinition(
code='335',
description='The current foster value (0) suggests that child is not adopted by current foster carer, but last placement is A2, A3, or A5. Or the current foster value (1) suggests that child is adopted by current foster carer, but last placement is A1, A4 or A6.',
affected_fields=['PLACE', 'FOSTER_CARE']
)
def _validate(dfs):
if 'Episodes' not in dfs or 'AD1' not in dfs:
return {}
else:
episodes = dfs['Episodes']
ad1 = dfs['AD1']
# prepare to merge
episodes.reset_index(inplace=True)
ad1.reset_index(inplace=True)
merged = episodes.merge(ad1, on='CHILD', how='left', suffixes=['_eps', '_ad1'])
            # Where <PL> = 'A2', 'A3' or 'A5' and <REC> = 'E1', 'E11', 'E12' <FOSTER_CARE> should not be '0'; Where <PL> = 'A1', 'A4' or 'A6' and <REC> = 'E1', 'E11', 'E12' <FOSTER_CARE> should not be '1'.
mask = (
merged['REC'].isin(['E1', 'E11', 'E12']) & (
(merged['PLACE'].isin(['A2', 'A3', 'A5']) & (merged['FOSTER_CARE'].astype(str) == '0'))
| (merged['PLACE'].isin(['A1', 'A4', 'A6']) & (merged['FOSTER_CARE'].astype(str) == '1'))
)
)
eps_error_locs = merged.loc[mask, 'index_eps']
ad1_error_locs = merged.loc[mask, 'index_ad1']
# use .unique since join is many to one
return {'Episodes': eps_error_locs.tolist(), 'AD1': ad1_error_locs.unique().tolist()}
return error, _validate
def validate_215():
error = ErrorDefinition(
code='215',
description='Child has care leaver information but one or more data items relating to children looked after for 12 months have been completed.',
affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM', 'CONVICTED', 'HEALTH_CHECK', 'IMMUNISATIONS', 'TEETH_CHECK',
'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED']
)
def _validate(dfs):
if 'OC3' not in dfs or 'OC2' not in dfs:
return {}
else:
oc3 = dfs['OC3']
oc2 = dfs['OC2']
# prepare to merge
oc3.reset_index(inplace=True)
oc2.reset_index(inplace=True)
merged = oc3.merge(oc2, on='CHILD', how='left', suffixes=['_3', '_2'])
# If any of <IN_TOUCH>, <ACTIV> or <ACCOM> have been provided then <CONVICTED>; <HEALTH_CHECK>; <IMMUNISATIONS>; <TEETH_CHECK>; <HEALTH_ASSESSMENT>; <SUBSTANCE MISUSE>; <INTERVENTION_RECEIVED>; <INTERVENTION_OFFERED>; should not be provided
mask = (merged['IN_TOUCH'].notna() | merged['ACTIV'].notna() | merged['ACCOM'].notna()) & (
merged['CONVICTED'].notna() | merged['HEALTH_CHECK'].notna() | merged['IMMUNISATIONS'].notna() |
merged['TEETH_CHECK'].notna() | merged['HEALTH_ASSESSMENT'].notna() | merged[
'SUBSTANCE_MISUSE'].notna() | merged['INTERVENTION_RECEIVED'].notna() | merged[
'INTERVENTION_OFFERED'].notna())
# error locations
oc3_error_locs = merged.loc[mask, 'index_3']
oc2_error_locs = merged.loc[mask, 'index_2']
return {'OC3': oc3_error_locs.tolist(), 'OC2': oc2_error_locs.tolist()}
return error, _validate
def validate_399():
error = ErrorDefinition(
code='399',
description='Mother field, review field or participation field are completed but '
+ 'child is looked after under legal status V3 or V4.',
affected_fields=['MOTHER', 'LS', 'REVIEW', 'REVIEW_CODE']
)
def _validate(dfs):
if 'Episodes' not in dfs or 'Header' not in dfs or 'Reviews' not in dfs:
return {}
else:
episodes = dfs['Episodes']
header = dfs['Header']
reviews = dfs['Reviews']
code_list = ['V3', 'V4']
# prepare to merge
episodes['index_eps'] = episodes.index
header['index_hdr'] = header.index
reviews['index_revs'] = reviews.index
# merge
merged = (episodes.merge(header, on='CHILD', how='left')
.merge(reviews, on='CHILD', how='left'))
# If <LS> = 'V3' or 'V4' then <MOTHER>, <REVIEW> and <REVIEW_CODE> should not be provided
mask = merged['LS'].isin(code_list) & (
merged['MOTHER'].notna() | merged['REVIEW'].notna() | merged['REVIEW_CODE'].notna())
# Error locations
eps_errors = merged.loc[mask, 'index_eps']
header_errors = merged.loc[mask, 'index_hdr'].unique()
revs_errors = merged.loc[mask, 'index_revs'].unique()
return {'Episodes': eps_errors.tolist(),
'Header': header_errors.tolist(),
'Reviews': revs_errors.tolist()}
return error, _validate
def validate_189():
error = ErrorDefinition(
code='189',
        description='Child is aged 17 years or over at the beginning of the year, but a Strengths and Difficulties '
+ '(SDQ) score or a reason for no Strengths and Difficulties (SDQ) score has been completed.',
affected_fields=['DOB', 'SDQ_SCORE', 'SDQ_REASON']
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
collection_start = dfs['metadata']['collection_start']
# datetime format allows appropriate comparison between dates
oc2['DOB'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
# If <DOB> >17 years prior to <COLLECTION_START_DATE> then <SDQ_SCORE> and <SDQ_REASON> should not be provided
mask = ((oc2['DOB'] + pd.offsets.DateOffset(years=17)) <= collection_start) & (
oc2['SDQ_REASON'].notna() | oc2['SDQ_SCORE'].notna())
            # That is, raise error if collection_start >= DOB + 17 years
oc_error_locs = oc2.index[mask]
return {'OC2': oc_error_locs.tolist()}
return error, _validate
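
# A hypothetical, illustrative sketch (not one of the official rules): the age checks in
# rules such as 189 and 358 compare DOB + pd.offsets.DateOffset(years=n) against a
# reference date, which handles leap years correctly, unlike adding a fixed number of
# days. The function name and dates below are made up for demonstration only.
def _sketch_age_offset_comparison():
    import pandas as pd

    dobs = pd.to_datetime(pd.Series(['01/04/2004', '02/04/2004']), format='%d/%m/%Y')
    collection_start = pd.to_datetime('01/04/2021', format='%d/%m/%Y')
    # True where the child is 17 or over at the start of the collection year
    is_17_or_over = (dobs + pd.offsets.DateOffset(years=17)) <= collection_start
    return is_17_or_over.tolist()  # [True, False]
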
def validate_226():
error = ErrorDefinition(
code='226',
description='Reason for placement change is not required.',
affected_fields=['REASON_PLACE_CHANGE', 'PLACE']
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
code_list = ['T0', 'T1', 'T2', 'T3', 'T4']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
# create column to see previous REASON_PLACE_CHANGE
episodes = episodes.sort_values(['CHILD', 'DECOM'])
episodes['PREVIOUS_REASON'] = episodes.groupby('CHILD')['REASON_PLACE_CHANGE'].shift(1)
# If <PL> = 'T0'; 'T1'; 'T2'; 'T3' or 'T4' then <REASON_PLACE_CHANGE> should be null in current episode and current episode - 1
mask = episodes['PLACE'].isin(code_list) & (
episodes['REASON_PLACE_CHANGE'].notna() | episodes['PREVIOUS_REASON'].notna())
# error locations
error_locs = episodes.index[mask]
return {'Episodes': error_locs.tolist()}
return error, _validate
def validate_358():
error = ErrorDefinition(
code='358',
description='Child with this legal status should not be under 10.',
affected_fields=['DECOM', 'DOB', 'LS']
)
def _validate(dfs):
if 'Episodes' not in dfs or 'Header' not in dfs:
return {}
else:
episodes = dfs['Episodes']
header = dfs['Header']
code_list = ['J1', 'J2', 'J3']
# convert dates to datetime format
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
# prepare to merge
episodes.reset_index(inplace=True)
header.reset_index(inplace=True)
merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er'])
            # Where <LS> = 'J1', 'J2' or 'J3' then <DOB> should be at least 10 years before <DECOM>
mask = merged['LS'].isin(code_list) & (merged['DOB'] + pd.offsets.DateOffset(years=10) < merged['DECOM'])
# That is, raise error if DECOM > DOB + 10years
# error locations
header_error_locs = merged.loc[mask, 'index_er']
episode_error_locs = merged.loc[mask, 'index_eps']
# one to many join implies use .unique on the 'one'
return {'Episodes': episode_error_locs.tolist(), 'Header': header_error_locs.unique().tolist()}
return error, _validate
def validate_407():
error = ErrorDefinition(
code='407',
description='Reason episode ceased is Special Guardianship Order, but child has reached age 18.',
affected_fields=['DEC', 'DOB', 'REC']
)
def _validate(dfs):
if 'Episodes' not in dfs or 'Header' not in dfs:
return {}
else:
episodes = dfs['Episodes']
header = dfs['Header']
code_list = ['E45', 'E46', 'E47', 'E48']
# convert dates to datetime format
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
# prepare to merge
episodes.reset_index(inplace=True)
header.reset_index(inplace=True)
merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er'])
# If <REC> = ‘E45’ or ‘E46’ or ‘E47’ or ‘E48’ then <DOB> must be < 18 years prior to <DEC>
mask = merged['REC'].isin(code_list) & (merged['DOB'] + pd.offsets.DateOffset(years=18) < merged['DEC'])
            # That is, raise error if DEC > DOB + 18 years
# error locations
header_error_locs = merged.loc[mask, 'index_er']
episode_error_locs = merged.loc[mask, 'index_eps']
# one to many join implies use .unique on the 'one'
return {'Episodes': episode_error_locs.tolist(), 'Header': header_error_locs.unique().tolist()}
return error, _validate
def validate_1007():
error = ErrorDefinition(
code='1007',
description='Care leaver information is not required for 17- or 18-year olds who are still looked after.',
affected_fields=['DEC', 'REC', 'DOB', 'IN_TOUCH', 'ACTIV', 'ACCOM']
)
def _validate(dfs):
if 'Episodes' not in dfs or 'OC3' not in dfs:
return {}
else:
episodes = dfs['Episodes']
oc3 = dfs['OC3']
collection_end = dfs['metadata']['collection_end']
# convert dates to datetime format
oc3['DOB'] = pd.to_datetime(oc3['DOB'], format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
# prepare to merge
episodes.reset_index(inplace=True)
oc3.reset_index(inplace=True)
merged = episodes.merge(oc3, on='CHILD', how='left', suffixes=['_eps', '_oc3'])
# If <DOB> < 19 and >= to 17 years prior to <COLLECTION_END_DATE> and current episode <DEC> and or <REC> not provided then <IN_TOUCH>, <ACTIV> and <ACCOM> should not be provided
check_age = (merged['DOB'] + pd.offsets.DateOffset(years=17) <= collection_end) & (
merged['DOB'] + pd.offsets.DateOffset(years=19) > collection_end)
# That is, check that 17<=age<19
check_dec_rec = merged['REC'].isna() | merged['DEC'].isna()
# if either DEC or REC are absent
mask = check_age & check_dec_rec & (
merged['IN_TOUCH'].notna() | merged['ACTIV'].notna() | merged['ACCOM'].notna())
# Then raise an error if either IN_TOUCH, ACTIV, or ACCOM have been provided too
# error locations
oc3_error_locs = merged.loc[mask, 'index_oc3']
episode_error_locs = merged.loc[mask, 'index_eps']
# one to many join implies use .unique on the 'one'
return {'Episodes': episode_error_locs.tolist(), 'OC3': oc3_error_locs.unique().tolist()}
return error, _validate
def validate_442():
error = ErrorDefinition(
code='442',
description='Unique Pupil Number (UPN) field is not completed.',
affected_fields=['UPN', 'LS']
)
def _validate(dfs):
if ('Episodes' not in dfs) or ('Header' not in dfs):
return {}
else:
episodes = dfs['Episodes']
header = dfs['Header']
episodes.reset_index(inplace=True)
header.reset_index(inplace=True)
code_list = ['V3', 'V4']
# merge left on episodes to get all children for which episodes have been recorded even if they do not exist on the header.
merged = episodes.merge(header, on=['CHILD'], how='left', suffixes=['_eps', '_er'])
# Where any episode present, with an <LS> not = 'V3' or 'V4' then <UPN> must be provided
mask = (~merged['LS'].isin(code_list)) & merged['UPN'].isna()
episode_error_locs = merged.loc[mask, 'index_eps']
header_error_locs = merged.loc[mask, 'index_er']
return {'Episodes': episode_error_locs.tolist(),
# Select unique values since many episodes are joined to one header
# and multiple errors will be raised for the same index.
'Header': header_error_locs.dropna().unique().tolist()}
return error, _validate
def validate_344():
error = ErrorDefinition(
code='344',
description='The record shows the young person has died or returned home to live with parent(s) or someone with parental responsibility for a continuous period of 6 months or more, but activity and/or accommodation on leaving care have been completed.',
affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM']
)
def _validate(dfs):
if 'OC3' not in dfs:
return {}
else:
oc3 = dfs['OC3']
# If <IN_TOUCH> = 'DIED' or 'RHOM' then <ACTIV> and <ACCOM> should not be provided
mask = ((oc3['IN_TOUCH'] == 'DIED') | (oc3['IN_TOUCH'] == 'RHOM')) & (
oc3['ACTIV'].notna() | oc3['ACCOM'].notna())
error_locations = oc3.index[mask]
return {'OC3': error_locations.to_list()}
return error, _validate
def validate_345():
error = ErrorDefinition(
code='345',
description='The data collection record shows the local authority is in touch with this young person, but activity and/or accommodation data items are zero.',
affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM']
)
def _validate(dfs):
if 'OC3' not in dfs:
return {}
else:
oc3 = dfs['OC3']
# If <IN_TOUCH> = 'Yes' then <ACTIV> and <ACCOM> must be provided
mask = (oc3['IN_TOUCH'] == 'YES') & (oc3['ACTIV'].isna() | oc3['ACCOM'].isna())
error_locations = oc3.index[mask]
return {'OC3': error_locations.to_list()}
return error, _validate
def validate_384():
error = ErrorDefinition(
code='384',
description='A child receiving respite care cannot be in a long-term foster placement ',
affected_fields=['PLACE', 'LS']
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
# Where <LS> = 'V3' or 'V4' then <PL> must not be 'U1' or 'U4'
mask = ((episodes['LS'] == 'V3') | (episodes['LS'] == 'V4')) & (
(episodes['PLACE'] == 'U1') | (episodes['PLACE'] == 'U4'))
error_locations = episodes.index[mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_390():
error = ErrorDefinition(
code='390',
description='Reason episode ceased is adopted but child has not been previously placed for adoption.',
affected_fields=['PLACE', 'REC']
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
# If <REC> = 'E11' or 'E12' then <PL> must be one of 'A3', 'A4', 'A5' or 'A6'
mask = ((episodes['REC'] == 'E11') | (episodes['REC'] == 'E12')) & ~(
(episodes['PLACE'] == 'A3') | (episodes['PLACE'] == 'A4') | (episodes['PLACE'] == 'A5') | (
episodes['PLACE'] == 'A6'))
error_locations = episodes.index[mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_378():
error = ErrorDefinition(
code='378',
description='A child who is placed with parent(s) cannot be looked after under a single period of accommodation under Section 20 of the Children Act 1989.',
affected_fields=['PLACE', 'LS']
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
            # the & operator binds more tightly than ==, so brackets are necessary here
mask = (episodes['PLACE'] == 'P1') & (episodes['LS'] == 'V2')
error_locations = episodes.index[mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_398():
error = ErrorDefinition(
code='398',
description='Distance field completed but child looked after under legal status V3 or V4.',
affected_fields=['LS', 'HOME_POST', 'PL_POST']
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
mask = ((episodes['LS'] == 'V3') | (episodes['LS'] == 'V4')) & (
episodes['HOME_POST'].notna() | episodes['PL_POST'].notna())
error_locations = episodes.index[mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_451():
error = ErrorDefinition(
code='451',
description='Child is still freed for adoption, but freeing orders could not be applied for since 30 December 2005.',
affected_fields=['DEC', 'REC', 'LS']
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
mask = episodes['DEC'].isna() & episodes['REC'].isna() & (episodes['LS'] == 'D1')
error_locations = episodes.index[mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_519():
error = ErrorDefinition(
code='519',
description='Data entered on the legal status of adopters shows civil partnership couple, but data entered on genders of adopters does not show it as a couple.',
affected_fields=['LS_ADOPTR', 'SEX_ADOPTR']
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
ad1 = dfs['AD1']
mask = (ad1['LS_ADOPTR'] == 'L2') & (
(ad1['SEX_ADOPTR'] != 'MM') & (ad1['SEX_ADOPTR'] != 'FF') & (ad1['SEX_ADOPTR'] != 'MF'))
error_locations = ad1.index[mask]
return {'AD1': error_locations.to_list()}
return error, _validate
def validate_520():
error = ErrorDefinition(
code='520',
description='Data entry on the legal status of adopters shows different gender married couple but data entry on genders of adopters shows it as a same gender couple.',
affected_fields=['LS_ADOPTR', 'SEX_ADOPTR']
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
ad1 = dfs['AD1']
# check condition
mask = (ad1['LS_ADOPTR'] == 'L11') & (ad1['SEX_ADOPTR'] != 'MF')
error_locations = ad1.index[mask]
return {'AD1': error_locations.to_list()}
return error, _validate
def validate_522():
error = ErrorDefinition(
code='522',
description='Date of decision that the child should be placed for adoption must be on or before the date that a child should no longer be placed for adoption.',
affected_fields=['DATE_PLACED', 'DATE_PLACED_CEASED']
)
def _validate(dfs):
if 'PlacedAdoption' not in dfs:
return {}
else:
placed_adoption = dfs['PlacedAdoption']
# Convert to datetimes
placed_adoption['DATE_PLACED_CEASED'] = pd.to_datetime(placed_adoption['DATE_PLACED_CEASED'],
format='%d/%m/%Y', errors='coerce')
placed_adoption['DATE_PLACED'] = pd.to_datetime(placed_adoption['DATE_PLACED'], format='%d/%m/%Y',
errors='coerce')
# Boolean mask
mask = placed_adoption['DATE_PLACED_CEASED'] > placed_adoption['DATE_PLACED']
error_locations = placed_adoption.index[mask]
return {'PlacedAdoption': error_locations.to_list()}
return error, _validate
def validate_563():
error = ErrorDefinition(
code='563',
description='The child should no longer be placed for adoption but the date of the decision that the child should be placed for adoption is blank',
affected_fields=['DATE_PLACED', 'REASON_PLACED_CEASED', 'DATE_PLACED_CEASED'],
)
def _validate(dfs):
if 'PlacedAdoption' not in dfs:
return {}
else:
placed_adoption = dfs['PlacedAdoption']
mask = placed_adoption['REASON_PLACED_CEASED'].notna() & placed_adoption['DATE_PLACED_CEASED'].notna() & \
placed_adoption['DATE_PLACED'].isna()
error_locations = placed_adoption.index[mask]
return {'PlacedAdoption': error_locations.to_list()}
return error, _validate
def validate_544():
error = ErrorDefinition(
code='544',
description="Any child who has conviction information completed must also have immunisation, teeth check, health assessment and substance misuse problem identified fields completed.",
affected_fields=['CONVICTED', 'IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
convict = oc2['CONVICTED'].astype(str) == '1'
immunisations = oc2['IMMUNISATIONS'].isna()
teeth_ck = oc2['TEETH_CHECK'].isna()
health_ass = oc2['HEALTH_ASSESSMENT'].isna()
sub_misuse = oc2['SUBSTANCE_MISUSE'].isna()
error_mask = convict & (immunisations | teeth_ck | health_ass | sub_misuse)
validation_error_locations = oc2.index[error_mask]
return {'OC2': validation_error_locations.to_list()}
return error, _validate
def validate_634():
error = ErrorDefinition(
code='634',
description='There are entries for previous permanence options, but child has not started to be looked after from 1 April 2016 onwards.',
affected_fields=['LA_PERM', 'PREV_PERM', 'DATE_PERM', 'DECOM']
)
def _validate(dfs):
if 'Episodes' not in dfs or 'PrevPerm' not in dfs:
return {}
else:
episodes = dfs['Episodes']
prevperm = dfs['PrevPerm']
collection_start = dfs['metadata']['collection_start']
# convert date field to appropriate format
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
            # the latest DECOM per child is the most likely to satisfy the condition
episodes['LAST_DECOM'] = episodes.groupby('CHILD')['DECOM'].transform('max')
# prepare to merge
episodes.reset_index(inplace=True)
prevperm.reset_index(inplace=True)
merged = prevperm.merge(episodes, on='CHILD', how='left', suffixes=['_prev', '_eps'])
# If <PREV_PERM> or <LA_PERM> or <DATE_PERM> provided, then at least 1 episode must have a <DECOM> later than 01/04/2016
mask = (merged['PREV_PERM'].notna() | merged['DATE_PERM'].notna() | merged['LA_PERM'].notna()) & (
merged['LAST_DECOM'] < collection_start)
eps_error_locs = merged.loc[mask, 'index_eps']
prevperm_error_locs = merged.loc[mask, 'index_prev']
# return {'PrevPerm':prevperm_error_locs}
return {'Episodes': eps_error_locs.unique().tolist(), 'PrevPerm': prevperm_error_locs.unique().tolist()}
return error, _validate
def validate_158():
error = ErrorDefinition(
code='158',
description='If a child has been recorded as receiving an intervention for their substance misuse problem, then the additional item on whether an intervention was offered should be left blank.',
affected_fields=['INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
error_mask = oc2['INTERVENTION_RECEIVED'].astype(str).eq('1') & oc2['INTERVENTION_OFFERED'].notna()
error_locations = oc2.index[error_mask]
return {'OC2': error_locations.tolist()}
return error, _validate
def validate_133():
error = ErrorDefinition(
code='133',
        description="Data entry for accommodation after leaving care is invalid. If reporting on a child's accommodation after leaving care the data entry must be valid.",
affected_fields=['ACCOM'],
)
def _validate(dfs):
if 'OC3' not in dfs:
return {}
else:
oc3 = dfs['OC3']
valid_codes = ['B1', 'B2', 'C1', 'C2', 'D1', 'D2', 'E1', 'E2', 'G1', 'G2', 'H1', 'H2', 'K1', 'K2', 'R1',
'R2', 'S2', 'T1', 'T2', 'U1', 'U2', 'V1', 'V2', 'W1', 'W2', 'X2', 'Y1', 'Y2', 'Z1', 'Z2',
'0']
error_mask = ~oc3['ACCOM'].isna() & ~oc3['ACCOM'].isin(valid_codes)
error_locations = oc3.index[error_mask]
return {'OC3': error_locations.tolist()}
return error, _validate
def validate_565():
error = ErrorDefinition(
code='565',
description='The date that the child started to be missing or away from placement without authorisation has been completed but whether the child was missing or away from placement without authorisation has not been completed.',
affected_fields=['MISSING', 'MIS_START']
)
def _validate(dfs):
if 'Missing' not in dfs:
return {}
else:
missing = dfs['Missing']
mask = missing['MIS_START'].notna() & missing['MISSING'].isna()
error_locations = missing.index[mask]
return {'Missing': error_locations.to_list()}
return error, _validate
def validate_433():
error = ErrorDefinition(
code='433',
description='The reason for new episode suggests that this is a continuation episode, but the episode does not start on the same day as the last episode finished.',
affected_fields=['RNE', 'DECOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes['original_index'] = episodes.index
episodes.sort_values(['CHILD', 'DECOM', 'DEC'], inplace=True)
episodes[['PREVIOUS_DEC', 'PREVIOUS_CHILD']] = episodes[['DEC', 'CHILD']].shift(1)
rne_is_ongoing = episodes['RNE'].str.upper().astype(str).isin(['P', 'L', 'T', 'U', 'B'])
date_mismatch = episodes['PREVIOUS_DEC'] != episodes['DECOM']
missing_date = episodes['PREVIOUS_DEC'].isna() | episodes['DECOM'].isna()
same_child = episodes['PREVIOUS_CHILD'] == episodes['CHILD']
error_mask = rne_is_ongoing & (date_mismatch | missing_date) & same_child
error_locations = episodes['original_index'].loc[error_mask].sort_values()
return {'Episodes': error_locations.to_list()}
return error, _validate
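
# A hypothetical, illustrative sketch (not one of the official rules): rule 433 compares
# each episode with the previous row of the sorted frame, so a continuation episode must
# start on the day the same child's previous episode ended. The function name and example
# data are made up for demonstration only.
def _sketch_continuation_gap():
    import pandas as pd

    eps = pd.DataFrame({
        'CHILD': ['A', 'A'],
        'RNE': ['S', 'P'],
        'DECOM': ['01/05/2020', '03/06/2020'],
        'DEC': ['01/06/2020', None],
    }).sort_values(['CHILD', 'DECOM'])
    eps[['PREV_DEC', 'PREV_CHILD']] = eps[['DEC', 'CHILD']].shift(1)
    # a 'P' episode for the same child that does not start on the previous DEC is flagged
    gap = (eps['RNE'] == 'P') & (eps['PREV_CHILD'] == eps['CHILD']) & (eps['PREV_DEC'] != eps['DECOM'])
    return eps.index[gap].tolist()  # [1]
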
def validate_437():
error = ErrorDefinition(
code='437',
description='Reason episode ceased is child has died or is aged 18 or over but there are further episodes.',
affected_fields=['REC'],
)
# !# potential false negatives, as this only operates on the current year's data
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes.sort_values(['CHILD', 'DECOM'], inplace=True)
episodes[['NEXT_DECOM', 'NEXT_CHILD']] = episodes[['DECOM', 'CHILD']].shift(-1)
# drop rows with missing DECOM as invalid/missing values can lead to errors
episodes = episodes.dropna(subset=['DECOM'])
ceased_e2_e15 = episodes['REC'].str.upper().astype(str).isin(['E2', 'E15'])
has_later_episode = episodes['CHILD'] == episodes['NEXT_CHILD']
error_mask = ceased_e2_e15 & has_later_episode
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_547():
error = ErrorDefinition(
code='547',
description="Any child who has health promotion information completed must also have immunisation, teeth check, health assessment and substance misuse problem identified fields completed.",
affected_fields=['HEALTH_CHECK', 'IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
healthck = oc2['HEALTH_CHECK'].astype(str) == '1'
immunisations = oc2['IMMUNISATIONS'].isna()
teeth_ck = oc2['TEETH_CHECK'].isna()
health_ass = oc2['HEALTH_ASSESSMENT'].isna()
sub_misuse = oc2['SUBSTANCE_MISUSE'].isna()
error_mask = healthck & (immunisations | teeth_ck | health_ass | sub_misuse)
validation_error_locations = oc2.index[error_mask]
return {'OC2': validation_error_locations.to_list()}
return error, _validate
def validate_635():
error = ErrorDefinition(
code='635',
description='There are entries for date of order and local authority code where previous permanence option was arranged but previous permanence code is Z1',
affected_fields=['LA_PERM', 'DATE_PERM', 'PREV_PERM']
)
def _validate(dfs):
if 'PrevPerm' not in dfs:
return {}
else:
prev_perm = dfs['PrevPerm']
# raise and error if either LA_PERM or DATE_PERM are present, yet PREV_PERM is absent.
mask = ((prev_perm['LA_PERM'].notna() | prev_perm['DATE_PERM'].notna()) & prev_perm['PREV_PERM'].isna())
error_locations = prev_perm.index[mask]
return {'PrevPerm': error_locations.to_list()}
return error, _validate
def validate_550():
error = ErrorDefinition(
code='550',
description='A placement provider code of PR0 can only be associated with placement P1.',
affected_fields=['PLACE', 'PLACE_PROVIDER'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
mask = (episodes['PLACE'] != 'P1') & episodes['PLACE_PROVIDER'].eq('PR0')
validation_error_locations = episodes.index[mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_217():
error = ErrorDefinition(
code='217',
description='Children who are placed for adoption with current foster carers (placement types A3 or A5) must have a reason for new episode of S, T or U.',
affected_fields=['PLACE', 'DECOM', 'RNE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
            min_decom_allowed = pd.to_datetime('01/04/2015', format='%d/%m/%Y', errors='coerce')
            reason_new_ep = ['S', 'T', 'U']
            place_codes = ['A3', 'A5']
            mask = (episodes['PLACE'].isin(place_codes) & (episodes['DECOM'] >= min_decom_allowed)) & ~episodes[
                'RNE'].isin(reason_new_ep)
validation_error_mask = mask
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_518():
error = ErrorDefinition(
code='518',
description='If reporting legal status of adopters is L4 then the genders of adopters should be coded as MM or FF. MM = the adopting couple are both males. FF = the adopting couple are both females.',
affected_fields=['LS_ADOPTR', 'SEX_ADOPTR'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
AD1 = dfs['AD1']
error_mask = AD1['LS_ADOPTR'].eq('L4') & ~AD1['SEX_ADOPTR'].isin(['MM', 'FF'])
error_locations = AD1.index[error_mask]
return {'AD1': error_locations.tolist()}
return error, _validate
def validate_517():
error = ErrorDefinition(
code='517',
description='If reporting legal status of adopters is L3 then the genders of adopters should be coded as MF. MF = the adopting couple are male and female.',
affected_fields=['LS_ADOPTR', 'SEX_ADOPTR'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
AD1 = dfs['AD1']
error_mask = AD1['LS_ADOPTR'].eq('L3') & ~AD1['SEX_ADOPTR'].isin(['MF'])
error_locations = AD1.index[error_mask]
return {'AD1': error_locations.tolist()}
return error, _validate
def validate_558():
error = ErrorDefinition(
code='558',
        description='If a child has been adopted, then the decision to place them for adoption has not been disrupted and the date of the decision that a child should no longer be placed for adoption should be left blank. If the REC code is either E11 or E12 then the DATE_PLACED_CEASED date should not be provided.',
affected_fields=['DATE_PLACED_CEASED', 'REC'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'PlacedAdoption' not in dfs:
return {}
else:
episodes = dfs['Episodes']
placedAdoptions = dfs['PlacedAdoption']
episodes = episodes.reset_index()
rec_codes = ['E11', 'E12']
placeEpisodes = episodes[episodes['REC'].isin(rec_codes)]
merged = placeEpisodes.merge(placedAdoptions, how='left', on='CHILD').set_index('index')
episodes_with_errors = merged[merged['DATE_PLACED_CEASED'].notna()]
error_mask = episodes.index.isin(episodes_with_errors.index)
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_453():
error = ErrorDefinition(
code='453',
description='Contradiction between placement distance in the last episode of the previous year and in the first episode of the current year.',
affected_fields=['PL_DISTANCE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
if 'Episodes_last' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes_last = dfs['Episodes_last']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes_last['DECOM'] = pd.to_datetime(episodes_last['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes['PL_DISTANCE'] = pd.to_numeric(episodes['PL_DISTANCE'], errors='coerce')
episodes_last['PL_DISTANCE'] = pd.to_numeric(episodes_last['PL_DISTANCE'], errors='coerce')
# drop rows with missing DECOM before finding idxmin/max, as invalid/missing values can lead to errors
episodes = episodes.dropna(subset=['DECOM'])
episodes_last = episodes_last.dropna(subset=['DECOM'])
episodes_min = episodes.groupby('CHILD')['DECOM'].idxmin()
episodes_last_max = episodes_last.groupby('CHILD')['DECOM'].idxmax()
episodes = episodes[episodes.index.isin(episodes_min)]
episodes_last = episodes_last[episodes_last.index.isin(episodes_last_max)]
episodes_merged = episodes.reset_index().merge(episodes_last, how='left', on=['CHILD'],
suffixes=('', '_last'), indicator=True).set_index('index')
in_both_years = episodes_merged['_merge'] == 'both'
same_rne = episodes_merged['RNE'] == episodes_merged['RNE_last']
last_year_open = episodes_merged['DEC_last'].isna()
different_pl_dist = abs(episodes_merged['PL_DISTANCE'] - episodes_merged['PL_DISTANCE_last']) >= 0.2
error_mask = in_both_years & same_rne & last_year_open & different_pl_dist
validation_error_locations = episodes.index[error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_516():
error = ErrorDefinition(
code='516',
        description='The episode data submitted for this child does not show that he/she was with their former foster carer(s) during the year. If the code in the reason episode ceased is E45 or E46 the child must have a placement code of U1 to U6.',
affected_fields=['REC', 'PLACE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
place_codes = ['U1', 'U2', 'U3', 'U4', 'U5', 'U6']
rec_codes = ['E45', 'E46']
error_mask = episodes['REC'].isin(rec_codes) & ~episodes['PLACE'].isin(place_codes)
validation_error_locations = episodes.index[error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_511():
error = ErrorDefinition(
code='511',
description='If reporting that the number of person(s) adopting the looked after child is two adopters then the code should only be MM, FF or MF. MM = the adopting couple are both males; FF = the adopting couple are both females; MF = The adopting couple are male and female.',
affected_fields=['NB_ADOPTR', 'SEX_ADOPTR'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
AD1 = dfs['AD1']
mask = AD1['NB_ADOPTR'].astype(str).eq('2') & AD1['SEX_ADOPTR'].isin(['M1', 'F1'])
validation_error_mask = mask
validation_error_locations = AD1.index[validation_error_mask]
return {'AD1': validation_error_locations.tolist()}
return error, _validate
def validate_524():
error = ErrorDefinition(
code='524',
description='If reporting legal status of adopters is L12 then the genders of adopters should be coded as MM or FF. MM = the adopting couple are both males. FF = the adopting couple are both females',
affected_fields=['LS_ADOPTR', 'SEX_ADOPTR'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
AD1 = dfs['AD1']
error_mask = AD1['LS_ADOPTR'].eq('L12') & ~AD1['SEX_ADOPTR'].isin(['MM', 'FF'])
error_locations = AD1.index[error_mask]
return {'AD1': error_locations.tolist()}
return error, _validate
def validate_441():
error = ErrorDefinition(
code='441',
description='Participation method indicates child was 4 years old or over at the time of the review, but the date of birth and review date indicates the child was under 4 years old.',
affected_fields=['DOB', 'REVIEW', 'REVIEW_CODE'],
)
def _validate(dfs):
if 'Reviews' not in dfs:
return {}
else:
reviews = dfs['Reviews']
reviews['DOB'] = pd.to_datetime(reviews['DOB'], format='%d/%m/%Y', errors='coerce')
reviews['REVIEW'] = pd.to_datetime(reviews['REVIEW'], format='%d/%m/%Y', errors='coerce')
reviews = reviews.dropna(subset=['REVIEW', 'DOB'])
mask = reviews['REVIEW_CODE'].isin(['PN1', 'PN2', 'PN3', 'PN4', 'PN5', 'PN6', 'PN7']) & (
reviews['REVIEW'] < reviews['DOB'] + pd.offsets.DateOffset(years=4))
validation_error_mask = mask
validation_error_locations = reviews.index[validation_error_mask]
return {'Reviews': validation_error_locations.tolist()}
return error, _validate
def validate_184():
error = ErrorDefinition(
code='184',
description='Date of decision that a child should be placed for adoption is before the child was born.',
        affected_fields=['DATE_PLACED',  # PlacedAdoption
'DOB'], # Header
)
def _validate(dfs):
if 'Header' not in dfs or 'PlacedAdoption' not in dfs:
return {}
else:
child_record = dfs['Header']
placed_for_adoption = dfs['PlacedAdoption']
all_data = (placed_for_adoption
.reset_index()
.merge(child_record, how='left', on='CHILD', suffixes=[None, '_P4A']))
all_data['DATE_PLACED'] = pd.to_datetime(all_data['DATE_PLACED'], format='%d/%m/%Y', errors='coerce')
all_data['DOB'] = pd.to_datetime(all_data['DOB'], format='%d/%m/%Y', errors='coerce')
mask = (all_data['DATE_PLACED'] >= all_data['DOB']) | all_data['DATE_PLACED'].isna()
validation_error = ~mask
validation_error_locations = all_data[validation_error]['index'].unique()
return {'PlacedAdoption': validation_error_locations.tolist()}
return error, _validate
def validate_612():
error = ErrorDefinition(
code='612',
description="Date of birth field has been completed but mother field indicates child is not a mother.",
affected_fields=['SEX', 'MOTHER', 'MC_DOB'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
else:
header = dfs['Header']
error_mask = (
((header['MOTHER'].astype(str) == '0') | header['MOTHER'].isna())
& (header['SEX'].astype(str) == '2')
& header['MC_DOB'].notna()
)
validation_error_locations = header.index[error_mask]
return {'Header': validation_error_locations.tolist()}
return error, _validate
def validate_552():
"""
    This error checks that the first adoption episode starts on or after the last decision date.
    If there are multiple of either, the result may be unexpected.
"""
error = ErrorDefinition(
code="552",
description="Date of Decision to place a child for adoption should be on or prior to the date that the child was placed for adoption.",
# Field that defines date of decision to place a child for adoption is DATE_PLACED and the start of adoption is defined by DECOM with 'A' placement types.
affected_fields=['DATE_PLACED', 'DECOM'],
)
def _validate(dfs):
if ('PlacedAdoption' not in dfs) or ('Episodes' not in dfs):
return {}
else:
# get the required datasets
placed_adoption = dfs['PlacedAdoption']
episodes = dfs['Episodes']
# keep index values so that they stay the same when needed later on for error locations
placed_adoption.reset_index(inplace=True)
episodes.reset_index(inplace=True)
adoption_eps = episodes[episodes['PLACE'].isin(['A3', 'A4', 'A5', 'A6'])].copy()
# find most recent adoption decision
placed_adoption['DATE_PLACED'] = pd.to_datetime(placed_adoption['DATE_PLACED'], format='%d/%m/%Y',
errors='coerce')
# remove rows where either of the required values have not been filled.
placed_adoption = placed_adoption[placed_adoption['DATE_PLACED'].notna()]
placed_adoption_inds = placed_adoption.groupby('CHILD')['DATE_PLACED'].idxmax(skipna=True)
last_decision = placed_adoption.loc[placed_adoption_inds]
# first time child started adoption
adoption_eps["DECOM"] = pd.to_datetime(adoption_eps['DECOM'], format='%d/%m/%Y', errors='coerce')
adoption_eps = adoption_eps[adoption_eps['DECOM'].notna()]
adoption_eps_inds = adoption_eps.groupby('CHILD')['DECOM'].idxmin(skipna=True)
# full information of first adoption
first_adoption = adoption_eps.loc[adoption_eps_inds]
# date of decision and date of start of adoption (DECOM) have to be put in one table
merged = first_adoption.merge(last_decision, on=['CHILD'], how='left', suffixes=['_EP', '_PA'])
            # flag rows where the child was placed (DECOM) before the date of the decision to place (DATE_PLACED)
decided_after_placed = merged["DECOM"] < merged["DATE_PLACED"]
# find the corresponding location of error values per file.
episode_error_locs = merged.loc[decided_after_placed, 'index_EP']
placedadoption_error_locs = merged.loc[decided_after_placed, 'index_PA']
return {"PlacedAdoption": placedadoption_error_locs.to_list(), "Episodes": episode_error_locs.to_list()}
return error, _validate
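
# A hypothetical, illustrative sketch (not one of the official rules): rule 552 isolates
# one row per child with groupby(...)['col'].idxmax() / idxmin(), which return the row
# labels of the latest / earliest date per child. The function name and example data are
# made up for demonstration only.
def _sketch_latest_row_per_child():
    import pandas as pd

    placed = pd.DataFrame({
        'CHILD': ['A', 'A', 'B'],
        'DATE_PLACED': pd.to_datetime(['01/02/2020', '01/06/2020', '15/03/2020'], format='%d/%m/%Y'),
    })
    latest_idx = placed.groupby('CHILD')['DATE_PLACED'].idxmax()
    latest = placed.loc[latest_idx, 'DATE_PLACED']
    return latest.dt.strftime('%d/%m/%Y').tolist()  # ['01/06/2020', '15/03/2020']
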
def validate_551():
error = ErrorDefinition(
code='551',
description='Child has been placed for adoption but there is no date of the decision that the child should be placed for adoption.',
affected_fields=['DATE_PLACED', 'PLACE'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'PlacedAdoption' not in dfs:
return {}
else:
episodes = dfs['Episodes']
placedAdoptions = dfs['PlacedAdoption']
episodes = episodes.reset_index()
place_codes = ['A3', 'A4', 'A5', 'A6']
placeEpisodes = episodes[episodes['PLACE'].isin(place_codes)]
merged = placeEpisodes.merge(placedAdoptions, how='left', on='CHILD').set_index('index')
episodes_with_errors = merged[merged['DATE_PLACED'].isna()]
error_mask = episodes.index.isin(episodes_with_errors.index)
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_557():
error = ErrorDefinition(
code='557',
description="Child for whom the decision was made that they should be placed for adoption has left care " +
"but was not adopted and information on the decision that they should no longer be placed for " +
"adoption items has not been completed.",
affected_fields=['DATE_PLACED_CEASED', 'REASON_PLACED_CEASED', # PlacedAdoption
'PLACE', 'LS', 'REC'], # Episodes
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
if 'PlacedAdoption' not in dfs:
return {}
else:
eps = dfs['Episodes']
placed = dfs['PlacedAdoption']
eps = eps.reset_index()
placed = placed.reset_index()
child_placed = eps['PLACE'].isin(['A3', 'A4', 'A5', 'A6'])
order_granted = eps['LS'].isin(['D1', 'E1'])
not_adopted = ~eps['REC'].isin(['E11', 'E12']) & eps['REC'].notna()
placed['ceased_incomplete'] = (
placed['DATE_PLACED_CEASED'].isna() | placed['REASON_PLACED_CEASED'].isna()
)
eps = eps[(child_placed | order_granted) & not_adopted]
eps = eps.merge(placed, on='CHILD', how='left', suffixes=['_EP', '_PA'], indicator=True)
eps = eps[(eps['_merge'] == 'left_only') | eps['ceased_incomplete']]
EP_errors = eps['index_EP']
PA_errors = eps['index_PA'].dropna()
return {
'Episodes': EP_errors.to_list(),
'PlacedAdoption': PA_errors.to_list(),
}
return error, _validate
def validate_207():
error = ErrorDefinition(
code='207',
description='Mother status for the current year disagrees with the mother status already recorded for this child.',
affected_fields=['MOTHER'],
)
def _validate(dfs):
if 'Header' not in dfs or 'Header_last' not in dfs:
return {}
else:
header = dfs['Header']
header_last = dfs['Header_last']
header_merged = header.reset_index().merge(header_last, how='left', on=['CHILD'], suffixes=('', '_last'),
indicator=True).set_index('index')
in_both_years = header_merged['_merge'] == 'both'
mother_is_different = header_merged['MOTHER'].astype(str) != header_merged['MOTHER_last'].astype(str)
mother_was_true = header_merged['MOTHER_last'].astype(str) == '1'
error_mask = in_both_years & mother_is_different & mother_was_true
error_locations = header.index[error_mask]
return {'Header': error_locations.to_list()}
return error, _validate
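
# A hypothetical, illustrative sketch (not one of the official rules): rule 207 merges the
# current and previous year's Header with indicator=True, so the comparison only fires for
# children present in both years. The function name and example data are made up for
# demonstration only.
def _sketch_compare_with_last_year():
    import pandas as pd

    this_year = pd.DataFrame({'CHILD': ['A', 'B'], 'MOTHER': ['0', '1']})
    last_year = pd.DataFrame({'CHILD': ['A'], 'MOTHER': ['1']})
    merged = this_year.merge(last_year, on='CHILD', how='left', suffixes=('', '_last'), indicator=True)
    changed = (merged['_merge'] == 'both') & (merged['MOTHER'] != merged['MOTHER_last'])
    return merged.loc[changed, 'CHILD'].tolist()  # ['A']
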
def validate_523():
error = ErrorDefinition(
code='523',
description="Date of decision that the child should be placed for adoption should be the same date as the decision that adoption is in the best interest (date should be placed).",
affected_fields=['DATE_PLACED', 'DATE_INT'],
)
def _validate(dfs):
if ("AD1" not in dfs) or ("PlacedAdoption" not in dfs):
return {}
else:
placed_adoption = dfs["PlacedAdoption"]
ad1 = dfs["AD1"]
# keep initial index values to be reused for locating errors later on.
placed_adoption.reset_index(inplace=True)
ad1.reset_index(inplace=True)
# convert to datetime to enable comparison
placed_adoption['DATE_PLACED'] = pd.to_datetime(placed_adoption['DATE_PLACED'], format="%d/%m/%Y",
errors='coerce')
ad1["DATE_INT"] = pd.to_datetime(ad1['DATE_INT'], format='%d/%m/%Y', errors='coerce')
# drop rows where either of the required values have not been filled.
placed_adoption = placed_adoption[placed_adoption["DATE_PLACED"].notna()]
ad1 = ad1[ad1["DATE_INT"].notna()]
# bring corresponding values together from both dataframes
merged_df = placed_adoption.merge(ad1, on=['CHILD'], how='inner', suffixes=["_AD", "_PA"])
# find error values
different_dates = merged_df['DATE_INT'] != merged_df['DATE_PLACED']
# map error locations to corresponding indices
pa_error_locations = merged_df.loc[different_dates, 'index_PA']
ad1_error_locations = merged_df.loc[different_dates, 'index_AD']
return {"PlacedAdoption": pa_error_locations.to_list(), "AD1": ad1_error_locations.to_list()}
return error, _validate
def validate_3001():
error = ErrorDefinition(
code='3001',
description='Where care leavers information is being returned for a young person around their 17th birthday, the accommodation cannot be with their former foster carer(s).',
        affected_fields=['ACCOM'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
if 'OC3' not in dfs:
return {}
else:
header = dfs['Header']
oc3 = dfs['OC3']
collection_start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(dfs['metadata']['collection_end'], format='%d/%m/%Y', errors='coerce')
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
header['DOB17'] = header['DOB'] + pd.DateOffset(years=17)
oc3_merged = oc3.reset_index().merge(header, how='left', on=['CHILD'], suffixes=('', '_header'),
indicator=True).set_index('index')
accom_foster = oc3_merged['ACCOM'].str.upper().astype(str).isin(['Z1', 'Z2'])
age_17_in_year = (oc3_merged['DOB17'] <= collection_end) & (oc3_merged['DOB17'] >= collection_start)
error_mask = accom_foster & age_17_in_year
error_locations = oc3.index[error_mask]
return {'OC3': error_locations.to_list()}
return error, _validate
def validate_389():
error = ErrorDefinition(
code='389',
description='Reason episode ceased is that child transferred to care of adult social care services, but child is aged under 16.',
affected_fields=['REC'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
if 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
header['DOB16'] = header['DOB'] + pd.DateOffset(years=16)
episodes_merged = episodes.reset_index().merge(header, how='left', on=['CHILD'], suffixes=('', '_header'),
indicator=True).set_index('index')
ceased_asc = episodes_merged['REC'].str.upper().astype(str).isin(['E7'])
ceased_over_16 = episodes_merged['DOB16'] <= episodes_merged['DEC']
error_mask = ceased_asc & ~ceased_over_16
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_387():
error = ErrorDefinition(
code='387',
description='Reason episode ceased is child moved into independent living arrangement, but the child is aged under 14.',
affected_fields=['REC'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
if 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
header['DOB14'] = header['DOB'] + pd.DateOffset(years=14)
episodes_merged = episodes.reset_index().merge(header, how='left', on=['CHILD'], suffixes=('', '_header'),
indicator=True).set_index('index')
ceased_indep = episodes_merged['REC'].str.upper().astype(str).isin(['E5', 'E6'])
ceased_over_14 = episodes_merged['DOB14'] <= episodes_merged['DEC']
dec_present = episodes_merged['DEC'].notna()
error_mask = ceased_indep & ~ceased_over_14 & dec_present
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_452():
error = ErrorDefinition(
code='452',
description='Contradiction between local authority of placement code in the last episode of the previous year and in the first episode of the current year.',
affected_fields=['PL_LA'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
if 'Episodes_last' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes_last = dfs['Episodes_last']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
            episodes_last['DECOM'] = pd.to_datetime(episodes_last['DECOM'], format='%d/%m/%Y', errors='coerce')
            # drop rows with missing DECOM before finding idxmin/max, as invalid/missing values can lead to errors
            episodes = episodes.dropna(subset=['DECOM'])
            episodes_last = episodes_last.dropna(subset=['DECOM'])
            episodes_min = episodes.groupby('CHILD')['DECOM'].idxmin()
            episodes_last_max = episodes_last.groupby('CHILD')['DECOM'].idxmax()
episodes = episodes[episodes.index.isin(episodes_min)]
episodes_last = episodes_last[episodes_last.index.isin(episodes_last_max)]
episodes_merged = episodes.reset_index().merge(episodes_last, how='left', on=['CHILD'],
suffixes=('', '_last'), indicator=True).set_index('index')
in_both_years = episodes_merged['_merge'] == 'both'
same_rne = episodes_merged['RNE'] == episodes_merged['RNE_last']
last_year_open = episodes_merged['DEC_last'].isna()
different_pl_la = episodes_merged['PL_LA'].astype(str) != episodes_merged['PL_LA_last'].astype(str)
error_mask = in_both_years & same_rne & last_year_open & different_pl_la
validation_error_locations = episodes.index[error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_386():
error = ErrorDefinition(
code='386',
description='Reason episode ceased is adopted but child has reached age 18.',
affected_fields=['REC'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
if 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
header['DOB18'] = header['DOB'] + pd.DateOffset(years=18)
episodes_merged = (
episodes
.reset_index()
.merge(header, how='left', on=['CHILD'], suffixes=('', '_header'), indicator=True)
.set_index('index')
.dropna(subset=['DOB18', 'DEC'])
)
ceased_adopted = episodes_merged['REC'].str.upper().astype(str).isin(['E11', 'E12'])
ceased_under_18 = episodes_merged['DOB18'] > episodes_merged['DEC']
error_mask = ceased_adopted & ~ceased_under_18
error_locations = episodes_merged.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_363():
error = ErrorDefinition(
code='363',
description='Child assessment order (CAO) lasted longer than 7 days allowed in the Children Act 1989.',
affected_fields=['LS', 'DECOM', 'DEC'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
episodes = dfs['Episodes']
collection_end_str = dfs['metadata']['collection_end']
L2_eps = episodes[episodes['LS'] == 'L3'].copy()
L2_eps['original_index'] = L2_eps.index
L2_eps = L2_eps[L2_eps['DECOM'].notna()]
L2_eps.loc[L2_eps['DEC'].isna(), 'DEC'] = collection_end_str
L2_eps['DECOM'] = pd.to_datetime(L2_eps['DECOM'], format='%d/%m/%Y', errors='coerce')
L2_eps = L2_eps.dropna(subset=['DECOM'])
L2_eps['DEC'] = pd.to_datetime(L2_eps['DEC'], format='%d/%m/%Y', errors='coerce')
        L2_eps = L2_eps.sort_values(['CHILD', 'DECOM'])
L2_eps['index'] = pd.RangeIndex(0, len(L2_eps))
L2_eps['index+1'] = L2_eps['index'] + 1
L2_eps = L2_eps.merge(L2_eps, left_on='index', right_on='index+1',
how='left', suffixes=[None, '_prev'])
L2_eps = L2_eps[['original_index', 'DECOM', 'DEC', 'DEC_prev', 'CHILD', 'CHILD_prev', 'LS']]
L2_eps['new_period'] = (
(L2_eps['DECOM'] > L2_eps['DEC_prev'])
| (L2_eps['CHILD'] != L2_eps['CHILD_prev'])
)
L2_eps['duration'] = (L2_eps['DEC'] - L2_eps['DECOM']).dt.days
L2_eps['period_id'] = L2_eps['new_period'].astype(int).cumsum()
        L2_eps['period_duration'] = L2_eps.groupby('period_id')['duration'].transform('sum')
error_mask = L2_eps['period_duration'] > 7
return {'Episodes': L2_eps.loc[error_mask, 'original_index'].to_list()}
return error, _validate
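
# A hypothetical, illustrative sketch (not one of the official rules): rules 363 and 364
# total durations over runs of contiguous episodes by cumulatively summing a 'new period'
# flag, so every episode in the same unbroken run shares a period_id. The function name
# and example flags are made up for demonstration only.
def _sketch_period_id():
    import pandas as pd

    new_period = pd.Series([True, False, True, False, False])
    period_id = new_period.astype(int).cumsum()
    return period_id.tolist()  # [1, 1, 2, 2, 2]
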
def validate_364():
error = ErrorDefinition(
code='364',
description='Sections 41-46 of Police and Criminal Evidence (PACE; 1984) severely limits ' +
'the time a child can be detained in custody in Local Authority (LA) accommodation.',
affected_fields=['LS', 'DECOM', 'DEC'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
episodes = dfs['Episodes']
collection_end_str = dfs['metadata']['collection_end']
J2_eps = episodes[episodes['LS'] == 'J2'].copy()
J2_eps['original_index'] = J2_eps.index
J2_eps['DECOM'] = pd.to_datetime(J2_eps['DECOM'], format='%d/%m/%Y', errors='coerce')
J2_eps = J2_eps[J2_eps['DECOM'].notna()]
J2_eps.loc[J2_eps['DEC'].isna(), 'DEC'] = collection_end_str
J2_eps['DEC'] = pd.to_datetime(J2_eps['DEC'], format='%d/%m/%Y', errors='coerce')
        J2_eps = J2_eps.sort_values(['CHILD', 'DECOM'])
J2_eps['index'] = pd.RangeIndex(0, len(J2_eps))
J2_eps['index_prev'] = J2_eps['index'] + 1
J2_eps = J2_eps.merge(J2_eps, left_on='index', right_on='index_prev',
how='left', suffixes=[None, '_prev'])
J2_eps = J2_eps[['original_index', 'DECOM', 'DEC', 'DEC_prev', 'CHILD', 'CHILD_prev', 'LS']]
J2_eps['new_period'] = (
(J2_eps['DECOM'] > J2_eps['DEC_prev'])
| (J2_eps['CHILD'] != J2_eps['CHILD_prev'])
)
J2_eps['duration'] = (J2_eps['DEC'] - J2_eps['DECOM']).dt.days
J2_eps['period_id'] = J2_eps['new_period'].astype(int).cumsum()
        J2_eps['period_duration'] = J2_eps.groupby('period_id')['duration'].transform('sum')
error_mask = J2_eps['period_duration'] > 21
return {'Episodes': J2_eps.loc[error_mask, 'original_index'].to_list()}
return error, _validate
def validate_365():
error = ErrorDefinition(
code='365',
        description='Any individual short-term respite placement must not exceed 17 days.',
affected_fields=['LS', 'DECOM', 'DEC'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
episodes = dfs['Episodes']
collection_end_str = dfs['metadata']['collection_end']
episodes.loc[episodes['DEC'].isna(), 'DEC'] = collection_end_str
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
over_17_days = episodes['DEC'] > episodes['DECOM'] + pd.DateOffset(days=17)
error_mask = (episodes['LS'] == 'V3') & over_17_days
return {'Episodes': episodes.index[error_mask].to_list()}
return error, _validate
def validate_367():
error = ErrorDefinition(
code='367',
description='The maximum amount of respite care allowable is 75 days in any 12-month period.',
affected_fields=['LS', 'DECOM', 'DEC'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
episodes = dfs['Episodes']
V3_eps = episodes[episodes['LS'] == 'V3']
V3_eps = V3_eps.dropna(subset=['DECOM']) # missing DECOM should get fixed before looking for this error
collection_start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(dfs['metadata']['collection_end'], format='%d/%m/%Y', errors='coerce')
V3_eps['DECOM_dt'] = pd.to_datetime(V3_eps['DECOM'], format='%d/%m/%Y', errors='coerce')
V3_eps['DEC_dt'] = pd.to_datetime(V3_eps['DEC'], format='%d/%m/%Y', errors='coerce')
# truncate episode start/end dates to collection start/end respectively
V3_eps.loc[V3_eps['DEC'].isna() | (V3_eps['DEC_dt'] > collection_end), 'DEC_dt'] = collection_end
V3_eps.loc[V3_eps['DECOM_dt'] < collection_start, 'DECOM_dt'] = collection_start
V3_eps['duration'] = (V3_eps['DEC_dt'] - V3_eps['DECOM_dt']).dt.days
V3_eps = V3_eps[V3_eps['duration'] > 0]
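# Durations are summed per child across the (truncated) collection year, which serves as a proxy for the 75-day limit in any 12-month period.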
V3_eps['year_total_duration'] = V3_eps.groupby('CHILD')['duration'].transform(sum)
error_mask = V3_eps['year_total_duration'] > 75
return {'Episodes': V3_eps.index[error_mask].to_list()}
return error, _validate
def validate_440():
error = ErrorDefinition(
code='440',
description='Participation method indicates child was under 4 years old at the time of the review, but date of birth and review date indicate the child was 4 years old or over.',
affected_fields=['DOB', 'REVIEW', 'REVIEW_CODE'],
)
def _validate(dfs):
if 'Reviews' not in dfs:
return {}
else:
reviews = dfs['Reviews']
reviews['DOB'] = pd.to_datetime(reviews['DOB'], format='%d/%m/%Y', errors='coerce')
reviews['REVIEW'] = pd.to_datetime(reviews['REVIEW'], format='%d/%m/%Y', errors='coerce')
mask = reviews['REVIEW_CODE'].eq('PN0') & (
reviews['REVIEW'] > reviews['DOB'] + pd.offsets.DateOffset(years=4))
validation_error_mask = mask
validation_error_locations = reviews.index[validation_error_mask]
return {'Reviews': validation_error_locations.tolist()}
return error, _validate
def validate_445():
error = ErrorDefinition(
code='445',
description='D1 is not a valid code for episodes starting after December 2005.',
affected_fields=['LS', 'DECOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
max_decom_allowed = pd.to_datetime('31/12/2005', format='%d/%m/%Y', errors='coerce')
mask = episodes['LS'].eq('D1') & (episodes['DECOM'] > max_decom_allowed)
validation_error_mask = mask
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_446():
error = ErrorDefinition(
code='446',
description='E1 is not a valid code for episodes starting before December 2005.',
affected_fields=['LS', 'DECOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
min_decom_allowed = pd.to_datetime('01/12/2005', format='%d/%m/%Y', errors='coerce')
mask = episodes['LS'].eq('E1') & (episodes['DECOM'] < min_decom_allowed)
validation_error_mask = mask
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_208():
error = ErrorDefinition(
code='208',
description='Unique Pupil Number (UPN) for the current year disagrees with the Unique Pupil Number (UPN) already recorded for this child.',
affected_fields=['UPN'],
)
def _validate(dfs):
if 'Header' not in dfs or 'Header_last' not in dfs:
return {}
else:
header = dfs['Header']
header_last = dfs['Header_last']
header_merged = header.reset_index().merge(header_last, how='left', on=['CHILD'], suffixes=('', '_last'),
indicator=True).set_index('index')
in_both_years = header_merged['_merge'] == 'both'
upn_is_different = header_merged['UPN'].str.upper().astype(str) != header_merged[
'UPN_last'].str.upper().astype(str)
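# A change from UN1 last year to one of UN2-UN6 this year is treated as an allowed update rather than a conflict.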
upn_not_recorded = header_merged['UPN'].str.upper().astype(str).isin(['UN2', 'UN3', 'UN4', 'UN5', 'UN6']) & \
header_merged['UPN_last'].str.upper().astype(str).isin(['UN1'])
error_mask = in_both_years & upn_is_different & ~upn_not_recorded
error_locations = header.index[error_mask]
return {'Header': error_locations.to_list()}
return error, _validate
def validate_204():
error = ErrorDefinition(
code='204',
description='Ethnic origin code disagrees with the ethnic origin already recorded for this child.',
affected_fields=['ETHNIC'],
)
def _validate(dfs):
if 'Header' not in dfs or 'Header_last' not in dfs:
return {}
else:
header = dfs['Header']
header_last = dfs['Header_last']
header_merged = header.reset_index().merge(header_last, how='left', on=['CHILD'], suffixes=('', '_last'),
indicator=True).set_index('index')
in_both_years = header_merged['_merge'] == 'both'
ethnic_is_different = header_merged['ETHNIC'].astype(str).str.upper() != header_merged[
'ETHNIC_last'].astype(str).str.upper()
error_mask = in_both_years & ethnic_is_different
error_locations = header.index[error_mask]
return {'Header': error_locations.to_list()}
return error, _validate
def validate_203():
error = ErrorDefinition(
code='203',
description='Date of birth disagrees with the date of birth already recorded for this child.',
affected_fields=['DOB'],
)
def _validate(dfs):
if 'Header' not in dfs or 'Header_last' not in dfs:
return {}
else:
header = dfs['Header']
header_last = dfs['Header_last']
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
header_last['DOB'] = pd.to_datetime(header_last['DOB'], format='%d/%m/%Y', errors='coerce')
header_merged = header.reset_index().merge(header_last, how='left', on=['CHILD'], suffixes=('', '_last'),
indicator=True).set_index('index')
in_both_years = header_merged['_merge'] == 'both'
dob_is_different = header_merged['DOB'].astype(str) != header_merged['DOB_last'].astype(str)
error_mask = in_both_years & dob_is_different
error_locations = header.index[error_mask]
return {'Header': error_locations.to_list()}
return error, _validate
def validate_530():
error = ErrorDefinition(
code='530',
description="A placement provider code of PR4 cannot be associated with placement P1.",
affected_fields=['PLACE', 'PLACE_PROVIDER'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
mask = episodes['PLACE'].eq('P1') & episodes['PLACE_PROVIDER'].eq('PR4')
validation_error_mask = mask
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_571():
error = ErrorDefinition(
code='571',
description='The date that the child ceased to be missing or away from placement without authorisation is before the start or after the end of the collection year.',
affected_fields=['MIS_END'],
)
def _validate(dfs):
if 'Missing' not in dfs:
return {}
else:
missing = dfs['Missing']
collection_start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(dfs['metadata']['collection_end'], format='%d/%m/%Y', errors='coerce')
missing['fMIS_END'] = pd.to_datetime(missing['MIS_END'], format='%d/%m/%Y', errors='coerce')
end_date_before_year = missing['fMIS_END'] < collection_start
end_date_after_year = missing['fMIS_END'] > collection_end
error_mask = end_date_before_year | end_date_after_year
error_locations = missing.index[error_mask]
return {'Missing': error_locations.to_list()}
return error, _validate
def validate_1005():
error = ErrorDefinition(
code='1005',
description='The end date of the missing episode or episode that the child was away from placement without authorisation is not a valid date.',
affected_fields=['MIS_END'],
)
def _validate(dfs):
if 'Missing' not in dfs:
return {}
else:
missing = dfs['Missing']
missing['fMIS_END'] = pd.to_datetime(missing['MIS_END'], format='%d/%m/%Y', errors='coerce')
missing_end_date = missing['MIS_END'].isna()
invalid_end_date = missing['fMIS_END'].isna()
error_mask = ~missing_end_date & invalid_end_date
error_locations = missing.index[error_mask]
return {'Missing': error_locations.to_list()}
return error, _validate
def validate_1004():
error = ErrorDefinition(
code='1004',
description='The start date of the missing episode or episode that the child was away from placement without authorisation is not a valid date.',
affected_fields=['MIS_START'],
)
def _validate(dfs):
if 'Missing' not in dfs:
return {}
else:
missing = dfs['Missing']
missing['fMIS_START'] = pd.to_datetime(missing['MIS_START'], format='%d/%m/%Y', errors='coerce')
missing_start_date = missing['MIS_START'].isna()
invalid_start_date = missing['fMIS_START'].isna()
error_mask = missing_start_date | invalid_start_date
error_locations = missing.index[error_mask]
return {'Missing': error_locations.to_list()}
return error, _validate
def validate_202():
error = ErrorDefinition(
code='202',
description='The gender code conflicts with the gender already recorded for this child.',
affected_fields=['SEX'],
)
def _validate(dfs):
if 'Header' not in dfs or 'Header_last' not in dfs:
return {}
else:
header = dfs['Header']
header_last = dfs['Header_last']
header_merged = header.reset_index().merge(header_last, how='left', on=['CHILD'], suffixes=('', '_last'),
indicator=True).set_index('index')
in_both_years = header_merged['_merge'] == 'both'
sex_is_different = header_merged['SEX'].astype(str) != header_merged['SEX_last'].astype(str)
error_mask = in_both_years & sex_is_different
error_locations = header.index[error_mask]
return {'Header': error_locations.to_list()}
return error, _validate
def validate_621():
error = ErrorDefinition(
code='621',
description="Mother’s field has been completed but date of birth shows that the mother is younger than her child.",
affected_fields=['DOB', 'MC_DOB'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
else:
header = dfs['Header']
header['MC_DOB'] = pd.to_datetime(header['MC_DOB'], format='%d/%m/%Y', errors='coerce')
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
mask = (header['MC_DOB'] > header['DOB']) | header['MC_DOB'].isna()
validation_error_mask = ~mask
validation_error_locations = header.index[validation_error_mask]
return {'Header': validation_error_locations.tolist()}
return error, _validate
def validate_556():
error = ErrorDefinition(
code='556',
description='Date of decision that the child should be placed for adoption should be on or prior to the date that the freeing order was granted.',
affected_fields=['DATE_PLACED', 'DECOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'PlacedAdoption' not in dfs:
return {}
else:
episodes = dfs['Episodes']
placedAdoptions = dfs['PlacedAdoption']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
placedAdoptions['DATE_PLACED'] = pd.to_datetime(placedAdoptions['DATE_PLACED'], format='%d/%m/%Y',
errors='coerce')
episodes = episodes.reset_index()
D1Episodes = episodes[episodes['LS'] == 'D1']
merged = D1Episodes.reset_index().merge(placedAdoptions, how='left', on='CHILD', ).set_index('index')
episodes_with_errors = merged[merged['DATE_PLACED'] > merged['DECOM']]
error_mask = episodes.index.isin(episodes_with_errors.index)
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_393():
error = ErrorDefinition(
code='393',
description='Child is looked after but mother field is not completed.',
affected_fields=['MOTHER'],
)
def _validate(dfs):
if 'Header' not in dfs or 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
header_female = header[header['SEX'].astype(str) == '2']
applicable_episodes = episodes[~episodes['LS'].str.upper().isin(['V3', 'V4'])]
error_mask = header_female['CHILD'].isin(applicable_episodes['CHILD']) & header_female['MOTHER'].isna()
error_locations = header_female.index[error_mask]
return {'Header': error_locations.to_list()}
return error, _validate
def validate_NoE():
error = ErrorDefinition(
code='NoE',
description='This child has no episodes loaded for previous year even though child started to be looked after before this current year.',
affected_fields=['DECOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'Episodes_last' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes_last = dfs['Episodes_last']
episodes_last['DECOM'] = pd.to_datetime(episodes_last['DECOM'], format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce')
episodes_before_year = episodes[episodes['DECOM'] < collection_start]
episodes_merged = episodes_before_year.reset_index().merge(episodes_last, how='left', on=['CHILD'],
indicator=True).set_index('index')
episodes_not_matched = episodes_merged[episodes_merged['_merge'] == 'left_only']
error_mask = episodes.index.isin(episodes_not_matched.index)
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_356():
error = ErrorDefinition(
code='356',
description='The date the episode ceased is before the date the same episode started.',
affected_fields=['DECOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
error_mask = episodes['DEC'].notna() & (episodes['DEC'] < episodes['DECOM'])
return {'Episodes': episodes.index[error_mask].to_list()}
return error, _validate
def validate_611():
error = ErrorDefinition(
code='611',
description="Date of birth field is blank, but child is a mother.",
affected_fields=['MOTHER', 'MC_DOB'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
else:
header = dfs['Header']
validation_error_mask = header['MOTHER'].astype(str).isin(['1']) & header['MC_DOB'].isna()
validation_error_locations = header.index[validation_error_mask]
return {'Header': validation_error_locations.tolist()}
return error, _validate
def validate_1009():
error = ErrorDefinition(
code='1009',
description='Reason for placement change is not a valid code.',
affected_fields=['REASON_PLACE_CHANGE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
episodes = dfs['Episodes']
code_list = [
'CARPL',
'CLOSE',
'ALLEG',
'STAND',
'APPRR',
'CREQB',
'CREQO',
'CHILD',
'LAREQ',
'PLACE',
'CUSTOD',
'OTHER'
]
mask = episodes['REASON_PLACE_CHANGE'].isin(code_list) | episodes['REASON_PLACE_CHANGE'].isna()
validation_error_mask = ~mask
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_1006():
error = ErrorDefinition(
code='1006',
description='Missing type invalid.',
affected_fields=['MISSING'],
)
def _validate(dfs):
if 'Missing' not in dfs:
return {}
missing_from_care = dfs['Missing']
code_list = ['M', 'A']
mask = missing_from_care['MISSING'].isin(code_list) | missing_from_care['MISSING'].isna()
validation_error_mask = ~mask
validation_error_locations = missing_from_care.index[validation_error_mask]
return {'Missing': validation_error_locations.tolist()}
return error, _validate
def validate_631():
error = ErrorDefinition(
code='631',
description='Previous permanence option not a valid value.',
affected_fields=['PREV_PERM'],
)
def _validate(dfs):
if 'PrevPerm' not in dfs:
return {}
previous_permanence = dfs['PrevPerm']
code_list = ['P1', 'P2', 'P3', 'P4', 'Z1']
mask = previous_permanence['PREV_PERM'].isin(code_list) | previous_permanence['PREV_PERM'].isna()
validation_error_mask = ~mask
validation_error_locations = previous_permanence.index[validation_error_mask]
return {'PrevPerm': validation_error_locations.tolist()}
return error, _validate
def validate_196():
error = ErrorDefinition(
code='196',
description='Strengths and Difficulties (SDQ) reason is not a valid code.',
affected_fields=['SDQ_REASON'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
oc2 = dfs['OC2']
code_list = ['SDQ1', 'SDQ2', 'SDQ3', 'SDQ4', 'SDQ5']
mask = oc2['SDQ_REASON'].isin(code_list) | oc2['SDQ_REASON'].isna()
validation_error_mask = ~mask
validation_error_locations = oc2.index[validation_error_mask]
return {'OC2': validation_error_locations.tolist()}
return error, _validate
def validate_177():
error = ErrorDefinition(
code='177',
description='The legal status of adopter(s) code is not a valid code.',
affected_fields=['LS_ADOPTR'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
adoptions = dfs['AD1']
code_list = ['L0', 'L11', 'L12', 'L2', 'L3', 'L4']
mask = adoptions['LS_ADOPTR'].isin(code_list) | adoptions['LS_ADOPTR'].isna()
validation_error_mask = ~mask
validation_error_locations = adoptions.index[validation_error_mask]
return {'AD1': validation_error_locations.tolist()}
return error, _validate
def validate_176():
error = ErrorDefinition(
code='176',
description='The gender of adopter(s) at the date of adoption code is not a valid code.',
affected_fields=['SEX_ADOPTR'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
adoptions = dfs['AD1']
code_list = ['M1', 'F1', 'MM', 'FF', 'MF']
mask = adoptions['SEX_ADOPTR'].isin(code_list) | adoptions['SEX_ADOPTR'].isna()
validation_error_mask = ~mask
validation_error_locations = adoptions.index[validation_error_mask]
return {'AD1': validation_error_locations.tolist()}
return error, _validate
def validate_175():
error = ErrorDefinition(
code='175',
description='The number of adopter(s) code is not a valid code.',
affected_fields=['NB_ADOPTR'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
adoptions = dfs['AD1']
code_list = ['1', '2']
mask = adoptions['NB_ADOPTR'].astype(str).isin(code_list) | adoptions['NB_ADOPTR'].isna()
validation_error_mask = ~mask
validation_error_locations = adoptions.index[validation_error_mask]
return {'AD1': validation_error_locations.tolist()}
return error, _validate
def validate_132():
error = ErrorDefinition(
code='132',
description='Data entry for activity after leaving care is invalid.',
affected_fields=['ACTIV'],
)
def _validate(dfs):
if 'OC3' not in dfs:
return {}
care_leavers = dfs['OC3']
code_list = [
'F1',
'P1',
'F2',
'P2',
'F4',
'P4',
'F5',
'P5',
'G4',
'G5',
'G6',
'0'
]
mask = care_leavers['ACTIV'].astype(str).isin(code_list) | care_leavers['ACTIV'].isna()
validation_error_mask = ~mask
validation_error_locations = care_leavers.index[validation_error_mask]
return {'OC3': validation_error_locations.tolist()}
return error, _validate
def validate_131():
error = ErrorDefinition(
code='131',
description='Data entry for being in touch after leaving care is invalid.',
affected_fields=['IN_TOUCH'],
)
def _validate(dfs):
if 'OC3' not in dfs:
return {}
care_leavers = dfs['OC3']
code_list = [
'YES',
'NO',
'DIED',
'REFU',
'NREQ',
'RHOM'
]
mask = care_leavers['IN_TOUCH'].isin(code_list) | care_leavers['IN_TOUCH'].isna()
validation_error_mask = ~mask
validation_error_locations = care_leavers.index[validation_error_mask]
return {'OC3': validation_error_locations.tolist()}
return error, _validate
def validate_120():
error = ErrorDefinition(
code='120',
description='The reason for the reversal of the decision that the child should be placed for adoption code is not valid.',
affected_fields=['REASON_PLACED_CEASED'],
)
def _validate(dfs):
if 'PlacedAdoption' not in dfs:
return {}
placed_adoptions = dfs['PlacedAdoption']
code_list = ['RD1', 'RD2', 'RD3', 'RD4']
mask = placed_adoptions['REASON_PLACED_CEASED'].isin(code_list) | placed_adoptions[
'REASON_PLACED_CEASED'].isna()
validation_error_mask = ~mask
validation_error_locations = placed_adoptions.index[validation_error_mask]
return {'PlacedAdoption': validation_error_locations.tolist()}
return error, _validate
def validate_114():
error = ErrorDefinition(
code='114',
description='Data entry to record the status of former carer(s) of an adopted child is invalid.',
affected_fields=['FOSTER_CARE'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
adoptions = dfs['AD1']
code_list = ['0', '1']
mask = adoptions['FOSTER_CARE'].astype(str).isin(code_list) | adoptions['FOSTER_CARE'].isna()
validation_error_mask = ~mask
validation_error_locations = adoptions.index[validation_error_mask]
return {'AD1': validation_error_locations.tolist()}
return error, _validate
def validate_178():
error = ErrorDefinition(
code='178',
description='Placement provider code is not a valid code.',
affected_fields=['PLACE_PROVIDER'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
episodes = dfs['Episodes']
code_list_placement_provider = ['PR0', 'PR1', 'PR2', 'PR3', 'PR4', 'PR5']
code_list_placement_with_no_provider = ['T0', 'T1', 'T2', 'T3', 'T4', 'Z1']
place_provider_needed_and_correct = episodes['PLACE_PROVIDER'].isin(code_list_placement_provider) & ~episodes[
'PLACE'].isin(code_list_placement_with_no_provider)
place_provider_not_provided = episodes['PLACE_PROVIDER'].isna()
place_provider_not_needed = episodes['PLACE_PROVIDER'].isna() & episodes['PLACE'].isin(
code_list_placement_with_no_provider)
mask = place_provider_needed_and_correct | place_provider_not_provided | place_provider_not_needed
validation_error_mask = ~mask
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_103():
error = ErrorDefinition(
code='103',
description='The ethnicity code is either not valid or has not been entered.',
affected_fields=['ETHNIC'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
header = dfs['Header']
code_list = [
'WBRI',
'WIRI',
'WOTH',
'WIRT',
'WROM',
'MWBC',
'MWBA',
'MWAS',
'MOTH',
'AIND',
'APKN',
'ABAN',
'AOTH',
'BCRB',
'BAFR',
'BOTH',
'CHNE',
'OOTH',
'REFU',
'NOBT'
]
mask = header['ETHNIC'].isin(code_list)
validation_error_mask = ~mask
validation_error_locations = header.index[validation_error_mask]
return {'Header': validation_error_locations.tolist()}
return error, _validate
def validate_143():
error = ErrorDefinition(
code='143',
description='The reason for new episode code is not a valid code.',
affected_fields=['RNE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
episodes = dfs['Episodes']
code_list = ['S', 'P', 'L', 'T', 'U', 'B']
mask = episodes['RNE'].isin(code_list) | episodes['RNE'].isna()
validation_error_mask = ~mask
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_144():
error = ErrorDefinition(
code='144',
description='The legal status code is not a valid code.',
affected_fields=['LS'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
episodes = dfs['Episodes']
code_list = [
'C1',
'C2',
'D1',
'E1',
'V2',
'V3',
'V4',
'J1',
'J2',
'J3',
'L1',
'L2',
'L3'
]
mask = episodes['LS'].isin(code_list) | episodes['LS'].isna()
validation_error_mask = ~mask
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_145():
error = ErrorDefinition(
code='145',
description='Category of need code is not a valid code.',
affected_fields=['CIN'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
episodes = dfs['Episodes']
code_list = [
'N1',
'N2',
'N3',
'N4',
'N5',
'N6',
'N7',
'N8',
]
mask = episodes['CIN'].isin(code_list) | episodes['CIN'].isna()
validation_error_mask = ~mask
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_146():
error = ErrorDefinition(
code='146',
description='Placement type code is not a valid code.',
affected_fields=['PLACE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
episodes = dfs['Episodes']
code_list = [
'A3',
'A4',
'A5',
'A6',
'H5',
'K1',
'K2',
'P1',
'P2',
'P3',
'R1',
'R2',
'R3',
'R5',
'S1',
'T0',
'T1',
'T2',
'T3',
'T4',
'U1',
'U2',
'U3',
'U4',
'U5',
'U6',
'Z1'
]
mask = episodes['PLACE'].isin(code_list) | episodes['PLACE'].isna()
validation_error_mask = ~mask
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_149():
error = ErrorDefinition(
code='149',
description='Reason episode ceased code is not valid. ',
affected_fields=['REC'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
episodes = dfs['Episodes']
code_list = [
'E11',
'E12',
'E2',
'E3',
'E4A',
'E4B',
'E13',
'E41',
'E45',
'E46',
'E47',
'E48',
'E5',
'E6',
'E7',
'E8',
'E9',
'E14',
'E15',
'E16',
'E17',
'X1'
]
mask = episodes['REC'].isin(code_list) | episodes['REC'].isna()
validation_error_mask = ~mask
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_167():
error = ErrorDefinition(
code='167',
description='Data entry for participation is invalid or blank.',
affected_fields=['REVIEW_CODE'],
)
def _validate(dfs):
if 'Reviews' not in dfs:
return {}
review = dfs['Reviews']
code_list = ['PN0', 'PN1', 'PN2', 'PN3', 'PN4', 'PN5', 'PN6', 'PN7']
mask = (review['REVIEW'].notna() & review['REVIEW_CODE'].isin(code_list)) | (
review['REVIEW'].isna() & review['REVIEW_CODE'].isna())
validation_error_mask = ~mask
validation_error_locations = review.index[validation_error_mask]
return {'Reviews': validation_error_locations.tolist()}
return error, _validate
def validate_101():
error = ErrorDefinition(
code='101',
description='Gender code is not valid.',
affected_fields=['SEX'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
header = dfs['Header']
code_list = ['1', '2']
mask = header['SEX'].astype(str).isin(code_list)
validation_error_mask = ~mask
validation_error_locations = header.index[validation_error_mask]
return {'Header': validation_error_locations.tolist()}
return error, _validate
def validate_141():
error = ErrorDefinition(
code='141',
description='Date episode began is not a valid date.',
affected_fields=['DECOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
mask = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce').notna()
na_location = episodes['DECOM'].isna()
validation_error_mask = ~mask & ~na_location
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_147():
error = ErrorDefinition(
code='147',
description='Date episode ceased is not a valid date.',
affected_fields=['DEC'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
mask = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce').notna()
na_location = episodes['DEC'].isna()
validation_error_mask = ~mask & ~na_location
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_171():
error = ErrorDefinition(
code='171',
description="Date of birth of mother's child is not a valid date.",
affected_fields=['MC_DOB'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
else:
header = dfs['Header']
mask = pd.to_datetime(header['MC_DOB'], format='%d/%m/%Y', errors='coerce').notna()
na_location = header['MC_DOB'].isna()
validation_error_mask = ~mask & ~na_location
validation_error_locations = header.index[validation_error_mask]
return {'Header': validation_error_locations.tolist()}
return error, _validate
def validate_102():
error = ErrorDefinition(
code='102',
description='Date of birth is not a valid date.',
affected_fields=['DOB'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
else:
header = dfs['Header']
mask = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce').notna()
validation_error_mask = ~mask
validation_error_locations = header.index[validation_error_mask]
return {'Header': validation_error_locations.tolist()}
return error, _validate
def validate_112():
error = ErrorDefinition(
code='112',
description='Date should be placed for adoption is not a valid date.',
affected_fields=['DATE_INT'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
ad1 = dfs['AD1']
mask = pd.to_datetime(ad1['DATE_INT'], format='%d/%m/%Y', errors='coerce').notna()
na_location = ad1['DATE_INT'].isna()
validation_error_mask = ~mask & ~na_location
validation_error_locations = ad1.index[validation_error_mask]
return {'AD1': validation_error_locations.tolist()}
return error, _validate
def validate_115():
error = ErrorDefinition(
code='115',
description="Date of Local Authority's (LA) decision that a child should be placed for adoption is not a valid date.",
affected_fields=['DATE_PLACED'],
)
def _validate(dfs):
if 'PlacedAdoption' not in dfs:
return {}
else:
adopt = dfs['PlacedAdoption']
mask = pd.to_datetime(adopt['DATE_PLACED'], format='%d/%m/%Y', errors='coerce').notna()
na_location = adopt['DATE_PLACED'].isna()
validation_error_mask = ~mask & ~na_location
validation_error_locations = adopt.index[validation_error_mask]
return {'PlacedAdoption': validation_error_locations.tolist()}
return error, _validate
def validate_116():
error = ErrorDefinition(
code='116',
description="Date of Local Authority's (LA) decision that a child should no longer be placed for adoption is not a valid date.",
affected_fields=['DATE_PLACED_CEASED'],
)
def _validate(dfs):
if 'PlacedAdoption' not in dfs:
return {}
else:
adopt = dfs['PlacedAdoption']
mask = pd.to_datetime(adopt['DATE_PLACED_CEASED'], format='%d/%m/%Y', errors='coerce').notna()
na_location = adopt['DATE_PLACED_CEASED'].isna()
validation_error_mask = ~mask & ~na_location
validation_error_locations = adopt.index[validation_error_mask]
return {'PlacedAdoption': validation_error_locations.tolist()}
return error, _validate
def validate_392c():
error = ErrorDefinition(
code='392c',
description='Postcode(s) provided are invalid.',
affected_fields=['HOME_POST', 'PL_POST'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
home_provided = episodes['HOME_POST'].notna()
home_details = merge_postcodes(episodes, "HOME_POST")
home_valid = home_details['pcd'].notna()
pl_provided = episodes['PL_POST'].notna()
pl_details = merge_postcodes(episodes, "PL_POST")
pl_valid = pl_details['pcd'].notna()
error_mask = (home_provided & ~home_valid) | (pl_provided & ~pl_valid)
return {'Episodes': episodes.index[error_mask].tolist()}
return error, _validate
def validate_213():
error = ErrorDefinition(
code='213',
description='Placement provider information not required.',
affected_fields=['PLACE_PROVIDER'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
mask = df['PLACE'].isin(['T0', 'T1', 'T2', 'T3', 'T4', 'Z1']) & df['PLACE_PROVIDER'].notna()
return {'Episodes': df.index[mask].tolist()}
return error, _validate
def validate_168():
error = ErrorDefinition(
code='168',
description='Unique Pupil Number (UPN) is not valid. If unknown, default codes should be UN1, UN2, UN3, UN4 or UN5.',
affected_fields=['UPN'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
else:
df = dfs['Header']
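# A valid UPN is one permitted letter (I, O and S excluded) followed by 12 digits, or 11 digits and a final letter; otherwise one of the unknown codes UN1-UN5.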
mask = df['UPN'].str.match(r'(^((?![IOS])[A-Z]){1}(\d{12}|\d{11}[A-Z]{1})$)|^(UN[1-5])$', na=False)
mask = ~mask
return {'Header': df.index[mask].tolist()}
return error, _validate
def validate_388():
error = ErrorDefinition(
code='388',
description='Reason episode ceased is coded new episode begins, but there is no continuation episode.',
affected_fields=['REC'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
df['DECOM'] = pd.to_datetime(df['DECOM'], format='%d/%m/%Y', errors='coerce')
df['DEC'] = pd.to_datetime(df['DEC'], format='%d/%m/%Y', errors='coerce')
df['DECOM'] = df['DECOM'].fillna(pd.to_datetime('01/01/1901', format='%d/%m/%Y'))  # sentinel date keeps the column as datetimes for sorting
df = df.sort_values(['CHILD', 'DECOM'])
df['DECOM_NEXT_EPISODE'] = df.groupby(['CHILD'])['DECOM'].shift(-1)
# The episode with no next DECOM (within the same child) is that child's final episode in the file
no_next = df.DECOM_NEXT_EPISODE.isna() & df.CHILD.notna()
# Dataframe with the maximum DECOM removed
max_decom_removed = df[~no_next]
# Dataframe with the maximum DECOM only
max_decom_only = df[no_next]
# Case 1: If reason episode ceased is coded X1 there must be a subsequent episode
# starting on the same day.
case1 = max_decom_removed[(max_decom_removed['REC'] == 'X1') &
(max_decom_removed['DEC'].notna()) &
(max_decom_removed['DECOM_NEXT_EPISODE'].notna()) &
(max_decom_removed['DEC'] != max_decom_removed['DECOM_NEXT_EPISODE'])]
# Case 2: If an episode ends but the child continues to be looked after, a new
# episode should start on the same day.The reason episode ceased code of
# the episode which ends must be X1.
case2 = max_decom_removed[(max_decom_removed['REC'] != 'X1') &
(max_decom_removed['REC'].notna()) &
(max_decom_removed['DEC'].notna()) &
(max_decom_removed['DECOM_NEXT_EPISODE'].notna()) &
(max_decom_removed['DEC'] == max_decom_removed['DECOM_NEXT_EPISODE'])]
# Case 3: If a child ceases to be looked after reason episode ceased code X1 must
# not be used.
case3 = max_decom_only[(max_decom_only['DEC'].notna()) &
(max_decom_only['REC'] == 'X1')]
mask_case1 = case1.index.tolist()
mask_case2 = case2.index.tolist()
mask_case3 = case3.index.tolist()
mask = mask_case1 + mask_case2 + mask_case3
mask.sort()
return {'Episodes': mask}
return error, _validate
def validate_113():
error = ErrorDefinition(
code='113',
description='Date matching child and adopter(s) is not a valid date.',
affected_fields=['DATE_MATCH'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
ad1 = dfs['AD1']
mask = pd.to_datetime(ad1['DATE_MATCH'], format='%d/%m/%Y', errors='coerce').notna()
na_location = ad1['DATE_MATCH'].isna()
validation_error_mask = ~mask & ~na_location
validation_error_locations = ad1.index[validation_error_mask]
return {'AD1': validation_error_locations.tolist()}
return error, _validate
def validate_134():
error = ErrorDefinition(
code='134',
description='Data on adoption should not be entered for the OC3 cohort.',
affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM', 'DATE_INT', 'DATE_MATCH', 'FOSTER_CARE', 'NB_ADOPTR',
'SEX_ADOPTR', 'LS_ADOPTR'],
)
def _validate(dfs):
if 'OC3' not in dfs or 'AD1' not in dfs:
return {}
else:
oc3 = dfs['OC3']
ad1 = dfs['AD1']
ad1['ad1_index'] = ad1.index
all_data = ad1.merge(oc3, how='left', on='CHILD')
na_oc3_data = (
all_data['IN_TOUCH'].isna() &
all_data['ACTIV'].isna() &
all_data['ACCOM'].isna()
)
na_ad1_data = (
all_data['DATE_INT'].isna() &
all_data['DATE_MATCH'].isna() &
all_data['FOSTER_CARE'].isna() &
all_data['NB_ADOPTR'].isna() &
all_data['SEX_ADOPTR'].isna() &
all_data['LS_ADOPTR'].isna()
)
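# Error when a child has both care leaver (OC3) data and adoption (AD1) data completed.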
validation_error = ~na_oc3_data & ~na_ad1_data
validation_error_locations = all_data.loc[validation_error, 'ad1_index'].unique()
return {'AD1': validation_error_locations.tolist()}
return error, _validate
def validate_119():
error = ErrorDefinition(
code='119',
description='If the decision is made that a child should no longer be placed for adoption, then the date of this decision and the reason why this decision was made must be completed.',
affected_fields=['REASON_PLACED_CEASED', 'DATE_PLACED_CEASED'],
)
def _validate(dfs):
if 'PlacedAdoption' not in dfs:
return {}
else:
adopt = dfs['PlacedAdoption']
na_placed_ceased = adopt['DATE_PLACED_CEASED'].isna()
na_reason_ceased = adopt['REASON_PLACED_CEASED'].isna()
validation_error = (na_placed_ceased & ~na_reason_ceased) | (~na_placed_ceased & na_reason_ceased)
validation_error_locations = adopt.index[validation_error]
return {'PlacedAdoption': validation_error_locations.tolist()}
return error, _validate
def validate_159():
error = ErrorDefinition(
code='159',
description='If a child has been recorded as not receiving an intervention for their substance misuse problem, then the additional item on whether an intervention was offered should be completed as well.',
affected_fields=['SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
mask1 = oc2['SUBSTANCE_MISUSE'].astype(str) == '1'
mask2 = oc2['INTERVENTION_RECEIVED'].astype(str) == '0'
mask3 = oc2['INTERVENTION_OFFERED'].isna()
validation_error = mask1 & mask2 & mask3
validation_error_locations = oc2.index[validation_error]
return {'OC2': validation_error_locations.tolist()}
return error, _validate
def validate_142():
error = ErrorDefinition(
code='142',
description='A new episode has started, but the previous episode has not ended.',
affected_fields=['DEC', 'REC'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
df['DECOM'] = pd.to_datetime(df['DECOM'], format='%d/%m/%Y', errors='coerce')
df['DEC'] = pd.to_datetime(df['DEC'], format='%d/%m/%Y', errors='coerce')
# Missing DECOM values are left as NaT and excluded below via the DECOM.notna() check
last_episodes = df.sort_values('DECOM').reset_index().groupby(['CHILD'])['index'].last()
ended_episodes_df = df.loc[~df.index.isin(last_episodes)]
ended_episodes_df = ended_episodes_df[(ended_episodes_df['DEC'].isna() | ended_episodes_df['REC'].isna()) &
ended_episodes_df['CHILD'].notna() & ended_episodes_df[
'DECOM'].notna()]
mask = ended_episodes_df.index.tolist()
return {'Episodes': mask}
return error, _validate
def validate_148():
error = ErrorDefinition(
code='148',
description='Date episode ceased and reason episode ceased must both be coded, or both left blank.',
affected_fields=['DEC', 'REC'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
df['DEC'] = pd.to_datetime(df['DEC'], format='%d/%m/%Y', errors='coerce')
mask = ((df['DEC'].isna()) & (df['REC'].notna())) | ((df['DEC'].notna()) & (df['REC'].isna()))
return {'Episodes': df.index[mask].tolist()}
return error, _validate
def validate_151():
error = ErrorDefinition(
code='151',
description="All data items relating to a child's adoption must be coded or left blank.",
affected_fields=['DATE_INT', 'DATE_MATCH', 'FOSTER_CARE', 'NB_ADOPTR', 'SEX_ADOPTR', 'LS_ADOPTR'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
ad1 = dfs['AD1']
na_date_int = ad1['DATE_INT'].isna()
na_date_match = ad1['DATE_MATCH'].isna()
na_foster_care = ad1['FOSTER_CARE'].isna()
na_nb_adoptr = ad1['NB_ADOPTR'].isna()
na_sex_adoptr = ad1['SEX_ADOPTR'].isna()
na_lsadoptr = ad1['LS_ADOPTR'].isna()
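# Flag records where at least one adoption item is completed but not all of them are.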
ad1_not_null = (
~na_date_int & ~na_date_match & ~na_foster_care & ~na_nb_adoptr & ~na_sex_adoptr & ~na_lsadoptr)
validation_error = (
~na_date_int | ~na_date_match | ~na_foster_care | ~na_nb_adoptr | ~na_sex_adoptr | ~na_lsadoptr) & ~ad1_not_null
validation_error_locations = ad1.index[validation_error]
return {'AD1': validation_error_locations.tolist()}
return error, _validate
def validate_182():
error = ErrorDefinition(
code='182',
description='Data entries on immunisations, teeth checks, health assessments and substance misuse problem identified should be completed or all OC2 fields should be left blank.',
affected_fields=['IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE', 'CONVICTED',
'HEALTH_CHECK', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
mask1 = (
oc2['IMMUNISATIONS'].isna() |
oc2['TEETH_CHECK'].isna() |
oc2['HEALTH_ASSESSMENT'].isna() |
oc2['SUBSTANCE_MISUSE'].isna()
)
mask2 = (
oc2['CONVICTED'].isna() &
oc2['HEALTH_CHECK'].isna() &
oc2['INTERVENTION_RECEIVED'].isna() &
oc2['INTERVENTION_OFFERED'].isna()
)
validation_error = mask1 & ~mask2
validation_error_locations = oc2.index[validation_error]
return {'OC2': validation_error_locations.tolist()}
return error, _validate
def validate_214():
error = ErrorDefinition(
code='214',
description='Placement location information not required.',
affected_fields=['PL_POST', 'URN'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
mask = df['LS'].isin(['V3', 'V4']) & ((df['PL_POST'].notna()) | (df['URN'].notna()))
return {'Episodes': df.index[mask].tolist()}
return error, _validate
def validate_222():
error = ErrorDefinition(
code='222',
description='Ofsted Unique reference number (URN) should not be recorded for this placement type.',
affected_fields=['URN'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
place_code_list = ['H5', 'P1', 'P2', 'P3', 'R1', 'R2', 'R5', 'T0', 'T1', 'T2', 'T3', 'T4', 'Z1']
mask = (df['PLACE'].isin(place_code_list)) & (df['URN'].notna()) & (df['URN'] != 'XXXXXX')
return {'Episodes': df.index[mask].tolist()}
return error, _validate
def validate_366():
error = ErrorDefinition(
code='366',
description='A child cannot change placement during the course of an individual short-term respite break.',
affected_fields=['RNE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
mask = (df['LS'] == 'V3') & (df['RNE'] != 'S')
return {'Episodes': df.index[mask].tolist()}
return error, _validate
def validate_628():
error = ErrorDefinition(
code='628',
description='Motherhood details are not required for care leavers who have not been looked after during the year.',
affected_fields=['MOTHER'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'Header' not in dfs or 'OC3' not in dfs:
return {}
else:
hea = dfs['Header']
epi = dfs['Episodes']
oc3 = dfs['OC3']
hea = hea.reset_index()
oc3_no_nulls = oc3[oc3[['IN_TOUCH', 'ACTIV', 'ACCOM']].notna().any(axis=1)]
hea_merge_epi = hea.merge(epi, how='left', on='CHILD', indicator=True)
hea_not_in_epi = hea_merge_epi[hea_merge_epi['_merge'] == 'left_only']
cohort_to_check = hea_not_in_epi.merge(oc3_no_nulls, how='inner', on='CHILD')
error_cohort = cohort_to_check[cohort_to_check['MOTHER'].notna()]
error_list = list(set(error_cohort['index'].to_list()))
error_list.sort()
return {'Header': error_list}
return error, _validate
def validate_164():
error = ErrorDefinition(
code='164',
description='Distance is not valid. Please check a valid postcode has been entered.',
affected_fields=['PL_DISTANCE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
is_short_term = df['LS'].isin(['V3', 'V4'])
distance = pd.to_numeric(df['PL_DISTANCE'], errors='coerce')
# Use a bit of tolerance in these bounds
distance_valid = distance.gt(-0.2) & distance.lt(1001.0)
mask = ~is_short_term & ~distance_valid
return {'Episodes': df.index[mask].tolist()}
return error, _validate
def validate_169():
error = ErrorDefinition(
code='169',
description='Local Authority (LA) of placement is not valid or is missing. Please check a valid postcode has been entered.',
affected_fields=['PL_LA'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
is_short_term = df['LS'].isin(['V3', 'V4'])
# Because PL_LA is derived, it will always be valid if present
mask = ~is_short_term & df['PL_LA'].isna()
return {'Episodes': df.index[mask].tolist()}
return error, _validate
def validate_179():
error = ErrorDefinition(
code='179',
description='Placement location code is not a valid code.',
affected_fields=['PL_LOCATION'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
is_short_term = df['LS'].isin(['V3', 'V4'])
# Because PL_LOCATION is derived, it will always be valid if present
mask = ~is_short_term & df['PL_LOCATION'].isna()
return {'Episodes': df.index[mask].tolist()}
return error, _validate
def validate_1015():
error = ErrorDefinition(
code='1015',
description='Placement provider is own provision but child not placed in own LA.',
affected_fields=['PL_LA'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
local_authority = dfs['metadata']['localAuthority']
placement_fostering_or_adoption = df['PLACE'].isin([
'A3', 'A4', 'A5', 'A6', 'U1', 'U2', 'U3', 'U4', 'U5', 'U6',
])
own_provision = df['PLACE_PROVIDER'].eq('PR1')
is_short_term = df['LS'].isin(['V3', 'V4'])
is_pl_la = df['PL_LA'].eq(local_authority)
checked_episodes = ~placement_fostering_or_adoption & ~is_short_term & own_provision
checked_episodes = checked_episodes & df['LS'].notna() & df['PLACE'].notna()
mask = checked_episodes & ~is_pl_la
return {'Episodes': df.index[mask].tolist()}
return error, _validate
def validate_411():
error = ErrorDefinition(
code='411',
description='Placement location code disagrees with LA of placement.',
affected_fields=['PL_LOCATION'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
local_authority = dfs['metadata']['localAuthority']
mask = df['PL_LOCATION'].eq('IN') & df['PL_LA'].ne(local_authority)
return {'Episodes': df.index[mask].tolist()}
return error, _validate
def validate_420():
error = ErrorDefinition(
code='420',
description='LA of placement completed but child is looked after under legal status V3 or V4.',
affected_fields=['PL_LA'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
is_short_term = df['LS'].isin(['V3', 'V4'])
mask = is_short_term & df['PL_LA'].notna()
return {'Episodes': df.index[mask].tolist()}
return error, _validate
def validate_355():
error = ErrorDefinition(
code='355',
description='Episode appears to have lasted for less than 24 hours',
affected_fields=['DECOM', 'DEC'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
mask = df['DECOM'].astype(str) == df['DEC'].astype(str)
return {'Episodes': df.index[mask].tolist()}
return error, _validate
def validate_586():
error = ErrorDefinition(
code='586',
description='Dates of missing periods are before child’s date of birth.',
affected_fields=['MIS_START'],
)
def _validate(dfs):
if 'Missing' not in dfs:
return {}
else:
df = dfs['Missing']
df['DOB'] = pd.to_datetime(df['DOB'], format='%d/%m/%Y', errors='coerce')
df['MIS_START'] = pd.to_datetime(df['MIS_START'], format='%d/%m/%Y', errors='coerce')
error_mask = df['MIS_START'].notna() & (df['MIS_START'] <= df['DOB'])
return {'Missing': df.index[error_mask].to_list()}
return error, _validate
def validate_630():
error = ErrorDefinition(
code='630',
description='Information on previous permanence option should be returned.',
affected_fields=['RNE'],
)
def _validate(dfs):
if 'PrevPerm' not in dfs or 'Episodes' not in dfs:
return {}
else:
epi = dfs['Episodes']
pre = dfs['PrevPerm']
epi['DECOM'] = pd.to_datetime(epi['DECOM'], format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce')
epi = epi.reset_index()
# Form the episode dataframe which has an 'RNE' of 'S' in this financial year
epi_has_rne_of_S_in_year = epi[(epi['RNE'] == 'S') & (epi['DECOM'] >= collection_start)]
# Merge to see
# 1) which CHILD ids are missing from the PrevPerm file
# 2) which CHILD are in the prevPerm file, but don't have the LA_PERM/DATE_PERM field completed where they should be
# 3) which CHILD are in the PrevPerm file, but don't have the PREV_PERM field completed.
merged_epi_preperm = epi_has_rne_of_S_in_year.merge(pre, on='CHILD', how='left', indicator=True)
error_not_in_preperm = merged_epi_preperm['_merge'] == 'left_only'
error_wrong_values_in_preperm = (merged_epi_preperm['PREV_PERM'] != 'Z1') & (
merged_epi_preperm[['LA_PERM', 'DATE_PERM']].isna().any(axis=1))
error_null_prev_perm = (merged_epi_preperm['_merge'] == 'both') & (merged_epi_preperm['PREV_PERM'].isna())
error_mask = error_not_in_preperm | error_wrong_values_in_preperm | error_null_prev_perm
error_list = merged_epi_preperm[error_mask]['index'].to_list()
error_list = list(set(error_list))
error_list.sort()
return {'Episodes': error_list}
return error, _validate
def validate_501():
error = ErrorDefinition(
code='501',
description='A new episode has started before the end date of the previous episode.',
affected_fields=['DECOM', 'DEC'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
epi = dfs['Episodes']
epi = epi.reset_index()
epi['DECOM'] = pd.to_datetime(epi['DECOM'], format='%d/%m/%Y', errors='coerce')
epi['DEC'] = pd.to_datetime(epi['DEC'], format='%d/%m/%Y', errors='coerce')
epi = epi.sort_values(['CHILD', 'DECOM'])
epi_lead = epi.shift(1)
epi_lead = epi_lead.reset_index()
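# epi_lead holds each row's predecessor after the positional shift within the CHILD/DECOM sort; merging on the row label attaches that previous episode with the '_prev' suffix.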
m_epi = epi.merge(epi_lead, left_on='index', right_on='level_0', suffixes=('', '_prev'))
error_cohort = m_epi[(m_epi['CHILD'] == m_epi['CHILD_prev']) & (m_epi['DECOM'] < m_epi['DEC_prev'])]
error_list = error_cohort['index'].to_list()
error_list.sort()
return {'Episodes': error_list}
return error, _validate
def validate_502():
error = ErrorDefinition(
code='502',
description='Last year’s record ended with an open episode. The date on which that episode started does not match the start date of the first episode on this year’s record.',
affected_fields=['DECOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'Episodes_last' not in dfs:
return {}
else:
epi = dfs['Episodes']
epi_last = dfs['Episodes_last']
epi = epi.reset_index()
epi['DECOM'] = pd.to_datetime(epi['DECOM'], format='%d/%m/%Y', errors='coerce')
epi_last['DECOM'] = pd.to_datetime(epi_last['DECOM'], format='%d/%m/%Y', errors='coerce')
epi_last_no_dec = epi_last[epi_last['DEC'].isna()]
epi_min_decoms_index = epi[['CHILD', 'DECOM']].groupby(['CHILD'])['DECOM'].idxmin()
epi_min_decom_df = epi.loc[epi_min_decoms_index, :]
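# Compare this year's earliest episode per child against last year's open episode for the same child.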
merged_episodes = epi_min_decom_df.merge(epi_last_no_dec, on='CHILD', how='inner')
error_cohort = merged_episodes[merged_episodes['DECOM_x'] != merged_episodes['DECOM_y']]
error_list = error_cohort['index'].to_list()
error_list = list(set(error_list))
error_list.sort()
return {'Episodes': error_list}
return error, _validate
def validate_153():
error = ErrorDefinition(
code='153',
description="All data items relating to a child's activity or accommodation after leaving care must be coded or left blank.",
affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM'],
)
def _validate(dfs):
if 'OC3' not in dfs:
return {}
oc3 = dfs['OC3']
oc3_not_na = (
oc3['IN_TOUCH'].notna() &
oc3['ACTIV'].notna() &
oc3['ACCOM'].notna()
)
oc3_all_na = (
oc3['IN_TOUCH'].isna() &
oc3['ACTIV'].isna() &
oc3['ACCOM'].isna()
)
validation_error = ~oc3_not_na & ~oc3_all_na
validation_error_locations = oc3.index[validation_error]
return {'OC3': validation_error_locations.to_list()}
return error, _validate
def validate_166():
error = ErrorDefinition(
code='166',
description="Date of review is invalid or blank.",
affected_fields=['REVIEW'],
)
def _validate(dfs):
if 'Reviews' not in dfs:
return {}
else:
review = dfs['Reviews']
error_mask = pd.to_datetime(review['REVIEW'], format='%d/%m/%Y', errors='coerce').isna()
validation_error_locations = review.index[error_mask]
return {'Reviews': validation_error_locations.to_list()}
return error, _validate
def validate_174():
error = ErrorDefinition(
code='174',
description="Mother's child date of birth is recorded but gender shows that the child is a male.",
affected_fields=['SEX', 'MC_DOB'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
else:
header = dfs['Header']
child_is_male = header['SEX'].astype(str) == '1'
mc_dob_recorded = header['MC_DOB'].notna()
error_mask = child_is_male & mc_dob_recorded
validation_error_locations = header.index[error_mask]
return {'Header': validation_error_locations.to_list()}
return error, _validate
def validate_180():
error = ErrorDefinition(
code='180',
description="Data entry for the strengths and difficulties questionnaire (SDQ) score is invalid.",
affected_fields=['SDQ_SCORE'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
oc2['SDQ_SCORE'] = pd.to_numeric(oc2['SDQ_SCORE'], errors='coerce')
error_mask = oc2['SDQ_SCORE'].notna() & ~oc2['SDQ_SCORE'].isin(range(41))
validation_error_locations = oc2.index[error_mask]
return {'OC2': validation_error_locations.to_list()}
return error, _validate
def validate_181():
error = ErrorDefinition(
code='181',
description="Data items relating to children looked after continuously for 12 months should be completed with a 0 or 1.",
affected_fields=['CONVICTED', 'HEALTH_CHECK', 'IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT',
'SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
code_list = ['0', '1']
fields_of_interest = ['CONVICTED', 'HEALTH_CHECK', 'IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT',
'SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED']
error_mask = (
oc2[fields_of_interest].notna()
& ~oc2[fields_of_interest].astype(str).isin(['0', '1'])
).any(axis=1)
validation_error_locations = oc2.index[error_mask]
return {'OC2': validation_error_locations.tolist()}
return error, _validate
def validate_192():
error = ErrorDefinition(
code='192',
description="Child has been identified as having a substance misuse problem but the additional item on whether an intervention was received has been left blank.",
affected_fields=['SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
misuse = oc2['SUBSTANCE_MISUSE'].astype(str) == '1'
intervention_blank = oc2['INTERVENTION_RECEIVED'].isna()
error_mask = misuse & intervention_blank
validation_error_locations = oc2.index[error_mask]
return {'OC2': validation_error_locations.to_list()}
return error, _validate
def validate_193():
error = ErrorDefinition(
code='193',
description="Child not identified as having a substance misuse problem but at least one of the two additional items on whether an intervention were offered and received have been completed.",
affected_fields=['SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
no_substance_misuse = oc2['SUBSTANCE_MISUSE'].isna() | (oc2['SUBSTANCE_MISUSE'].astype(str) == '0')
intervention_not_blank = oc2['INTERVENTION_RECEIVED'].notna() | oc2['INTERVENTION_OFFERED'].notna()
error_mask = no_substance_misuse & intervention_not_blank
validation_error_locations = oc2.index[error_mask]
return {'OC2': validation_error_locations.tolist()}
return error, _validate
def validate_197a():
error = ErrorDefinition(
code='197a',
description="Reason for no Strengths and Difficulties (SDQ) score is not required if Strengths and Difficulties Questionnaire score is filled in.",
affected_fields=['SDQ_SCORE', 'SDQ_REASON'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
sdq_filled_in = oc2['SDQ_SCORE'].notna()
reason_filled_in = oc2['SDQ_REASON'].notna()
error_mask = sdq_filled_in & reason_filled_in
validation_error_locations = oc2.index[error_mask]
return {'OC2': validation_error_locations.tolist()}
return error, _validate
def validate_567():
error = ErrorDefinition(
code='567',
description='The date that the missing episode or episode that the child was away from placement without authorisation ended is before the date that it started.',
affected_fields=['MIS_START', 'MIS_END'],
)
def _validate(dfs):
if 'Missing' not in dfs:
return {}
else:
mis = dfs['Missing']
mis['MIS_START'] = pd.to_datetime(mis['MIS_START'], format='%d/%m/%Y', errors='coerce')
mis['MIS_END'] = pd.to_datetime(mis['MIS_END'], format='%d/%m/%Y', errors='coerce')
mis_error = mis[mis['MIS_START'] > mis['MIS_END']]
return {'Missing': mis_error.index.to_list()}
return error, _validate
def validate_304():
error = ErrorDefinition(
code='304',
description='Date unaccompanied asylum-seeking child (UASC) status ceased must be on or before the 18th birthday of a child.',
affected_fields=['DUC'],
)
def _validate(dfs):
if 'UASC' not in dfs:
return {}
else:
uasc = dfs['UASC']
uasc['DOB'] = pd.to_datetime(uasc['DOB'], format='%d/%m/%Y', errors='coerce')
uasc['DUC'] = pd.to_datetime(uasc['DUC'], format='%d/%m/%Y', errors='coerce')
mask = uasc['DUC'].notna() & (uasc['DUC'] > uasc['DOB'] + pd.offsets.DateOffset(years=18))
return {'UASC': uasc.index[mask].to_list()}
return error, _validate
def validate_333():
error = ErrorDefinition(
code='333',
description='Date should be placed for adoption must be on or prior to the date of matching child with adopter(s).',
affected_fields=['DATE_INT'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
adt = dfs['AD1']
adt['DATE_MATCH'] = pd.to_datetime(adt['DATE_MATCH'], format='%d/%m/%Y', errors='coerce')
adt['DATE_INT'] = pd.to_datetime(adt['DATE_INT'], format='%d/%m/%Y', errors='coerce')
# If <DATE_MATCH> provided, then <DATE_INT> must also be provided and be <= <DATE_MATCH>
mask1 = adt['DATE_MATCH'].notna() & adt['DATE_INT'].isna()
mask2 = adt['DATE_MATCH'].notna() & adt['DATE_INT'].notna() & (adt['DATE_INT'] > adt['DATE_MATCH'])
mask = mask1 | mask2
return {'AD1': adt.index[mask].to_list()}
return error, _validate
def validate_1011():
error = ErrorDefinition(
code='1011',
description='This child is recorded as having his/her care transferred to another local authority for the final episode and therefore should not have the care leaver information completed.',
affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM'],
)
def _validate(dfs):
if 'OC3' not in dfs or 'Episodes' not in dfs:
return {}
else:
epi = dfs['Episodes']
oc3 = dfs['OC3']
epi['DECOM'] = pd.to_datetime(epi['DECOM'], format='%d/%m/%Y', errors='coerce')
# If final <REC> = 'E3' then <IN_TOUCH>; <ACTIV> and <ACCOM> should not be provided
epi.sort_values(['CHILD', 'DECOM'], inplace=True)
grouped_decom_by_child = epi.groupby(['CHILD'])['DECOM'].idxmax(skipna=True)
max_decom_only = epi.loc[epi.index.isin(grouped_decom_by_child), :]
E3_is_last = max_decom_only[max_decom_only['REC'] == 'E3']
oc3.reset_index(inplace=True)
cohort_to_check = oc3.merge(E3_is_last, on='CHILD', how='inner')
error_mask = cohort_to_check[['IN_TOUCH', 'ACTIV', 'ACCOM']].notna().any(axis=1)
error_list = cohort_to_check['index'][error_mask].to_list()
error_list = list(set(error_list))
error_list.sort()
return {'OC3': error_list}
return error, _validate
def validate_574():
error = ErrorDefinition(
code='574',
description='A new missing/away from placement without authorisation period cannot start when the previous missing/away from placement without authorisation period is still open. Missing/away from placement without authorisation periods should also not overlap.',
affected_fields=['MIS_START', 'MIS_END'],
)
def _validate(dfs):
if 'Missing' not in dfs:
return {}
else:
mis = dfs['Missing']
mis['MIS_START'] = pd.to_datetime(mis['MIS_START'], format='%d/%m/%Y', errors='coerce')
mis['MIS_END'] = pd.to_datetime(mis['MIS_END'], format='%d/%m/%Y', errors='coerce')
mis.sort_values(['CHILD', 'MIS_START'], inplace=True)
mis.reset_index(inplace=True)
mis.reset_index(inplace=True) # Twice on purpose
mis['LAG_INDEX'] = mis['level_0'].shift(-1)
lag_mis = mis.merge(mis, how='inner', left_on='level_0', right_on='LAG_INDEX', suffixes=['', '_PREV'])
# We're only interested in cases where there is more than one row for a child.
lag_mis = lag_mis[lag_mis['CHILD'] == lag_mis['CHILD_PREV']]
# A previous MIS_END date is null
mask1 = lag_mis['MIS_END_PREV'].isna()
# MIS_START is before previous MIS_END (overlapping dates)
mask2 = lag_mis['MIS_START'] < lag_mis['MIS_END_PREV']
mask = mask1 | mask2
error_list = lag_mis['index'][mask].to_list()
error_list.sort()
return {'Missing': error_list}
return error, _validate
def validate_564():
error = ErrorDefinition(
code='564',
description='Child was missing or away from placement without authorisation and the date started is blank.',
affected_fields=['MISSING', 'MIS_START'],
)
def _validate(dfs):
if 'Missing' not in dfs:
return {}
else:
mis = dfs['Missing']
error_mask = mis['MISSING'].isin(['M', 'A', 'm', 'a']) & mis['MIS_START'].isna()
return {'Missing': mis.index[error_mask].to_list()}
return error, _validate
def validate_566():
error = ErrorDefinition(
code='566',
        description="The date that the child's episode of being missing or away from placement without authorisation ended has been completed but whether the child was missing or away without authorisation has not been completed.",
affected_fields=['MISSING', 'MIS_END'],
)
def _validate(dfs):
if 'Missing' not in dfs:
return {}
else:
mis = dfs['Missing']
error_mask = mis['MISSING'].isna() & mis['MIS_END'].notna()
return {'Missing': mis.index[error_mask].to_list()}
return error, _validate
def validate_436():
error = ErrorDefinition(
code='436',
description='Reason for new episode is that both child’s placement and legal status have changed, but this is not reflected in the episode data.',
affected_fields=['RNE', 'LS', 'PLACE', 'PL_POST', 'URN', 'PLACE_PROVIDER'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
epi = dfs['Episodes']
epi['DECOM'] = pd.to_datetime(epi['DECOM'], format='%d/%m/%Y', errors='coerce')
epi.sort_values(['CHILD', 'DECOM'], inplace=True)
epi.reset_index(inplace=True)
epi.reset_index(inplace=True)
epi['LAG_INDEX'] = epi['level_0'].shift(-1)
epi.fillna(value={"LS": '*', "PLACE": '*', "PL_POST": '*', "URN": '*', "PLACE_PROVIDER": '*'}, inplace=True)
epi_merge = epi.merge(epi, how='inner', left_on='level_0', right_on='LAG_INDEX', suffixes=['', '_PRE'])
epi_multi_row = epi_merge[epi_merge['CHILD'] == epi_merge['CHILD_PRE']]
epi_has_B_U = epi_multi_row[epi_multi_row['RNE'].isin(['U', 'B'])]
mask_ls = epi_has_B_U['LS'] == epi_has_B_U['LS_PRE']
mask1 = epi_has_B_U['PLACE'] == epi_has_B_U['PLACE_PRE']
mask2 = epi_has_B_U['PL_POST'] == epi_has_B_U['PL_POST_PRE']
mask3 = epi_has_B_U['URN'] == epi_has_B_U['URN_PRE']
mask4 = epi_has_B_U['PLACE_PROVIDER'] == epi_has_B_U['PLACE_PROVIDER_PRE']
error_mask = mask_ls | (mask1 & mask2 & mask3 & mask4)
error_list = epi_has_B_U[error_mask]['index'].to_list()
error_list.sort()
return {'Episodes': error_list}
return error, _validate
def validate_570():
error = ErrorDefinition(
code='570',
description='The date that the child started to be missing or away from placement without authorisation is after the end of the collection year.',
affected_fields=['MIS_START'],
)
def _validate(dfs):
if 'Missing' not in dfs:
return {}
else:
mis = dfs['Missing']
collection_end = pd.to_datetime(dfs['metadata']['collection_end'], format='%d/%m/%Y', errors='coerce')
mis['MIS_START'] = pd.to_datetime(mis['MIS_START'], format='%d/%m/%Y', errors='coerce')
error_mask = mis['MIS_START'] > collection_end
return {'Missing': mis.index[error_mask].to_list()}
return error, _validate
def validate_531():
error = ErrorDefinition(
code='531',
description='A placement provider code of PR5 cannot be associated with placements P1.',
affected_fields=['PLACE', 'PLACE_PROVIDER'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
epi = dfs['Episodes']
error_mask = (epi['PLACE'] == 'P1') & (epi['PLACE_PROVIDER'] == 'PR5')
return {'Episodes': epi.index[error_mask].to_list()}
return error, _validate
def validate_542():
error = ErrorDefinition(
code='542',
description='A child aged under 10 at 31 March should not have conviction information completed.',
affected_fields=['CONVICTED'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
oc2['DOB'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(dfs['metadata']['collection_end'], format='%d/%m/%Y', errors='coerce')
error_mask = (oc2['DOB'] + pd.offsets.DateOffset(years=10) > collection_end) & oc2['CONVICTED'].notna()
return {'OC2': oc2.index[error_mask].to_list()}
return error, _validate
def validate_620():
error = ErrorDefinition(
code='620',
description='Child has been recorded as a mother, but date of birth shows that the mother is under 11 years of age.',
affected_fields=['DOB', 'MOTHER'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
else:
hea = dfs['Header']
collection_start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce')
hea['DOB'] = pd.to_datetime(hea['DOB'], format='%d/%m/%Y', errors='coerce')
hea_mother = hea[hea['MOTHER'].astype(str) == '1']
error_cohort = (hea_mother['DOB'] + pd.offsets.DateOffset(years=11)) > collection_start
return {'Header': hea_mother.index[error_cohort].to_list()}
return error, _validate
def validate_225():
error = ErrorDefinition(
code='225',
description='Reason for placement change must be recorded.',
affected_fields=['REASON_PLACE_CHANGE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
epi = dfs['Episodes']
epi['DECOM'] = pd.to_datetime(epi['DECOM'], format='%d/%m/%Y', errors='coerce')
epi.sort_values(['CHILD', 'DECOM'], inplace=True)
epi.reset_index(inplace=True)
epi.reset_index(inplace=True)
epi['LAG_INDEX'] = epi['level_0'].shift(1)
m_epi = epi.merge(epi, how='inner', left_on='level_0', right_on='LAG_INDEX', suffixes=['', '_NEXT'])
m_epi = m_epi[m_epi['CHILD'] == m_epi['CHILD_NEXT']]
mask_is_X1 = m_epi['REC'] == 'X1'
mask_null_place_chg = m_epi['REASON_PLACE_CHANGE'].isna()
mask_place_not_T = ~m_epi['PLACE'].isin(['T0', 'T1', 'T2', 'T3', 'T4'])
mask_next_is_PBTU = m_epi['RNE_NEXT'].isin(['P', 'B', 'T', 'U'])
mask_next_place_not_T = ~m_epi['PLACE_NEXT'].isin(['T0', 'T1', 'T2', 'T3', 'T4'])
error_mask = mask_is_X1 & mask_null_place_chg & mask_place_not_T & mask_next_is_PBTU & mask_next_place_not_T
error_list = m_epi['index'][error_mask].to_list()
return {'Episodes': error_list}
return error, _validate
def validate_353():
error = ErrorDefinition(
code='353',
description='No episode submitted can start before 14 October 1991.',
affected_fields=['DECOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
epi = dfs['Episodes']
            epi['DECOM'] = pd.to_datetime(epi['DECOM'], format='%d/%m/%Y', errors='coerce')
# -*- coding: utf-8 -*-
"""
Tests the TextReader class in parsers.pyx, which
is integral to the C engine in parsers.py
"""
import os
import numpy as np
from numpy import nan
import pytest
import pandas._libs.parsers as parser
from pandas._libs.parsers import TextReader
import pandas.compat as compat
from pandas.compat import BytesIO, StringIO, map
from pandas import DataFrame
import pandas.util.testing as tm
from pandas.util.testing import assert_frame_equal
from pandas.io.parsers import TextFileReader, read_csv
class TestTextReader(object):
@pytest.fixture(autouse=True)
def setup_method(self, datapath):
self.dirpath = datapath('io', 'parser', 'data')
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def test_file_handle(self):
with open(self.csv1, 'rb') as f:
reader = TextReader(f)
reader.read()
def test_string_filename(self):
reader = TextReader(self.csv1, header=None)
reader.read()
def test_file_handle_mmap(self):
with open(self.csv1, 'rb') as f:
reader = TextReader(f, memory_map=True, header=None)
reader.read()
def test_StringIO(self):
with open(self.csv1, 'rb') as f:
text = f.read()
src = BytesIO(text)
reader = TextReader(src, header=None)
reader.read()
def test_string_factorize(self):
# should this be optional?
data = 'a\nb\na\nb\na'
reader = TextReader(StringIO(data), header=None)
result = reader.read()
assert len(set(map(id, result[0]))) == 2
def test_skipinitialspace(self):
data = ('a, b\n'
'a, b\n'
'a, b\n'
'a, b')
reader = TextReader(StringIO(data), skipinitialspace=True,
header=None)
result = reader.read()
tm.assert_numpy_array_equal(result[0], np.array(['a', 'a', 'a', 'a'],
dtype=np.object_))
tm.assert_numpy_array_equal(result[1], np.array(['b', 'b', 'b', 'b'],
dtype=np.object_))
def test_parse_booleans(self):
data = 'True\nFalse\nTrue\nTrue'
reader = TextReader(StringIO(data), header=None)
result = reader.read()
assert result[0].dtype == np.bool_
def test_delimit_whitespace(self):
data = 'a b\na\t\t "b"\n"a"\t \t b'
reader = TextReader(StringIO(data), delim_whitespace=True,
header=None)
result = reader.read()
tm.assert_numpy_array_equal(result[0], np.array(['a', 'a', 'a'],
dtype=np.object_))
tm.assert_numpy_array_equal(result[1], np.array(['b', 'b', 'b'],
dtype=np.object_))
def test_embedded_newline(self):
data = 'a\n"hello\nthere"\nthis'
reader = TextReader(StringIO(data), header=None)
result = reader.read()
expected = np.array(['a', 'hello\nthere', 'this'], dtype=np.object_)
        tm.assert_numpy_array_equal(result[0], expected)
"""
classification models
"""
import numpy as np
import pandas as pd
from tqdm import tqdm
import datetime
import time
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer, MissingIndicator
from sklearn.preprocessing import StandardScaler, OneHotEncoder, OrdinalEncoder
from sklearn.compose import ColumnTransformer
from sklearn.utils import all_estimators
from sklearn.base import ClassifierMixin
from sklearn.naive_bayes import (ComplementNB, MultinomialNB)
from sklearn.multioutput import ClassifierChain
from sklearn.ensemble import GradientBoostingClassifier
# from sklearn.ensemble import HistGradientBoostingClassifier
from sklearn.isotonic import IsotonicRegression
from sklearn.neural_network import MLPClassifier
from sklearn.cross_decomposition import CCA
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.metrics import (
accuracy_score,
balanced_accuracy_score,
roc_auc_score,
f1_score,
)
import sklearn
from sklearn.utils._testing import ignore_warnings
import warnings
import xgboost
# import catboost
import lightgbm
warnings.filterwarnings("ignore")
pd.set_option("display.precision", 2)
pd.set_option("display.float_format", lambda x: "%.2f" % x)
removed_classifiers = [
# ("CheckingClassifier", sklearn.utils._mocking.CheckingClassifier),
("ClassifierChain", ClassifierChain),
("ComplementNB", ComplementNB),
("GradientBoostingClassifier",GradientBoostingClassifier,),
("GaussianProcessClassifier",GaussianProcessClassifier,),
# (
# "HistGradientBoostingClassifier",
# HistGradientBoostingClassifier,
# ),
("MLPClassifier", MLPClassifier),
("LogisticRegressionCV", sklearn.linear_model.LogisticRegressionCV),
("MultiOutputClassifier", sklearn.multioutput.MultiOutputClassifier),
("MultinomialNB", MultinomialNB),
("OneVsOneClassifier", sklearn.multiclass.OneVsOneClassifier),
("OneVsRestClassifier", sklearn.multiclass.OneVsRestClassifier),
("OutputCodeClassifier", sklearn.multiclass.OutputCodeClassifier),
(
"RadiusNeighborsClassifier",
sklearn.neighbors.RadiusNeighborsClassifier,
),
("VotingClassifier", sklearn.ensemble.VotingClassifier),
]
CLASSIFIERS = [est for est in all_estimators() if
(issubclass(est[1], ClassifierMixin) and (est[0] not in removed_classifiers))]
CLASSIFIERS.append(("XGBClassifier", xgboost.XGBClassifier))
CLASSIFIERS.append(("LGBMClassifier", lightgbm.LGBMClassifier))
# CLASSIFIERS.append(('CatBoostClassifier',catboost.CatBoostClassifier))
CLASSIFIERS_DICT = {key : value for key, value in CLASSIFIERS}
numeric_transformer = Pipeline(
steps=[("imputer", SimpleImputer(strategy="mean")), ("scaler", StandardScaler())]
)
categorical_transformer_low = Pipeline(
steps=[
("imputer", SimpleImputer(strategy="constant", fill_value="missing")),
("encoding", OneHotEncoder(handle_unknown="ignore", sparse=False)),
]
)
categorical_transformer_high = Pipeline(
steps=[
("imputer", SimpleImputer(strategy="constant", fill_value="missing")),
        # 'OrdinalEncoder' raises a ValueError when it encounters an unknown value. Check https://github.com/scikit-learn/scikit-learn/pull/13423
("encoding", OrdinalEncoder()),
]
)
# Helper function
def get_card_split(df, cols, n=11):
"""
Splits categorical columns into 2 lists based on cardinality (i.e # of unique values)
Parameters
----------
df : Pandas DataFrame
DataFrame from which the cardinality of the columns is calculated.
cols : list-like
Categorical columns to list
n : int, optional (default=11)
The value of 'n' will be used to split columns.
Returns
-------
card_low : list-like
Columns with cardinality < n
card_high : list-like
Columns with cardinality >= n
"""
cond = df[cols].nunique() > n
card_high = cols[cond]
card_low = cols[~cond]
return card_low, card_high
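# Minimal usage sketch for get_card_split with invented data: columns with more than n
# unique values are routed to the ordinal encoder, the rest to the one-hot encoder.
def _demo_get_card_split():
    df = pd.DataFrame({
        "colour": ["red", "blue", "red", "green"],   # 3 unique values -> low cardinality
        "user_id": ["u1", "u2", "u3", "u4"],         # 4 unique values -> high cardinality
    })
    card_low, card_high = get_card_split(df, df.columns, n=3)
    # card_low  -> Index(['colour'], dtype='object')
    # card_high -> Index(['user_id'], dtype='object')
    return card_low, card_high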
class Classifier:
"""
    This class helps in fitting all of the classification algorithms that are available in scikit-learn.
    Parameters
    ----------
    verbose : int, optional (default=0)
        For the liblinear and lbfgs solvers, set verbose to any positive
        number for verbosity.
    ignore_warnings : bool, optional (default=True)
        When set to True, warnings related to algorithms that are not able to run are ignored.
    custom_metric : function, optional (default=None)
        When a function is provided, models are evaluated based on the custom evaluation metric provided.
    predictions : bool, optional (default=False)
        When set to True, the predictions of all the models are returned as a dataframe.
    classifiers : list, optional (default="all")
        When a list of classifier names is provided, only the chosen classifier(s) are trained.
Examples
--------
>>> from prettymetrics.clf import Classifier
>>> from sklearn.datasets import load_breast_cancer
>>> from sklearn.model_selection import train_test_split
>>> data = load_breast_cancer()
>>> X = data.data
>>> y= data.target
>>> X_train, X_test, y_train, y_test = train_test_split(X, y,test_size=.5,random_state =123)
>>> clf = Classifier(verbose=0,ignore_warnings=True, custom_metric=None)
>>> models,predictions = clf.fit(X_train, X_test, y_train, y_test)
>>> model_dictionary = clf.provide_models(X_train,X_test,y_train,y_test)
>>> models
| Model | Accuracy | Balanced Accuracy | ROC AUC | F1 Score | Time Taken |
|:-------------------------------|-----------:|--------------------:|----------:|-----------:|-------------:|
| LinearSVC | 0.989474 | 0.987544 | 0.987544 | 0.989462 | 0.0150008 |
| SGDClassifier | 0.989474 | 0.987544 | 0.987544 | 0.989462 | 0.0109992 |
| MLPClassifier | 0.985965 | 0.986904 | 0.986904 | 0.985994 | 0.426 |
| Perceptron | 0.985965 | 0.984797 | 0.984797 | 0.985965 | 0.0120046 |
| LogisticRegression | 0.985965 | 0.98269 | 0.98269 | 0.985934 | 0.0200036 |
| LogisticRegressionCV | 0.985965 | 0.98269 | 0.98269 | 0.985934 | 0.262997 |
| SVC | 0.982456 | 0.979942 | 0.979942 | 0.982437 | 0.0140011 |
| CalibratedClassifierCV | 0.982456 | 0.975728 | 0.975728 | 0.982357 | 0.0350015 |
| PassiveAggressiveClassifier | 0.975439 | 0.974448 | 0.974448 | 0.975464 | 0.0130005 |
| LabelPropagation | 0.975439 | 0.974448 | 0.974448 | 0.975464 | 0.0429988 |
| LabelSpreading | 0.975439 | 0.974448 | 0.974448 | 0.975464 | 0.0310006 |
| RandomForestClassifier | 0.97193 | 0.969594 | 0.969594 | 0.97193 | 0.033 |
| GradientBoostingClassifier | 0.97193 | 0.967486 | 0.967486 | 0.971869 | 0.166998 |
| QuadraticDiscriminantAnalysis | 0.964912 | 0.966206 | 0.966206 | 0.965052 | 0.0119994 |
| HistGradientBoostingClassifier | 0.968421 | 0.964739 | 0.964739 | 0.968387 | 0.682003 |
| RidgeClassifierCV | 0.97193 | 0.963272 | 0.963272 | 0.971736 | 0.0130029 |
| RidgeClassifier | 0.968421 | 0.960525 | 0.960525 | 0.968242 | 0.0119977 |
| AdaBoostClassifier | 0.961404 | 0.959245 | 0.959245 | 0.961444 | 0.204998 |
| ExtraTreesClassifier | 0.961404 | 0.957138 | 0.957138 | 0.961362 | 0.0270066 |
| KNeighborsClassifier | 0.961404 | 0.95503 | 0.95503 | 0.961276 | 0.0560005 |
| BaggingClassifier | 0.947368 | 0.954577 | 0.954577 | 0.947882 | 0.0559971 |
| BernoulliNB | 0.950877 | 0.951003 | 0.951003 | 0.951072 | 0.0169988 |
| LinearDiscriminantAnalysis | 0.961404 | 0.950816 | 0.950816 | 0.961089 | 0.0199995 |
| GaussianNB | 0.954386 | 0.949536 | 0.949536 | 0.954337 | 0.0139935 |
| NuSVC | 0.954386 | 0.943215 | 0.943215 | 0.954014 | 0.019989 |
| DecisionTreeClassifier | 0.936842 | 0.933693 | 0.933693 | 0.936971 | 0.0170023 |
| NearestCentroid | 0.947368 | 0.933506 | 0.933506 | 0.946801 | 0.0160074 |
| ExtraTreeClassifier | 0.922807 | 0.912168 | 0.912168 | 0.922462 | 0.0109999 |
| CheckingClassifier | 0.361404 | 0.5 | 0.5 | 0.191879 | 0.0170043 |
| DummyClassifier | 0.512281 | 0.489598 | 0.489598 | 0.518924 | 0.0119965 |
"""
def __init__(
self,
verbose = 0,
ignore_warnings = True,
custom_metric = None,
predictions = False,
random_state = 42,
classifiers = "all"
):
self.verbose = verbose
self.ignore_warnings = ignore_warnings
self.custom_metric = custom_metric
self.predictions = predictions
self.models = {}
self.random_state = random_state
self.classifiers = classifiers
def get_classifiers(self):
if self.classifiers == "all":
self.classifiers = CLASSIFIERS
return
try:
temp_list = [(classifier, CLASSIFIERS_DICT[classifier]) for classifier in self.classifiers]
self.classifiers = temp_list
except Exception as exception:
print(exception)
print("Invalid Classifier(s)")
def fit(self, X_train, X_test, y_train, y_test):
"""Fit Classification algorithms to X_train and y_train, predict and score on X_test, y_test.
Parameters
----------
X_train : array-like,
Training vectors, where rows is the number of samples
and columns is the number of features.
X_test : array-like,
Testing vectors, where rows is the number of samples
and columns is the number of features.
        y_train : array-like,
            Training target values, one per training sample.
        y_test : array-like,
            Testing target values, one per testing sample.
Returns
-------
scores : Pandas DataFrame
Returns metrics of all the models in a Pandas DataFrame.
predictions : Pandas DataFrame
Returns predictions of all the models in a Pandas DataFrame.
"""
accuracy_list = []
balanced_accuracy_list = []
roc_auc_list = []
f1_list = []
names = []
time_list = []
predictions = {}
if self.custom_metric:
CUSTOM_METRIC = []
if isinstance(X_train, np.ndarray):
X_train = pd.DataFrame(X_train)
X_test = pd.DataFrame(X_test)
numeric_features = X_train.select_dtypes(include=[np.number]).columns
categorical_features = X_train.select_dtypes(include=["object"]).columns
categorical_low, categorical_high = get_card_split(
X_train, categorical_features
)
preprocessor = ColumnTransformer(
transformers = [
("numeric", numeric_transformer, numeric_features),
("categorical_low", categorical_transformer_low, categorical_low),
("categorical_high", categorical_transformer_high, categorical_high),
]
)
self.get_classifiers()
for name, model in tqdm(self.classifiers):
start = time.time()
try:
if "random_state" in model().get_params().keys():
pipe = Pipeline(
steps = [
("preprocessor", preprocessor),
("classifier", model(random_state=self.random_state)),
]
)
else:
pipe = Pipeline(
steps = [("preprocessor", preprocessor), ("classifier", model())]
)
pipe.fit(X_train, y_train)
self.models[name] = pipe
y_pred = pipe.predict(X_test)
accuracy = accuracy_score(y_test, y_pred, normalize=True)
b_accuracy = balanced_accuracy_score(y_test, y_pred)
f1 = f1_score(y_test, y_pred, average="weighted")
try:
roc_auc = roc_auc_score(y_test, y_pred)
except Exception as exception:
roc_auc = None
if not self.ignore_warnings:
print("ROC AUC couldn't be calculated for " + name)
print(exception)
# indexes.append()
names.append(name)
accuracy_list.append(accuracy)
balanced_accuracy_list.append(b_accuracy)
roc_auc_list.append(roc_auc)
f1_list.append(f1)
time_list.append(time.time() - start)
if self.custom_metric:
custom_metric = self.custom_metric(y_test, y_pred)
CUSTOM_METRIC.append(custom_metric)
if self.verbose > 0:
current_metric = {
"Model" : name,
"Accuracy" : accuracy,
"Balanced Accuracy" : b_accuracy,
"ROC AUC" : roc_auc,
"F1 Score" : f1,
"Time taken" : time.time() - start,
}
if self.custom_metric:
current_metric[self.custom_metric.__name__] = custom_metric
print(current_metric)
if self.predictions:
predictions[name] = y_pred
except Exception as exception:
if not self.ignore_warnings:
print(name + " model failed to execute")
print(exception)
# indexes = scores.index[lambda x: x in scores.indexes()]
scores = pd.DataFrame(
{
"Model" : names,
"Accuracy" : accuracy_list,
"Balanced Accuracy" : balanced_accuracy_list,
"ROC AUC" : roc_auc_list,
"F1 Score" : f1_list,
"Time Taken" : time_list,
}
)
if self.custom_metric:
scores[self.custom_metric.__name__] = CUSTOM_METRIC
        # Sort the final metrics by Balanced Accuracy
scores = scores.sort_values(
by = "Balanced Accuracy",
ascending = False,
# ignore_index = True # This is not helping on the indexing
).set_index(
"Model"
)
# TODO: We need to index the score so we can see how many algorithms used
indexes = scores.index.tolist()
# scores['L_Index'] = indexes
if self.predictions:
            predictions_df = pd.DataFrame.from_dict(predictions)
"""Exhastuve grid search for parameters for TSNE and UMAP"""
import argparse
import itertools
import hdbscan
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.gridspec as gridspec
import numpy as np
import pandas as pd
import seaborn as sns
from scipy.spatial.distance import pdist, squareform
from sklearn.manifold import TSNE, MDS
from sklearn.decomposition import PCA
from sklearn.metrics import confusion_matrix, matthews_corrcoef
from sklearn.model_selection import RepeatedKFold
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline
from sklearn.svm import LinearSVC
from umap import UMAP
import sys
sys.path.append("../notebooks/scripts/")
from Helpers import get_PCA_feature_matrix, get_euclidean_data_frame
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--distance-matrix", help="csv file with the distance matrix")
parser.add_argument("--alignment", help="FASTA file with the alignment")
parser.add_argument("--node-data", help="csv file with the clade_membership - that MUST be the name of the column.")
parser.add_argument("--n-neighbors", nargs="+", type=int, help="list of values that the search should use")
parser.add_argument("--min-dist", nargs="+", type=float, help="list of values that the search should use")
parser.add_argument("--perplexity", nargs="+", type=float, help="list of values that the search should use")
parser.add_argument("--threshold-information", nargs="+", help="the distance threshold values to be used on HDBSCAN. if not provided, it will run without.")
parser.add_argument("--learning-rate", nargs="+", type=float, help="list of values that the search should use")
parser.add_argument("--n-repeats", type=int, help="the number of times the k fold generator should repeat the k fold")
parser.add_argument("--output", help="the path where the best thresholds will be saved.")
parser.add_argument("--output-hyperparameters", help="the path where the best parameters will be saved. ")
parser.add_argument("--output-metadata", help="the path where the grid search data will be saved.")
parser.add_argument("--output-figure-HDBSCAN", help="PNG with the results displayed graphically for HDBSCAN thresholds")
parser.add_argument("--output-figure-grid-search", help="PNG with the results displayed graphically for grid search")
args = parser.parse_args()
    def _get_embedding_columns_by_method(method):
        if method == "pca":
            return list(f"{method}1 {method}2 {method}3 {method}4 {method}5 {method}6 {method}7 {method}8 {method}9 {method}10".split())
        elif method == "mds":
            return list(f"{method}1 {method}2".split())
        elif method == "t-sne":
            return list("tsne_x tsne_y".split())
        else:
            return list(f"{method}_x {method}_y".split())
if(args.threshold_information is not None):
#threshold_df = pd.read_csv(args.threshold_information) threshold_df.loc[threshold_df['embedding'] == args.method][args.column_threshold].values.tolist()[0]
distance_thresholds = args.threshold_information
else:
distance_thresholds = np.arange(0,20,2)
default_tuned_values = []
list_of_embedding_strings = ["t-sne", "umap", "mds", "pca"] #["t-SNE","UMAP","MDS", "PCA"]
embedding_class = [TSNE, UMAP, MDS, PCA]
tuned_parameter_values = []
# reading in the distance matrix and node data
distance_matrix = pd.read_csv(args.distance_matrix, index_col=0)
sequence_names = distance_matrix.index.values.tolist()
# parameters for the methods taken from the exhaustive grid search
embedding_parameters = {
"metric": "precomputed",
"square_distances" : True
}
default_tuned_values.append(embedding_parameters)
embedding_parameters = {
"init": "spectral",
}
default_tuned_values.append(embedding_parameters)
embedding_parameters = {
"dissimilarity": "precomputed",
"n_components" : 2,
"n_init" : 2,
"n_jobs" : 1
}
default_tuned_values.append(embedding_parameters)
embedding_parameters = {
"n_components" : 10,
"svd_solver" : "full"
}
default_tuned_values.append(embedding_parameters)
tuned_parameters_TSNE = {
"perplexity": args.perplexity, #[15, 30, 100],
"learning_rate": args.learning_rate, #[100.0, 200.0, 500.0, 1000.0],
"square_distances" : [True]
}
tuned_parameter_values.append(tuned_parameters_TSNE)
tuned_parameters_UMAP = {
"n_neighbors" : args.n_neighbors, #[25, 100, 200],
"min_dist" : args.min_dist #[.05, .5]
}
tuned_parameter_values.append(tuned_parameters_UMAP)
tuned_parameters_MDS = {
}
tuned_parameter_values.append(tuned_parameters_MDS)
tuned_parameters_PCA = {
}
tuned_parameter_values.append(tuned_parameters_PCA)
# reading in the distance matrix and node data
    distance_matrix = pd.read_csv(args.distance_matrix, index_col=0)
'''This script contains functions for evaluating models and calculating and visualizing metrics'''
import pandas as pd
import numpy as np
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split, StratifiedKFold, KFold, cross_validate, cross_val_score, RandomizedSearchCV
from sklearn.metrics import precision_score, recall_score, accuracy_score, roc_auc_score, roc_curve, precision_recall_curve, f1_score, fbeta_score, confusion_matrix, classification_report, make_scorer, auc, log_loss
from sklearn.naive_bayes import BernoulliNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from xgboost import XGBClassifier, plot_importance
from sklearn.preprocessing import StandardScaler
from imblearn.over_sampling import RandomOverSampler, SMOTE, ADASYN
from imblearn.under_sampling import RandomUnderSampler
from collections import Counter, OrderedDict
from scipy.stats import randint
import time
import pickle
import matplotlib.pyplot as plt
import seaborn as sns
def eval(model_name, model, X, y):
    '''Compares preliminary models.
    Takes in a model and its name (typically the value and key of a dictionary of
    instantiated models) along with the full feature matrix X and target y, partitions
    the data, oversamples the minority class in the training set, and prints evaluation metrics.'''
# Partition data
X_tv, X_test, y_tv, y_test = train_test_split(X, y, test_size = 0.2, random_state=33, stratify=y)
X_train, X_val, y_train, y_val = train_test_split(X_tv, y_tv, test_size = 0.2, random_state=14, stratify=y_tv)
# Oversample minority class in training data
oversample = RandomOverSampler(random_state=0, sampling_strategy='minority')
X_train_os, y_train_os = oversample.fit_resample(X_train, y_train)
# Train model
model.fit(X_train_os, y_train_os)
# Make predictions
y_pred = model.predict(X_val)
preds = model.predict_proba(X_val)
# Print scores
print(model_name, ':')
print('Accuracy score: ', accuracy_score(y_val, y_pred))
print('Precision score: ', precision_score(y_val, y_pred))
print('Recall score: ', recall_score(y_val, y_pred))
print('F1 score: ', f1_score(y_val, y_pred))
print('F-beta score: ', fbeta_score(y_val, y_pred, beta=2))
print('ROC-AUC score: ', roc_auc_score(y_val, preds[:,1]), '\n')
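# Hedged usage sketch for eval(): the model dictionary below is an assumption for
# illustration only -- any {name: instantiated estimator} mapping works. Note that this
# calls the module-level eval() defined above, which shadows the Python builtin.
def _demo_eval(X, y):
    candidate_models = {
        'Logistic Regression': LogisticRegression(max_iter=1000),
        'Random Forest': RandomForestClassifier(n_estimators=100),
    }
    for model_name, model in candidate_models.items():
        eval(model_name, model, X, y)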
def model_scores(model, X, y):
'''
Takes in an instantiated model and training data, partitions the training data
into training and validation sets, trains the model on training data, and returns
evaluation metrics
'''
    # Partition data into train+validation and held-out test sets, then split train/validation
    X_tv, X_test, y_tv, y_test = train_test_split(X, y, test_size=0.2, random_state=5, stratify=y)
    X_train, X_val, y_train, y_val = train_test_split(X_tv, y_tv, test_size=0.2, random_state=17, stratify=y_tv)
# Train model
model.fit(X_train, y_train)
# Make prediction
y_pred = model.predict(X_val)
preds = model.predict_proba(X_val)
# Print scores
print('Accuracy score: ', accuracy_score(y_val, y_pred))
print('Precision score: ', precision_score(y_val, y_pred))
print('Recall score: ', recall_score(y_val, y_pred))
print('F1 score: ', f1_score(y_val, y_pred))
print('Fbeta score (beta=2): ', fbeta_score(y_val, y_pred, beta=2))
print('ROC AUC score: ', roc_auc_score(y_val, preds[:,1]), '\n')
def model_scores_os(model, X, y):
'''
Takes in an instantiated model and training data, partitions the training data
into training and validation sets, oversamples the training data, trains the model
on the oversampled training data, and returns evaluation metrics
'''
    # Partition data into train+validation and held-out test sets, then split train/validation
    X_tv, X_test, y_tv, y_test = train_test_split(X, y, test_size=0.2, random_state=5, stratify=y)
    X_train, X_val, y_train, y_val = train_test_split(X_tv, y_tv, test_size=0.2, random_state=17, stratify=y_tv)
# Oversample since classes are imbalanced
oversampler = RandomOverSampler(sampling_strategy='minority', random_state=0)
X_train, y_train = oversampler.fit_resample(X_train, y_train)
# Train model
model.fit(X_train, y_train)
# Make prediction
y_pred = model.predict(X_val)
preds = model.predict_proba(X_val)
# Print scores
print('Accuracy score: ', accuracy_score(y_val, y_pred))
print('Precision score: ', precision_score(y_val, y_pred))
print('Recall score: ', recall_score(y_val, y_pred))
print('F1 score: ', f1_score(y_val, y_pred))
print('Fbeta score (beta=2): ', fbeta_score(y_val, y_pred, beta=2))
print('ROC AUC score: ', roc_auc_score(y_val, preds[:,1]), '\n')
# Plot confusion matrix
def plot_cm(y_test, y_pred):
'''
Takes in target variable test set and set of predictions from a model
and returns confusion matrix
'''
# Set up confusion matrix
confusion = confusion_matrix(y_test, y_pred)
# Plot confusion matrix
plt.figure(dpi=100)
sns.heatmap(confusion, cmap=plt.cm.Blues, annot=True, square=True,
xticklabels=['No Death', 'Death'],
yticklabels=['No Death', 'Death'])
plt.xlabel('Predicted death')
plt.ylabel('Actual death')
plt.title('Confusion Matrix')
plt.show()
# Plot precision-recall curve
def plot_pr_curve(y_test, preds):
'''
Takes in target variable test set and set of predictions from a model
and plots precision-recall curve
'''
# Set up precsion-recall curve
precision, recall, thresholds = precision_recall_curve(y_test, preds[:,1])
# Plot P-R curve
plt.figure(dpi=80, figsize=(5,5))
plt.plot(thresholds, precision[1:], label='precision')
plt.plot(thresholds, recall[1:], label='recall')
plt.legend(loc='lower left')
plt.xlabel('Threshold')
plt.title('Precision and Recall Curves')
plt.show()
# Plot ROC curve and return AUC score
def roc_auc_curve(y_test, preds):
'''
Takes in target variable test set and set of predictions from a model,
plots ROC curve, and prints ROC AUC score
'''
# Set up ROC curve
fpr, tpr, thresholds = roc_curve(y_test, preds[:,1])
# Plot ROC curve
plt.figure(figsize=(5,5))
plt.plot(fpr, tpr,lw=2)
plt.plot([0,1],[0,1],c='violet',ls='--')
plt.xlim([-0.05,1.05])
plt.ylim([-0.05,1.05])
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve')
plt.show()
# Print ROC AUC score
print("ROC AUC score = ", roc_auc_score(y_test, preds[:,1]))
# Cross-validation with stratified KFold (only for models without oversampling)
def cv(model, X_tv, y_tv):
'''
Takes in instantiated model and non-test data set, performs cross validation using
5-fold stratified splits, and returns dataframe of train and test evaluation metrics
'''
# Define scoring metrics
scoring = {'accuracy': 'accuracy', 'precision': 'precision', 'recall': 'recall', 'f1': 'f1',
'fbeta': make_scorer(fbeta_score, beta=2), 'auc': 'roc_auc'}
# Cross-validation using stratified KFolds
kf = StratifiedKFold(n_splits=5, shuffle=False)
# Store results of cross-validation function dictionary
cv_dict = cross_validate(model, X_tv, y_tv, scoring=scoring,
cv=kf, n_jobs=-1, return_train_score=True)
# Prepare dictionary of metrics for converting into dataframe
cv_dict_2 = {
'test_accuracy': np.mean(cv_dict['test_accuracy']),
'train_accuracy': np.mean(cv_dict['train_accuracy']),
'test_precision': np.mean(cv_dict['test_precision']),
'train_precision': np.mean(cv_dict['train_precision']),
'test_recall': np.mean(cv_dict['test_recall']),
'train_recall': np.mean(cv_dict['train_recall']),
        'test_f1': np.mean(cv_dict['test_f1']),
        'train_f1': np.mean(cv_dict['train_f1']),
        'test_fbeta': np.mean(cv_dict['test_fbeta']),
'train_fbeta': np.mean(cv_dict['train_fbeta']),
'test_auc': np.mean(cv_dict['test_auc']),
'train_auc': np.mean(cv_dict['train_auc'])
}
# Convert to dataframe
    cv_df = pd.DataFrame.from_dict(cv_dict_2, orient='index', columns=['mean_score'])
    return cv_df
#!/usr/bin/env python
# coding: utf-8
# In[2]:
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# In[78]:
# load Data
# R2 comparison for train set sizes
RFR_score = pd.read_csv('Generated Data/RFR_score.csv')
RFR_score = RFR_score.rename(columns={"N Samples size": "Sample_set", "R-Sqaured": "RFR_r2_score"})
DTR_score = pd.read_csv('Generated Data/DTR_score.csv')
import pandas as pd
from functools import reduce
from datetime import datetime
import numpy as np
from EnergyIntensityIndicators.pull_bea_api import BEA_api
from EnergyIntensityIndicators.get_census_data import Econ_census
from EnergyIntensityIndicators.utilities.standard_interpolation \
import standard_interpolation
class NonManufacturing:
""" Prior to 2012, total nonmanufacturing
energy consumption (electricity and fuels) was estimated as a residual
between the supply-side estimates of industrial consumption published
by EIA and the end-user estimates for manufacturing based upon the MECS
(supplemented by census-based data, as described above). The residual-based
method produced very unsatisfactory results; year-to-year changes in
energy consumption were implausible in a large number of instances.
A complicating factor for fuels is that industrial consumption
estimates published by EIA include energy products used as chemical
feedstocks and other nonfuel purposes. As a result, a preliminary
effort was undertaken in mid-2012 to estimate energy consumption
from the user side for these sectors.
"""
def __init__(self, naics_digits):
self.currentYear = datetime.now().year
self.naics_digits = naics_digits
self.BEA_data = \
BEA_api(years=list(range(1949, self.currentYear + 1)))
self.BEA_go_nominal = \
self.BEA_data.get_data(table_name='go_nominal')
self.BEA_go_quant_index = \
self.BEA_data.get_data(table_name='go_quant_index')
self.BEA_va_nominal = \
self.BEA_data.get_data(table_name='va_nominal')
self.BEA_va_quant_index = \
self.BEA_data.get_data(table_name='va_quant_index')
@staticmethod
def indicators_nonman_2018_bea():
"""Reformat value added and gross output chain quantity
indexes from GrossOutput_1967-2018PNNL_213119.xlsx/
ChainQtyIndexes (EA301:EJ349) and
ValueAdded_1969-2018_PNNL_010120.xlsx/
ChainQtyIndexes (EA301:EJ349) respectively
"""
va_quant_index, go_quant_index =\
BEA_api(years=list(range(1949, 2018))).chain_qty_indexes()
return va_quant_index, go_quant_index
def get_econ_census(self):
"""Collect economic census data
Returns:
[type]: [description]
"""
economic_census = Econ_census()
economic_census_years = list(range(1987, self.currentYear + 1, 5))
e_c_data = {str(y): economic_census.get_data(year=y)
for y in economic_census_years}
return e_c_data
@staticmethod
def petroleum_prices(retail_gasoline, retail_diesel,
excl_tax_gasoline, excl_tax_diesel):
"""Get petroleum prices
Args:
retail_gasoline ([type]): [description]
retail_diesel ([type]): [description]
excl_tax_gasoline ([type]): [description]
excl_tax_diesel ([type]): [description]
Returns:
dollar_mmbtu [type]: [description]
lubricant [type]: [description]
"""
retail_gasoline.loc[2011] = 3.527
retail_gasoline.loc[2012] = 3.644
retail_gasoline.loc[2013] = 3.526
retail_gasoline.loc[2014] = 3.367
retail_gasoline.loc[2015] = 2.448
retail_gasoline.loc[2016] = 2.142
retail_gasoline.loc[2017] = 2.408
retail_gasoline['Excl. Tax'] = \
retail_gasoline.divide(
retail_gasoline.loc[1994, 'Retail']).multiply(
excl_tax_gasoline.loc[1994])
retail_gasoline['$/MMBtu'] = \
retail_gasoline.divide(
retail_gasoline.loc[1994, 'Retail']).multiply(
excl_tax_gasoline.loc[1994])
retail_diesel['Excl. Tax'] = \
retail_diesel.divide(
retail_diesel.loc[1994, 'Retail']).multiply(
excl_tax_diesel.loc[1994])
retail_diesel['$/MMBtu'] = \
retail_diesel.divide(
retail_diesel.loc[1994, 'Retail']).multiply(
excl_tax_diesel.loc[1994])
gasoline_weight = 0.3
diesel_weight = 0.7
lubricant_weights = 2
dollar_mmbtu = \
retail_diesel['$/MMBtu'] * diesel_weight + \
retail_gasoline['$/MMBtu'] * gasoline_weight
lubricant = dollar_mmbtu.multiply(lubricant_weights)
return dollar_mmbtu, lubricant
def construction_raw_data(self):
"""Equivalent to Construction_energy_011920.xlsx['Construction']
Returns:
construction_elec [type]: [description]
construction_fuels [type]: [description]
TODO: automatically update data
"""
stb0303 = \
pd.read_excel(
'./EnergyIntensityIndicators/Industry/Data/stb0303.xlsx',
sheet_name='stb0303')
stb0304 = \
pd.read_excel(
'./EnergyIntensityIndicators/Industry/Data/stb0304.xlsx',
sheet_name='stb0304')
stb0523 = \
pd.read_excel(
'./EnergyIntensityIndicators/Industry/Data/stb0523.xlsx',
sheet_name='stb0523')
stb0524 = \
pd.read_csv(
'https://www.eia.gov/totalenergy/data/browser/csv.php?tbl=T09.04')
construction_elec_fuels = \
pd.read_csv(
'./EnergyIntensityIndicators/Industry/Data/construction_elec_fuels.csv').set_index('Year')
construction_elec_fuels = \
construction_elec_fuels.rename(
columns={' Electricity':
'Electricity'})
construction_elec = construction_elec_fuels[['Electricity']]
construction_fuels = construction_elec_fuels[['Total Fuel']]
return construction_elec, construction_fuels
def construction(self):
"""Build data dictionary for the construction sector
https://www.census.gov/data/tables/2017/econ/economic-census/naics-sector-23.html
https://www.census.gov/data/tables/2012/econ/census/construction.html
http://factfinder2.census.gov/faces/tableservices/jsf/pages/productview.xhtml?pid=ECN_2007_US_23I1&prodType=table
http://factfinder2.census.gov/faces/tableservices/jsf/pages/productview.xhtml?pid=ECN_2002_US_23I04A&prodType=table
http://www.census.gov/epcd/www/97EC23.HTM
http://www.census.gov/prod/www/abs/cciview.html
data_dict (dict): [Description]
"""
# NonMan_output_data / M, Y
value_added, gross_output = self.indicators_nonman_2018_bea()
value_added = value_added[['Construction']]
gross_output = \
gross_output[['Construction']].rename(
columns={'Construction': 'Gross Output'})
gross_output['Output*0.0001'] = \
gross_output['Gross Output'].multiply(0.0001)
electricity, fuels = self.construction_raw_data()
elec_intensity = electricity.merge(gross_output,
how='outer',
left_index=True,
right_index=True)
elec_intensity['elec_intensity'] = \
elec_intensity['Electricity'].divide(
elec_intensity['Output*0.0001'].values)
elec_intensity = \
standard_interpolation(elec_intensity,
name_to_interp='elec_intensity',
axis=1).fillna(method='bfill')
fuels_intensity = \
fuels.merge(gross_output, how='outer',
left_index=True, right_index=True)
fuels_intensity['fuels_intensity'] = \
fuels_intensity['Total Fuel'].divide(
fuels_intensity['Output*0.0001'] .values)
fuels_intensity.loc[1982, 'fuels_intensity'] = np.nan
fuels_intensity.loc[2002, 'fuels_intensity'] = np.nan
fuels_intensity = \
standard_interpolation(fuels_intensity,
name_to_interp='fuels_intensity',
axis=1).fillna(method='bfill')
final_electricity = elec_intensity[[
'elec_intensity']].multiply(
elec_intensity['Output*0.0001'], axis='index')
final_electricity = final_electricity.rename(columns={'elec_intensity':
'Construction'})
final_fuels = fuels_intensity[[
'fuels_intensity']].multiply(
fuels_intensity['Output*0.0001'], axis='index')
final_fuels = final_fuels.rename(columns={'fuels_intensity':
'Construction'})
gross_output = gross_output.drop('Output*0.0001', axis=1)
gross_output = gross_output.rename(columns={'Gross Output':
'Construction'})
data_dict = {'energy':
{'elec': final_electricity,
'fuels': final_fuels},
'activity':
{'gross_output': gross_output,
'value_added': value_added}}
return data_dict
def agriculture(self):
"""Build data dictionary for the agricultural sector
Returns:
data_dict (dict): [description]
"""
# Annual Estimates of energy by fuel for the farm sector for the
# period 1965-2002
miranowski_data = \
pd.read_excel(
'./EnergyIntensityIndicators/Industry/Data/miranowski_data.xlsx',
sheet_name='Ag Cons by Use', skiprows=4, skipfooter=9,
usecols='A,F:G', index_col=0,
names=['Year', 'Electricity', 'Direct Ag. Energy Use'])
miranowski_data = miranowski_data.reset_index()
miranowski_data['Year'] = pd.to_numeric(miranowski_data['Year'],
errors='coerce')
miranowski_data = miranowski_data.dropna(
subset=['Year']).set_index('Year')
miranowski_data.index = miranowski_data.index.astype(int)
adjustment_factor = 10500 / 3412 # Assume 10,500 Btu/Kwh
# NonMan_output_data_010420.xlsx column G, S (value added and gross
# output chain qty indexes for farms)
value_added, gross_output = self.indicators_nonman_2018_bea()
value_added.index = value_added.index.astype(int)
gross_output.index = gross_output.index.astype(int)
value_added = value_added[['Farms']]
gross_output = gross_output[['Farms']]
elec_prm = miranowski_data[[
'Electricity']].rename(
columns={'Electricity': 'elec'})
elec_site = elec_prm.divide(adjustment_factor)
elec_site.index = elec_site.index.astype(int)
elec_df = elec_site[~elec_site.index.duplicated()]
fuels_df = miranowski_data[['Direct Ag. Energy Use']].subtract(
elec_prm.values).rename(
columns={'Direct Ag. Energy Use': 'fuels'})
fuels_df = fuels_df[~fuels_df.index.duplicated()]
fuels_df.index = fuels_df.index.astype(int)
elec_df = elec_df.merge(gross_output,
left_index=True,
right_index=True,
how='outer')
fuels_df = fuels_df.merge(gross_output,
left_index=True,
right_index=True,
how='outer')
elec_df['elec_intensity'] = elec_df['elec'].divide(
gross_output['Farms'] * 0.001, axis='index')
fuels_df['fuels_intensity'] = fuels_df['fuels'].divide(
gross_output['Farms'] * 0.001, axis='index')
electricity_final = elec_df[['elec_intensity']].multiply(
gross_output['Farms'] * 0.001, axis='index').ffill()
electricity_final = \
electricity_final.rename(
columns={'elec_intensity': 'Agriculture, Forestry & Fishing'})
electricity_final.index = electricity_final.index.astype(int)
fuels_final = fuels_df[['fuels_intensity']].multiply(
gross_output['Farms'] * 0.001, axis='index')
# Calculated in Agricultural_energy_010420/Farms
fuels_fill = pd.DataFrame([[641.0], [717.2], [657.7], [635.2], [732.1], [638.5],
[791.4], [689.0], [652.1], [675.0], [740.2], [782.8],
[906.9], [929.6], [820.9]],
index=list(range(2003, 2017 + 1)),
columns=['fuels_fill'])
fuels_final = fuels_final.merge(fuels_fill,
how='outer',
left_index=True,
right_index=True)
fuels_final = \
fuels_final.rename(
columns={'fuels_intensity': 'Agriculture, Forestry & Fishing'})
fuels_final = \
fuels_final['Agriculture, Forestry & Fishing'].fillna(
fuels_final['fuels_fill']).to_frame(
name='Agriculture, Forestry & Fishing')
# fuels_final = fuels_final.drop('fuels_fill', axis=1)
fuels_final.index = fuels_final.index.astype(int)
value_added = value_added.rename(
columns={'Farms': 'Agriculture, Forestry & Fishing'})
gross_output = gross_output.rename(
columns={'Farms': 'Agriculture, Forestry & Fishing'})
data_dict = {'energy': {'elec': electricity_final,
'fuels': fuels_final},
'activity': {'gross_output': gross_output,
'value_added': value_added}}
return data_dict
def aggregate_mining_data(self, mining_df, allfos=False):
"""[summary]
Args:
mining_df ([type]): [description]
allfos (bool, optional): [description]. Defaults to False.
Returns:
to_transfer (pd.DataFrame): [description]
"""
mapping = {5: 'Iron and Ferroalloy mining',
6: 'Uranium - vanadium ores',
7: 'Nonferrous metals',
8: 'Anthracite Coal',
9: 'Bituminous Coal',
10: 'Crude Petroleum',
11: 'Natural Gas',
12: 'Natural Gas Liquids',
13: 'Stone and clay mining',
14: 'Chemical and Fertilizer',
15: 'Oil and gas well drilling'}
mapping_df = \
pd.DataFrame.from_dict(mapping, orient='index',
columns=['Industry'])
mapping_df.index.name = 'Year'
mapping_df = mapping_df.reset_index()
if allfos:
mapping_df['Year'] = mapping_df['Year'].subtract(1)
mapping_df['Year'] = mapping_df['Year'].astype(int)
mining_df = mining_df.merge(mapping_df, how='right', on='Year')
mining_df = mining_df.drop(
['Year', 'NAICS'], axis=1).set_index('Industry')
mining_df = mining_df.transpose()
mining_df['Crude Petroleum and Natural Gas'] = \
mining_df[['Crude Petroleum', 'Natural Gas', 'Natural Gas Liquids']].sum(axis=1)
mining_df['Coal Mining'] = \
mining_df[['Anthracite Coal', 'Bituminous Coal']].sum(axis=1)
mining_df['Metal Ore Mining'] = \
mining_df[['Iron and Ferroalloy mining',
'Uranium - vanadium ores',
'Nonferrous metals']].sum(axis=1)
mining_df['Nonmetallic mineral mining'] = \
mining_df[['Stone and clay mining',
'Chemical and Fertilizer']].sum(axis=1)
to_transfer = mining_df[['Crude Petroleum and Natural Gas',
'Coal Mining', 'Metal Ore Mining',
'Nonmetallic mineral mining',
'Oil and gas well drilling']].rename(
columns={'Oil and gas well drilling':
'Support Activities',
'Crude Petroleum and Natural Gas':
'Crude Pet'})
return to_transfer
@staticmethod
def build_mining_output(factor, gross_output, value_added,
elec, fuels, sector_estimates_elec,
sector_estimates_fuels, col_name):
        """Build data dictionary for the mining subsector"""
        gross_output.index = gross_output.index.astype(int)
        elec.index = elec.index.astype(int)
        fuels.index = fuels.index.astype(int)
        sector_estimates_elec.index = sector_estimates_elec.index.astype(int)
        sector_estimates_fuels.index = sector_estimates_fuels.index.astype(int)
elec = elec.rename(columns={col_name: 'elec'})
fuels = fuels.rename(columns={col_name: 'fuels'})
sector_estimates_elec = \
sector_estimates_elec.rename(
columns={col_name: 'elec'})
sector_estimates_fuels = \
sector_estimates_fuels.rename(
columns={col_name: 'fuels'})
        elec = pd.concat([elec, sector_estimates_elec], axis=0)
"""
Plotting code for GPU hardware metrics (i.e., SM occupancy, SM efficiency),
and miscellaneous experiments with GPU utilization.
"""
from rlscope.profiler.rlscope_logging import logger
import argparse
import traceback
import bdb
import copy
import re
import sys
import itertools
import os
import csv
import textwrap
import pprint
import math
from io import StringIO
import json
import codecs
import pandas as pd
from rlscope.parser.plot_utils import setup_matplotlib
setup_matplotlib()
import matplotlib
import matplotlib.ticker
# matplotlib.use('agg')
import matplotlib.pyplot as plt
import seaborn as sns
from os.path import join as _j, abspath as _a, dirname as _d, exists as _e, basename as _b
from rlscope.profiler.util import pprint_msg
from rlscope.parser.stacked_bar_plots import get_x_env, get_x_algo, xfields_from_xtick_expression, get_capsize, OverlapStackedBarPlot, add_repetition, group_numeric_cols
from rlscope.parser.dataframe import UtilDataframeReader, RLScopeConfig
from rlscope import py_config
from rlscope.parser.common import *
from rlscope.parser import constants
from rlscope.parser.plot_utils import is_pdf, pdf2png
from rlscope.py_config import yes_as_bool
from typing import *
class IMLInvaidArgument(Exception):
pass
def maybe_number(x):
if type(x) != str:
return x
try:
num = int(x)
return num
except ValueError:
pass
try:
num = float(x)
return num
except ValueError:
pass
return x
def parse_filename_attrs(
path : str,
file_prefix : str,
file_suffix : str,
attrs : Iterable[str],
dflt_attrs : Optional[Dict[str, Any]] = None):
attr_name_regex = r'(?:{regex})'.format(
regex='|'.join(sorted(attrs, key=lambda attr: (-1*len(attr), attr)))
)
attr_string_regex = r'(?P<attr_name>{attr_name})_(?P<attr_value>[^\.]*)'.format(
attr_name=attr_name_regex
)
# e.g.
# path = 'GPUHwCounterSampler.thread_blocks_68.thread_block_size_1024.csv'
# e.g.
# ['GPUHwCounterSampler', 'thread_blocks_68', 'thread_block_size_1024', 'csv']
components = re.split(r'\.', _b(path))
assert components[0] == file_prefix
assert components[-1] == file_suffix
attr_strings = components[1:len(components)-1]
attr_vals = dict()
if dflt_attrs is not None:
attr_vals.update(dflt_attrs)
for attr_string in attr_strings:
m = re.fullmatch(attr_string_regex, attr_string)
if not m:
raise RuntimeError(f"""
Not sure how to parse attribute name/value from \"{attr_string}\" found in {_b(path)}.
Attributes we recognize = {attrs}
""")
attr_vals[m.group('attr_name')] = m.group('attr_value')
return attr_vals
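# Illustrative call of parse_filename_attrs (filename taken from the comment above);
# attribute values come back as strings unless the caller converts them.
def _demo_parse_filename_attrs():
    attrs = parse_filename_attrs(
        path='GPUHwCounterSampler.thread_blocks_68.thread_block_size_1024.csv',
        file_prefix='GPUHwCounterSampler',
        file_suffix='csv',
        attrs={'thread_blocks', 'thread_block_size'},
    )
    # attrs == {'thread_blocks': '68', 'thread_block_size': '1024'}
    return attrs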
def parse_path_attrs(
path : str,
attrs : Iterable[str],
dflt_attrs : Optional[Dict[str, Any]] = None,
attr_types : Optional[Dict[str, Any]] = None,
debug : bool = False,
):
attr_name_regex = r'(?:{regex})'.format(
regex='|'.join(sorted(attrs, key=lambda attr: (-1*len(attr), attr)))
)
attr_string_regex = r'(?P<attr_name>{attr_name})_(?P<attr_value>[^\.]*)\b'.format(
attr_name=attr_name_regex
)
# e.g.
# path = 'GPUHwCounterSampler.thread_blocks_68.thread_block_size_1024.csv'
if debug:
logger.info(f"attr_name_regex = {attr_name_regex}")
attr_vals = dict()
if dflt_attrs is not None:
attr_vals.update(dflt_attrs)
path_components = os.path.split(path)
for path_component in path_components:
# e.g.
# ['GPUHwCounterSampler', 'thread_blocks_68', 'thread_block_size_1024', 'csv']
attr_strings = re.split(r'\.', path_component)
for attr_string in attr_strings:
m = re.search(attr_string_regex, attr_string)
if m:
value = m.group('attr_value')
attr_name = m.group('attr_name')
if attr_types is not None and attr_name in attr_types:
value = attr_types[attr_name](value)
attr_vals[attr_name] = value
# if not m:
# raise RuntimeError(f"""
# Not sure how to parse attribute name/value from \"{attr_string}\" found in {path}.
# Attributes we recognize = {attrs}
# """)
missing_attrs = set(attrs).difference(attr_vals.keys())
if len(missing_attrs) > 0:
raise RuntimeError(f"""
Couldn't find all required attributes in {path}.
Attributes we are missing = {missing_attrs}
""")
return attr_vals
METRIC_NAME_CUPTI_TO_PROF = {
# Deprecated CUPTI metric API -- achieved_occupancy:
# Id = 1205
# Shortdesc = Achieved Occupancy
# Longdesc = Ratio of the average active warps per active cycle to the maximum number of warps supported on a multiprocessor
'achieved_occupancy': "sm__warps_active.avg.pct_of_peak_sustained_active",
# Deprecated CUPTI metric API -- sm_efficiency:
# Id = 1203
# Shortdesc = Multiprocessor Activity
# Longdesc = The percentage of time at least one warp is active on a multiprocessor averaged over all multiprocessors on the GPU
# See CUPTI documentation for mapping to new "Profiling API" metric name:
# https://docs.nvidia.com/cupti/Cupti/r_main.html#metrics_map_table_70
'sm_efficiency': "smsp__cycles_active.avg.pct_of_peak_sustained_elapsed",
# Deprecated CUPTI metric API -- inst_executed:
# Metric# 90
# Id = 1290
# Name = inst_executed
# Shortdesc = Instructions Executed
# Longdesc = The number of instructions executed
'inst_executed': "smsp__inst_executed.sum",
# Deprecated CUPTI metric API -- active_cycles:
# Event# 25
# Id = 2629
# Name = active_cycles
# Shortdesc = Active cycles
# Longdesc = Number of cycles a multiprocessor has at least one active warp.
# Category = CUPTI_EVENT_CATEGORY_INSTRUCTION
'active_cycles': "sm__cycles_active.sum",
# Deprecated CUPTI metric API -- active_warps:
# Event# 26
# Id = 2630
# Name = active_warps
# Shortdesc = Active warps
# Longdesc = Accumulated number of active warps per cycle. For every cycle it increments by the number of active warps in the cycle which can be in the range 0 to 64.
# Category = CUPTI_EVENT_CATEGORY_INSTRUCTION
'active_warps': "sm__warps_active.sum",
# Deprecated CUPTI metric API -- elapsed_cycles_sm:
# Event# 33
# Id = 2193
# Name = elapsed_cycles_sm
# Shortdesc = Elapsed clocks
# Longdesc = Elapsed clocks
# Category = CUPTI_EVENT_CATEGORY_INSTRUCTION
'elapsed_cycles_sm': "sm__cycles_elapsed.sum",
}
PROF_TO_METRIC_NAME_CUPTI = dict((v, k) for k, v in METRIC_NAME_CUPTI_TO_PROF.items())
# HACK: number of total SMs on the RTX 2080 GPU on the "eco" cluster machines
NUM_SMS = 68
SM_OCCUPANCY_TITLE = "SM occupancy: average percent of warps\nthat are in use within an SM"
SM_EFFICIENCY_TITLE = "SM efficiency: percent of SMs\nthat are in use across the entire GPU"
SM_EFFICIENCY_Y_LABEL = f"SM efficiency (%)\n# SMs = {NUM_SMS}"
SM_OCCUPANCY_Y_LABEL = "SM occupancy (%)\nmax threads per block = 1024"
SAMPLE_THROUGHPUT_Y_LABEL = "Throughput (samples/second)"
SAMPLE_LATENCY_Y_LABEL = "Minibatch latency (ms)"
CUPTI_METRIC_Y_LABEL = {
'sm_efficiency': SM_EFFICIENCY_Y_LABEL,
'achieved_occupancy': SM_OCCUPANCY_Y_LABEL,
}
CUPTI_METRIC_Y_LABEL_SHORT = {
'sm_efficiency': "SM efficiency (%)",
'achieved_occupancy': "SM occupancy (%)",
}
TRT_METRIC_YLABELS = {
'host_latency_throughput_qps': SAMPLE_THROUGHPUT_Y_LABEL,
'gpu_compute_mean_ms': "Mean GPU compute time (ms)",
'gpu_compute_percentile_99_ms': "99%-tile GPU compute time (ms)",
}
BATCH_SIZE_X_LABEL = "Batch size"
STREAMS_X_LABEL = "# of CUDA streams"
SIMULATOR_X_LABEL = "Simulator"
STEP_THROUGHPUT_Y_LABEL = "Simulation throughput (samples/sec)"
STEP_LATENCY_Y_LABEL = "Simulation latency (ms)"
RLSCOPE_X_LABEL = "(RL algorithm, Simulator)"
SM_ID_X_LABEL = f"SM ID\n# SMs = {NUM_SMS}"
GPU_UTIL_EXPERIMENT_ATTRS = {
'thread_blocks',
'thread_block_size',
'n_launches',
'iterations',
'num_threads',
'processes',
'hw_counters',
}
GPU_UTIL_EXPERIMENT_ATTR_TYPES = {
'thread_blocks': maybe_number,
'thread_block_size': maybe_number,
'n_launches': maybe_number,
'iterations': maybe_number,
'num_threads': maybe_number,
'processes': yes_as_bool,
'hw_counters': yes_as_bool,
}
MULTI_TASK_ATTRS = set(GPU_UTIL_EXPERIMENT_ATTRS)
MULTI_TASK_ATTRS.update({
## From directory attrs
# 'thread_blocks',
# 'thread_block_size',
# 'n_launches',
# 'iterations',
# 'num_threads',
'iterations_per_sched_sample',
# 'processes',
# 'hw_counters',
## GPUComputeSchedInfoKernel.thread_id_9.stream_id_9.trace_id_0.json
'thread_id',
'stream_id',
'trace_id',
})
MULTI_TASK_JSON_ATTRS = {
## From contents of: GPUComputeSchedInfoKernel.thread_id_9.stream_id_9.trace_id_0.json
"globaltimer_ns",
"kernel_id",
"lane_id",
"sm_id",
"stream_id",
"warp_id",
}
MULTI_TASK_ATTR_TYPES = dict(GPU_UTIL_EXPERIMENT_ATTR_TYPES)
MULTI_TASK_ATTR_TYPES.update({
## From directory attrs
# 'thread_blocks',
# 'thread_block_size',
# 'n_launches',
# 'iterations',
# 'num_threads',
'iterations_per_sched_sample': maybe_number,
# 'processes',
# 'hw_counters',
## GPUComputeSchedInfoKernel.thread_id_9.stream_id_9.trace_id_0.json
'thread_id': maybe_number,
'stream_id': maybe_number,
'trace_id': maybe_number,
})
MULTI_TASK_RAW_ATTR_TYPES = dict(MULTI_TASK_ATTR_TYPES)
MULTI_TASK_RAW_ATTR_TYPES.update({
'num_sms': maybe_number,
'sms_allocated': maybe_number,
'CUDA_MPS_ACTIVE_THREAD_PERCENTAGE': maybe_number,
})
# MULTI_TASK_RAW_ATTR_DFLTS = dict(MULTI_TASK)
MULTI_TASK_RAW_ATTR_DFLTS = {
'num_sms': None,
'sms_allocated': None,
'CUDA_MPS_ACTIVE_THREAD_PERCENTAGE': None,
}
MULTI_TASK_RAW_ATTRS = MULTI_TASK_ATTRS.union(MULTI_TASK_RAW_ATTR_TYPES.keys()).difference({
'stream_id',
'thread_id',
'trace_id',
})
# suffix=".num_sms_${NUM_SMS}.sms_allocated_${sms_allocated}.CUDA_MPS_ACTIVE_THREAD_PERCENTAGE_${CUDA_MPS_ACTIVE_THREAD_PERCENTAGE}"
# all_cycles:
# the metric is computed over all cycles on the GPU, including cycles where the GPU
# is idle and not executing any kernels.
# active_cycles:
# the metric is computed over active GPU cycles.
# Measurement periods where the GPU is idle result in a metric value of "0".
MEASUREMENT_PERIOD_ACTIVE_CYCLES = 'active_cycles'
MEASUREMENT_PERIOD_ALL_CYCLES = 'all_cycles'
CUPTI_METRIC_MEASUREMENT_PERIOD = {
'achieved_occupancy': MEASUREMENT_PERIOD_ACTIVE_CYCLES,
'sm_efficiency': MEASUREMENT_PERIOD_ALL_CYCLES,
'inst_executed': MEASUREMENT_PERIOD_ACTIVE_CYCLES,
'active_cycles': MEASUREMENT_PERIOD_ACTIVE_CYCLES,
'active_warps': MEASUREMENT_PERIOD_ACTIVE_CYCLES,
'elapsed_cycles_sm': MEASUREMENT_PERIOD_ALL_CYCLES,
}
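# Illustrative note (not from the original source): the two tables above can be used
# together to translate a legacy CUPTI metric name and decide how to interpret it.
# A hypothetical lookup might look like:
#   prof_name = METRIC_NAME_CUPTI_TO_PROF['achieved_occupancy']
#       # -> "sm__warps_active.avg.pct_of_peak_sustained_active"
#   period = CUPTI_METRIC_MEASUREMENT_PERIOD['achieved_occupancy']
#       # -> MEASUREMENT_PERIOD_ACTIVE_CYCLES, i.e. the metric is computed over active GPU cycles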
FLOAT_RE = r'(?:[-+]?[0-9]*\.?[0-9]+(?:[eE][-+]?[0-9]+)?)'
UNIT_RE = r'(?:\b(?:ms|s|qps)\b)'
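# Illustrative examples (not from the original source) of strings these patterns
# are written to match:
#   FLOAT_RE matches "0.123", "42", "1.5e-3", "-0.75"
#   UNIT_RE  matches the bare unit tokens "ms", "s", or "qps"
# so a fragment like "mean: 0.123 ms" can be parsed by combining the two patterns,
# as done in parse_trtexec_log below.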
class TrtexecExperiment:
def __init__(self, args):
self.args = args
def run(self):
self.read_df()
self.plot_df()
def read_df(self):
self._read_trtexec_df()
self._read_tf_inference_df()
self._read_simulator_df()
self._read_mps_df()
"""
        TODO: merge trtexec_df and tf_inference_df
        trtexec_field                  tf_inference_field
        host_latency_throughput_qps    throughput_qps
"""
def plot_df(self):
"""
Plot trtexec7 experiments.
:return:
"""
"""
batch_size = 1, 8, 16, 32, 64
streams = 1
plot:
throughput
sm_efficiency
sm_occupancy
"""
def _plot_batch_size_vs(streams, suffix=None):
self._plot_batch_size_vs_throughput(
title="Throughput with increasing batch size",
streams=streams,
suffix=suffix)
def filter_tensorflow(plot_df):
plot_df = plot_df[plot_df['config'] == 'TF']
return plot_df
self._plot_batch_size_vs_throughput(
title="Throughput with increasing batch size",
streams=streams,
filter_df=filter_tensorflow,
suffix=f"{or_empty(suffix)}.just_tensorflow")
self._plot_batch_size_vs_metric(
title=SM_EFFICIENCY_TITLE,
cupti_metric='sm_efficiency',
streams=streams,
suffix=suffix)
self._plot_batch_size_vs_metric(
title=SM_OCCUPANCY_TITLE,
cupti_metric='achieved_occupancy',
streams=streams,
suffix=suffix)
_plot_batch_size_vs(streams=1)
def _plot_streams_vs(batch_size, suffix=None):
def _title(title):
return f"{title}:\n(batch size = {batch_size})"
trt_metric_title = {
'host_latency_throughput_qps': _title("Throughput with increasing streams"),
'gpu_compute_mean_ms': _title("Mean GPU compute time with increasing streams"),
                'gpu_compute_percentile_99_ms': _title("99%-tile GPU compute time with increasing streams"),
}
cuda_graph_dict = {
'host_latency_throughput_qps': None,
'gpu_compute_mean_ms': None,
'gpu_compute_percentile_99_ms': None,
}
for trt_metric in trt_metric_title.keys():
self._plot_streams_vs_trt_metric(
trt_metric, batch_size,
title=trt_metric_title[trt_metric],
cuda_graph=cuda_graph_dict.get(trt_metric, None))
# self._plot_streams_vs_throughput(
# title="Throughput with increasing streams\n(batch size = {batch_size})".format(batch_size=batch_size),
# batch_size=batch_size,
# suffix=suffix)
self._plot_streams_vs_metric(
# title="Throughput with increasing streams\n(batch size = {batch_size})".format(batch_size=batch_size),
title=SM_EFFICIENCY_TITLE,
cupti_metric='sm_efficiency',
batch_size=batch_size,
suffix=suffix)
self._plot_streams_vs_metric(
# title="Throughput with increasing streams\n(batch size = {batch_size})".format(batch_size=batch_size),
title=SM_OCCUPANCY_TITLE,
cupti_metric='achieved_occupancy',
batch_size=batch_size,
suffix=suffix)
"""
batch_size = 1
streams = 1, 2, 3, ..., 8
plot:
throughput
sm_efficiency
sm_occupancy
"""
_plot_streams_vs(batch_size=1)
if self.trtexec_df is not None:
"""
batch_size = (best batch size for streams == 1)
streams = 1, 2, 3, ..., 8
plot:
throughput
sm_efficiency
sm_occupancy
"""
best_batch_size = self._compute_best_batch_size()
_plot_streams_vs(batch_size=best_batch_size, suffix='.best_batch_size')
self._plot_simulator_vs_steptime()
self._plot_simulator_vs_throughput()
def _plot_multiprocess_inference(df, throughput_title=None, inference_title=None, filter_df=None, suffix=None):
# if throughput_title is None:
# throughput_title = 'Increasing inference throughput when slicing SMs with CUDA MPS processes'
# if inference_title is None:
# inference_title = 'Inference latency when slicing SMs with CUDA MPS processes'
self._plot_mps_batch_size_vs_metric_by_num_tasks(
df=self.mps_df,
metric='throughput_qps',
title=throughput_title,
xlabel=BATCH_SIZE_X_LABEL,
ylabel=SAMPLE_THROUGHPUT_Y_LABEL,
filter_df=filter_df,
suffix=suffix,
global_ymax=True,
)
self._plot_mps_batch_size_vs_metric_by_num_tasks(
df=self.mps_raw_df,
metric='inference_time_ms',
title=inference_title,
xlabel=BATCH_SIZE_X_LABEL,
ylabel=SAMPLE_LATENCY_Y_LABEL,
filter_df=filter_df,
suffix=suffix,
global_ymax=False,
)
"""
3 different graphs for multi-process experiment:
- Multi-process (CPU) / config_cpu
row['cpu']
assert not row['mps']
- Multi-process MPS (GPU) / config_mps_gpu_evenly
row['mps'] and row['sm_alloc_strategy'] == 'evenly'
assert not row['cpu']
- Multi-process MPS (GPU) / config_mps_gpu_evenly_x2
row['mps'] and row['sm_alloc_strategy'] == 'evenly_x2'
assert not row['cpu']
- Multi-process (GPU, no MPS) / config_gpu
not row['mps'] and not row['cpu']
"""
def is_config_cpu(row):
is_cpu = row['cpu']
if is_cpu:
assert not row['mps']
return is_cpu
# def is_config_mps_gpu_evenly(row):
# is_mps = row['mps']
# if is_mps:
# assert not row['cpu']
# return is_mps and row['sm_alloc_strategy'] == 'evenly'
#
# def is_config_mps_gpu_evenly_x2(row):
# is_mps = row['mps']
# if is_mps:
# assert not row['cpu']
# return is_mps and row['sm_alloc_strategy'] == 'evenly_x2'
def is_config_mps_gpu(row):
is_mps = row['mps']
if is_mps:
assert not row['cpu']
return is_mps
def is_config_gpu(row):
return not row['mps'] and not row['cpu']
def as_row_filter_func(is_config):
def row_filter_func(df):
df = df[df.apply(is_config, axis=1)]
return df
return row_filter_func
# throughput_ymax = self.mps_df['']
sm_alloc_strategies = self.mps_df[self.mps_df['mps']]['sm_alloc_strategy'].unique().tolist()
for sm_alloc_strategy in sm_alloc_strategies:
def _is_config(row):
return is_config_mps_gpu(row) and row['sm_alloc_strategy'] == sm_alloc_strategy
_plot_multiprocess_inference(
self.mps_df,
throughput_title='Inference throughput:\nmulti-process TF scripts (GPU) + CUDA MPS',
inference_title='Inference latency:\nmulti-process TF scripts (GPU) + CUDA MPS',
filter_df=as_row_filter_func(_is_config),
suffix=f".config_mps_gpu_{sm_alloc_strategy}")
# _plot_multiprocess_inference(self.mps_df, filter_df=as_row_filter_func(is_config_mps_gpu_evenly), suffix='.config_mps_gpu_evenly')
# _plot_multiprocess_inference(self.mps_df, filter_df=as_row_filter_func(is_config_mps_gpu_evenly_x2), suffix='.config_mps_gpu_evenly_x2')
_plot_multiprocess_inference(
self.mps_df,
throughput_title='Inference throughput:\nmulti-process TF scripts (CPU)',
inference_title='Inference latency:\nmulti-process TF scripts (CPU)',
filter_df=as_row_filter_func(is_config_cpu),
suffix='.config_cpu')
_plot_multiprocess_inference(
self.mps_df,
throughput_title='Inference throughput:\nmulti-process TF scripts (GPU)',
inference_title='Inference latency:\nmulti-process TF scripts (GPU)',
filter_df=as_row_filter_func(is_config_gpu),
suffix='.config_gpu')
def _compute_best_batch_size(self):
df = self.trtexec_df[self.trtexec_df['streams'] == 1]
max_throughput = df['host_latency_throughput_qps'].max()
batch_sizes = df[df['host_latency_throughput_qps'] == max_throughput]['batch_size'].unique()
assert len(batch_sizes) == 1
best_batch_size = batch_sizes[0]
return best_batch_size
def _plot_streams_vs_metric(self, title, cupti_metric, batch_size, ylabel=None, suffix=None):
if self.trtexec_gpu_hw_df is None:
return
df = copy.copy(self.trtexec_gpu_hw_df)
"""
WANT:
x_field: batch_size
y_field: metric_value
group_field: num_threads
"""
df = df[df['batch_size'] == batch_size]
df = keep_cupti_metric(df, cupti_metric)
add_gpu_hw_fields(df)
df = self._add_config(df, df_type='trtexec')
# titled_df = copy.copy(df)
# col_titles = {
# 'num_threads': 'Number of threads',
# }
# titled_df.rename(columns=col_titles, inplace=True)
sns.set(style="whitegrid")
# df = df[["thread_blocks", "metric_value", "num_threads"]]
g = sns.catplot(x="streams", y="metric_value",
hue="config",
data=df,
# hue="num_threads", data=df,
# hue=col_titles["num_threads"], data=titled_df,
# height=6,
kind="bar",
palette="muted"
)
g.despine(left=True)
if ylabel is None:
ylabel = CUPTI_METRIC_Y_LABEL[cupti_metric]
g.set_ylabels(ylabel)
g.set_xlabels(STREAMS_X_LABEL)
# title = "SM efficiency: percent of SMs\nthat are in use across the entire GPU"
g.fig.suptitle(title)
g.fig.subplots_adjust(top=0.90)
if suffix is None:
suffix = ""
save_plot(df, _j(self.args['trtexec_dir'], f'streams_vs_{cupti_metric}.batch_size_{batch_size}{suffix}.svg'))
def _plot_batch_size_vs_metric(self, title, cupti_metric, streams, ylabel=None, suffix=None):
if self.trtexec_gpu_hw_df is None:
return
"""
WANT:
x_field: batch_size
y_field: metric_value
group_field: num_threads
"""
plot_df = pd.DataFrame(columns=['batch_size', 'metric_value', 'config'])
if self.trtexec_gpu_hw_df is not None:
df = copy.copy(self.trtexec_gpu_hw_df)
df = df[df['streams'] == streams]
df = keep_cupti_metric(df, cupti_metric)
add_gpu_hw_fields(df)
df = self._add_config(df, df_type='trtexec')
plot_df = plot_df.append(df[plot_df.columns])
if self.tf_inference_gpu_hw_df is not None:
df = copy.copy(self.tf_inference_gpu_hw_df)
df = df[df['range_name'] == 'inference_loop/inference']
df = keep_cupti_metric(df, cupti_metric)
add_gpu_hw_fields(df)
df = self._add_config(df, df_type='tf_inference')
plot_df = plot_df.append(df[plot_df.columns])
plot_df.sort_values(by=['config', 'batch_size'], inplace=True)
# titled_df = copy.copy(df)
# col_titles = {
# 'num_threads': 'Number of threads',
# }
# titled_df.rename(columns=col_titles, inplace=True)
sns.set(style="whitegrid")
# df = df[["thread_blocks", "metric_value", "num_threads"]]
g = sns.catplot(x="batch_size", y="metric_value",
hue="config",
data=plot_df,
# hue="num_threads", data=df,
# hue=col_titles["num_threads"], data=titled_df,
# height=6,
kind="bar",
palette="muted"
)
g.despine(left=True)
if ylabel is None:
ylabel = CUPTI_METRIC_Y_LABEL[cupti_metric]
g.set_ylabels(ylabel)
g.set_xlabels(BATCH_SIZE_X_LABEL)
# title = "SM efficiency: percent of SMs\nthat are in use across the entire GPU"
g.fig.suptitle(title)
g.fig.subplots_adjust(top=0.90)
if suffix is None:
suffix = ""
save_plot(plot_df, _j(self.args['trtexec_dir'], f'batch_size_vs_{cupti_metric}.streams_{streams}{suffix}.svg'))
def _plot_streams_vs_trt_metric(self, trt_metric, batch_size, title=None, ylabel=None, alias=None, cuda_graph=None, suffix=None):
if self.trtexec_df is None:
return
if alias is None:
alias = trt_metric
df = copy.copy(self.trtexec_df)
"""
WANT:
x_field: batch_size
y_field: metric_value
group_field: num_threads
"""
df = df[df['batch_size'] == batch_size]
# df = keep_cupti_metric(df, cupti_metric)
# titled_df = copy.copy(df)
# col_titles = {
# 'num_threads': 'Number of threads',
# }
# titled_df.rename(columns=col_titles, inplace=True)
df = self._add_config(df, df_type='trtexec')
sns.set(style="whitegrid")
plot_kwargs = dict(
x="streams",
y=trt_metric,
kind="bar",
palette="muted",
)
if cuda_graph is None:
plot_kwargs.update(dict(
hue="config",
))
elif cuda_graph:
df = df[df['cuda_graph']]
else:
df = df[~ df['cuda_graph']]
plot_kwargs.update(dict(
data=df,
))
g = sns.catplot(**plot_kwargs)
g.despine(left=True)
if ylabel is None:
ylabel = TRT_METRIC_YLABELS[trt_metric]
g.set_ylabels(ylabel)
# if xlabel is not None:
g.set_xlabels(STREAMS_X_LABEL)
if title is not None:
g.fig.suptitle(title)
g.fig.subplots_adjust(top=0.90)
ss = StringIO()
if cuda_graph is None:
pass
elif cuda_graph:
ss.write(f".cuda_graph_yes")
else:
ss.write(f".cuda_graph_no")
if suffix is not None:
ss.write(f".{suffix}")
ss = ss.getvalue()
save_plot(df, _j(self.args['trtexec_dir'], f'streams_vs_{alias}.batch_size_{batch_size}{ss}.svg'))
def _plot_mps_batch_size_vs_metric_by_num_tasks(self, df, metric, title=None, xlabel=None, ylabel=None, filter_df=None, suffix=None, global_ymax=False):
"""
Throughput graph:
Y-axis = throughput
X-axis (major) = batch-size (larger impact on throughput)
X-axis (minor) = num_tasks (lesser impact on throughput)
Latency graph:
Y-axis = latency samples (mean/std across all processes)
X-axis (major) = batch-size (larger impact on latency)
X-axis (minor) = num_tasks (lesser impact on latency)
"""
if df is None:
return
df = copy.copy(df)
assert metric in df
# df = self._add_config(df, df_type='trtexec')
global_df = df
if filter_df is not None:
df = filter_df(df)
sns.set(style="whitegrid")
g = sns.catplot(x="batch_size",
y=metric,
# data=df,
hue="config",
data=df,
# hue="num_threads", data=df,
# hue=col_titles["num_threads"], data=titled_df,
# height=6,
kind="bar",
palette="muted"
)
g.despine(left=True)
if ylabel is not None:
g.set_ylabels(ylabel)
if xlabel is not None:
g.set_xlabels(xlabel)
if title is not None:
g.fig.suptitle(title)
g.fig.subplots_adjust(top=0.90)
if global_ymax:
new_ymax = global_df[metric].max()
ymin, ymax = g.ax.get_ylim()
g.ax.set_ylim((ymin, max(ymax, new_ymax)))
if suffix is None:
suffix = ""
save_plot(df, _j(self.args['mps_dir'], f'mps_batch_size_vs_{metric}_by_num_tasks{suffix}.svg'))
def _plot_streams_vs_throughput(self, title, batch_size, suffix=None):
if self.trtexec_df is None:
return
df = copy.copy(self.trtexec_df)
"""
WANT:
x_field: batch_size
y_field: metric_value
group_field: num_threads
"""
df = df[df['batch_size'] == batch_size]
# df = keep_cupti_metric(df, cupti_metric)
# titled_df = copy.copy(df)
# col_titles = {
# 'num_threads': 'Number of threads',
# }
# titled_df.rename(columns=col_titles, inplace=True)
df = self._add_config(df, df_type='trtexec')
sns.set(style="whitegrid")
g = sns.catplot(x="streams", y="host_latency_throughput_qps",
# data=df,
hue="config", data=df,
# hue="num_threads", data=df,
# hue=col_titles["num_threads"], data=titled_df,
# height=6,
kind="bar",
palette="muted"
)
g.despine(left=True)
g.set_ylabels(SAMPLE_THROUGHPUT_Y_LABEL)
g.set_xlabels(STREAMS_X_LABEL)
g.fig.suptitle(title)
g.fig.subplots_adjust(top=0.90)
if suffix is None:
suffix = ""
save_plot(df, _j(self.args['trtexec_dir'], f'streams_vs_throughput.batch_size_{batch_size}{suffix}.svg'))
def _add_config(self, df, df_type):
assert df_type in {'trtexec', 'tf_inference'}
if df_type == 'trtexec':
def _config(row):
if row['cuda_graph']:
return 'TensorRT - CUDA graph ON'
return 'TensorRT'
df['config'] = df.apply(_config, axis=1)
elif df_type == 'tf_inference':
def _config(row):
if row['xla']:
return 'TF - XLA ON'
return 'TF'
df['config'] = df.apply(_config, axis=1)
else:
raise NotImplementedError()
return df
def _plot_batch_size_vs_throughput(self, title, streams, filter_df=None, suffix=None):
if self.trtexec_df is None:
return
"""
WANT:
x_field: batch_size
y_field: metric_value
group_field: num_threads
"""
plot_df = pd.DataFrame(columns=['batch_size', 'throughput_qps', 'config'])
if self.trtexec_df is not None:
df = copy.copy(self.trtexec_df)
df = df[df['streams'] == streams]
df.rename(columns={
'host_latency_throughput_qps': 'throughput_qps',
}, inplace=True)
df = self._add_config(df, df_type='trtexec')
plot_df = plot_df.append(df[plot_df.columns])
if self.tf_inference_result_df is not None:
df = copy.copy(self.tf_inference_result_df)
df = self._add_config(df, df_type='tf_inference')
plot_df = plot_df.append(df[plot_df.columns])
plot_df.sort_values(by=['config', 'batch_size'], inplace=True)
if filter_df is not None:
plot_df = filter_df(plot_df)
# df = keep_cupti_metric(df, cupti_metric)
# titled_df = copy.copy(df)
# col_titles = {
# 'num_threads': 'Number of threads',
# }
# titled_df.rename(columns=col_titles, inplace=True)
sns.set(style="whitegrid")
# df = df[["thread_blocks", "metric_value", "num_threads"]]
g = sns.catplot(x="batch_size", y="throughput_qps",
# data=df,
hue="config", data=plot_df,
# hue=col_titles["num_threads"], data=titled_df,
# height=6,
kind="bar",
palette="muted"
)
g.despine(left=True)
g.set_ylabels(SAMPLE_THROUGHPUT_Y_LABEL)
g.set_xlabels(BATCH_SIZE_X_LABEL)
# title = "SM efficiency: percent of SMs\nthat are in use across the entire GPU"
g.fig.suptitle(title)
g.fig.subplots_adjust(top=0.90)
if suffix is None:
suffix = ""
save_plot(plot_df, _j(self.args['trtexec_dir'], f'batch_size_vs_throughput.streams_{streams}{suffix}.svg'))
def parse_trtexec_logs_as_df(self, logs):
def each_field_value(log):
for section in log:
for attr, value in log[section].items():
field = f"{section}_{attr}"
yield field, value
all_fields = set()
if len(logs) > 0:
all_fields = set([field for field, value in each_field_value(logs[0])])
data = dict()
for log in logs:
for field, value in each_field_value(log):
if field not in all_fields:
raise RuntimeError(f"Saw unexpected field={field}; expected one of {all_fields}")
if field not in data:
data[field] = []
data[field].append(value)
df = pd.DataFrame(data)
return df
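    # Illustrative sketch (not from the original source) of the expected shape:
    # given logs = [{'host_latency': {'mean_ms': 1.2}}, {'host_latency': {'mean_ms': 1.4}}],
    # this method would return a DataFrame with a single 'host_latency_mean_ms' column
    # holding [1.2, 1.4] -- one row per parsed trtexec log.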
def parse_trtexec_log(self, trtexec_log_path):
"""
{
'host_latency': {
'min_ms': 0.123,
'mean_ms': 0.123,
...
}
}
:param trtexec_log_path:
:return:
"""
with open(trtexec_log_path) as f:
section = None
data = dict()
def strip_log_prefix(line):
line = re.sub(r'^\[[^\]]+\]\s+\[I\]\s+', '', line)
return line
def as_attr(section):
attr = section
attr = re.sub(' ', '_', attr)
attr = attr.lower()
return attr
def parse_section(line):
m = re.search(r'(?P<section>Host Latency|GPU Compute|Enqueue Time)$', line, flags=re.IGNORECASE)
if m:
section = as_attr(m.group('section'))
return section
return None
def parse_e2e_metric(line):
# NOTE: end-to-end is the time = endOutput - startInput
            # non-end-to-end = (endInput - startInput) + (endCompute - startCompute) + (endOutput - startOutput)
# So, "end-to-end" will include some time spent host-side, whereas non-end-to-end just includes time spent GPU side
# (the transfers, the kernel running).
m = re.search(r'(?P<name>min|max|mean|median): (?P<value>{float}) {unit} \(end to end (?P<e2e_value>{float}) (?P<unit>{unit})\)'.format(
float=FLOAT_RE,
unit=UNIT_RE), line)
if m:
# Just ignore this value...
value = float(m.group('value'))
e2e_value = float(m.group('e2e_value'))
name = "{name}_{unit}".format(name=m.group('name'), unit=m.group('unit'))
name = as_attr(name)
return {
'name': name,
'value': e2e_value,
}
return None
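        # Illustrative only (not verbatim trtexec output): a line of the shape
        #   "mean: 1.23 ms (end to end 1.56 ms)"
        # would be captured by the pattern above, and the end-to-end value (1.56)
        # is the one that gets recorded, under the name "mean_ms".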
def parse_metric_with_unit(line):
m = re.search(r'(?P<name>[a-zA-Z][a-zA-Z ]+): (?P<value>{float}) (?P<unit>{unit})'.format(
float=FLOAT_RE,
unit=UNIT_RE), line)
if m:
value = float(m.group('value'))
name = "{name}_{unit}".format(name=m.group('name'), unit=m.group('unit'))
name = as_attr(name)
return {
'name': name,
'value': value,
}
return None
def parse_percentile(line):
m = re.search(r'(?P<name>percentile): (?P<value>{float}) (?P<unit>{unit}) at (?P<percent>\d+)%'.format(
float=FLOAT_RE,
unit=UNIT_RE), line)
if m:
value = float(m.group('value'))
name = "{name}_{percent}_{unit}".format(
name=m.group('name'),
percent=m.group('percent'),
unit=m.group('unit'))
name = as_attr(name)
return {
'name': name,
'value': value,
}
return None
def parse_e2e_percentile(line):
m = re.search(r'(?P<name>percentile): [^(]+\(end to end (?P<value>{float}) (?P<unit>{unit}) at (?P<percent>\d+)%\)'.format(
float=FLOAT_RE,
unit=UNIT_RE), line)
if m:
value = float(m.group('value'))
name = "{name}_{percent}_{unit}".format(
name=m.group('name'),
percent=m.group('percent'),
unit=m.group('unit'))
name = as_attr(name)
return {
'name': name,
'value': value,
}
return None
def _add_parsed_value(dic):
if section not in data:
data[section] = dict()
data[section][dic['name']] = dic['value']
for lineno, line in enumerate(f, start=1):
line = line.rstrip()
ret = parse_section(line)
if ret:
section = ret
continue
if section is None:
continue
line = strip_log_prefix(line)
ret = parse_e2e_metric(line)
if ret:
_add_parsed_value(ret)
continue
ret = parse_e2e_percentile(line)
if ret:
_add_parsed_value(ret)
continue
ret = parse_percentile(line)
if ret:
_add_parsed_value(ret)
continue
ret = parse_metric_with_unit(line)
if ret:
_add_parsed_value(ret)
continue
if self.debug:
logger.info("Skip {path}:{lineno}: {line}".format(
path=trtexec_log_path,
lineno=lineno,
line=line,
))
return data
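    # Hypothetical usage sketch (names are illustrative, not from the original source):
    #   data = self.parse_trtexec_log("trtexec.log.txt")
    #   data['host_latency']['mean_ms']          # e.g. mean host latency in milliseconds
    #   data['gpu_compute']['percentile_99_ms']  # e.g. 99th-percentile GPU compute time
    # Section keys come from as_attr() above ("Host Latency" -> "host_latency", etc.).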
@property
def debug(self):
return self.args['debug']
def _read_mps_df(self):
self.mps_df = None
self.mps_raw_df = None
if self.args['mps_dir'] is None:
return
"""
/home/jgleeson/clone/rlscope/output/microbench_inference_multiprocess/batch_size_128.num_tasks_1.env_id_BreakoutNoFrameskip-v4.num_sms_68.sms_allocated_68.CUDA_MPS_ACTIVE_THREAD_PERCENTAGE_100.0
"""
mps_dflt_attrs = {
'num_sms': None,
'sms_allocated': None,
'sm_alloc_strategy': None,
'CUDA_MPS_ACTIVE_THREAD_PERCENTAGE': None,
}
mps_attr_types = {
'mps': yes_as_bool,
'cpu': yes_as_bool,
'batch_size': maybe_number,
'num_tasks': maybe_number,
'env_id': str,
'num_sms': maybe_number,
'sms_allocated': maybe_number,
'sm_alloc_strategy': str,
'CUDA_MPS_ACTIVE_THREAD_PERCENTAGE': maybe_number,
}
mps_attrs = set(mps_attr_types.keys())
dfs = []
raw_dfs = []
for path in each_file_recursive(self.args['mps_dir']):
if not re.search(r'^mode_microbench_inference_multiprocess\.merged\.json$', _b(path)):
continue
js = load_json(path)
df = pd.DataFrame(
dict((k, [v]) for k, v in js['summary_metrics'].items())
)
attr_dict = parse_path_attrs(
path,
mps_attrs,
mps_dflt_attrs,
mps_attr_types,
)
for attr_name, attr_value in attr_dict.items():
df[attr_name] = attr_value
dfs.append(df)
# Q: Should we discard outliers...?
raw_df = pd.DataFrame(data=js['raw_samples'])
for attr_name, attr_value in attr_dict.items():
raw_df[attr_name] = attr_value
raw_dfs.append(raw_df)
self.mps_df = pd.concat(dfs)
self.mps_raw_df = pd.concat(raw_dfs)
def _add_config(df):
def _config(row):
if row['mps']:
assert row['CUDA_MPS_ACTIVE_THREAD_PERCENTAGE'] is not None
return multitask_title('process MPS', 'processes', n_tasks=row['num_tasks'], sep=' ')
assert row['CUDA_MPS_ACTIVE_THREAD_PERCENTAGE'] is None
return multitask_title('process', 'processes', n_tasks=row['num_tasks'], sep=' ')
df['config'] = df.apply(_config, axis=1)
return df
def _sort(df):
df = df.sort_values(by=['batch_size', 'num_tasks'])
return df
def _prepare_df(df):
df = _add_config(df)
df = _sort(df)
return df
self.mps_df = _prepare_df(self.mps_df)
self.mps_raw_df = _prepare_df(self.mps_raw_df)
self.mps_raw_df['inference_time_ms'] = self.mps_raw_df['inference_time_sec'] * 1000
logger.info("mps dataframe:\n{msg}".format(
msg=txt_indent(DataFrame.dataframe_string(self.mps_df), indent=1),
))
logger.info("mps_raw dataframe:\n{msg}".format(
msg=txt_indent(DataFrame.dataframe_string(self.mps_raw_df), indent=1),
))
def _read_simulator_df(self):
self.simulator_df = None
self.simulator_raw_df = None
if self.args['simulator_dir'] is None:
return
"""
/home/jgleeson/clone/rlscope/output/simulator/batch_size_8.xla_no/GPUHwCounterSampler.csv
"""
simulator_dflt_attrs = {
}
simulator_attrs = {
'env_id',
}
simulator_attr_types = {
'env_id': str,
}
dfs = []
raw_dfs = []
for path in each_file_recursive(self.args['simulator_dir']):
if not re.search(r'^mode_microbench_simulator\.json$', _b(path)):
continue
js = load_json(path)
df = pd.DataFrame(
dict((k, [v]) for k, v in js['summary_metrics'].items())
)
sm_attrs = parse_path_attrs(
path,
simulator_attrs,
simulator_dflt_attrs,
simulator_attr_types,
)
for attr_name, attr_value in sm_attrs.items():
df[attr_name] = attr_value
dfs.append(df)
# Q: Should we discard outliers...?
raw_df = pd.DataFrame(data={
'step_time_sec': js['raw_samples']['step_time_sec']
})
for attr_name, attr_value in sm_attrs.items():
raw_df[attr_name] = attr_value
raw_dfs.append(raw_df)
self.simulator_df = pd.concat(dfs)
self.simulator_raw_df = pd.concat(raw_dfs)
self.simulator_df = self._add_simulator(self.simulator_df)
self.simulator_raw_df = self._add_simulator(self.simulator_raw_df)
logger.info("simulator dataframe:\n{msg}".format(
msg=txt_indent(DataFrame.dataframe_string(self.simulator_df), indent=1),
))
logger.info("simulator_raw dataframe:\n{msg}".format(
msg=txt_indent(DataFrame.dataframe_string(self.simulator_raw_df), indent=1),
))
def _plot_simulator_vs_steptime(self):
"""
x = simulator
y = mean step time (seconds)
:return:
"""
if self.simulator_raw_df is None:
return
plot_df = | pd.DataFrame(columns=['simulator', 'step_time_ms']) | pandas.DataFrame |
"""Module responsible for grouping the related row of elements and to it's
respective columns"""
from multiprocessing import Process, Queue
from typing import List, Dict
# pylint: disable=relative-beyond-top-level
import pandas as pd
from PIL import Image
from mystique.extract_properties import CollectProperties
from .container_group import ContainerGroup
from .ds_helper import DsHelper, ContainerDetailTemplate
from .objects_group import RowColumnGrouping
def get_layout_structure(predicted_objects: List, queue=None) -> List:
"""
    Handles generating the hierarchical layout.
@param predicted_objects: detected list of design objects from the model
@param queue: Queue object of the calling process
as a part of multi-process queue
@return: generated hierarchical card layout
"""
card_layout = []
# group row and columns
    # sorting the design objects y-wise (by vertical position)
predicted_objects = [
value
for _, value in sorted(
zip(
| pd.DataFrame(predicted_objects) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Class for co-locationship, meetup_strategy
# (c) <NAME>, 2020-12-09
# <EMAIL>
import pandas as pd
import numpy as np
from scipy.stats import spearmanr
from scipy.stats import kendalltau
from scipy.stats import ttest_ind
from scipy.stats import ttest_rel
def spearman_kendall_test(df, item, alpha=0.05, increasing=True,
rank_in='Rank',
category_in='category',
dataset_in='dataset',
userid_in='userid'
):
"""
    Perform Spearman's and Kendall's tests for an increasing or decreasing trend.
:param df: dataframe, it should include both column 'item' and column 'ranking'
:param item: string, column of target's label
:param rank_in:string, column of rank's label
:param category_in: string, column of category's label
:param userid_in: string, column of userid's label
:param dataset_in: string, column of dataset's label
:param alpha: significant level
:param increasing: bool, test for increasing trend or decreasing trend
    :return: dataframe filled with all test results
"""
category = sorted(list(set(df[category_in].tolist())))
dataset = sorted(list(set(df[dataset_in].tolist())))
test_result = []
for ds in dataset:
for cat in category:
count_sm, count_kd = 0, 0
df_temp = df[(df[dataset_in] == ds) & (df[category_in] == cat)]
ur_ds = df_temp[userid_in].unique().tolist()
for user in ur_ds:
rank = df_temp[df_temp[userid_in] == user][rank_in].tolist()
item_specify = df_temp[df_temp[userid_in] == user][item].tolist()
coef_sm, p_sm = spearmanr(rank, item_specify)
coef_kd, p_kd = kendalltau(rank, item_specify)
if increasing:
if (coef_sm > 0) & (p_sm < alpha):
count_sm += 1
if (coef_kd > 0) & (p_kd < alpha):
count_kd += 1
else:
if (coef_sm < 0) & (p_sm < alpha):
count_sm += 1
if (coef_kd < 0) & (p_kd < alpha):
count_kd += 1
test_result.append([ds, cat,
count_sm, count_sm / len(ur_ds),
count_kd, count_kd / len(ur_ds),
len(ur_ds)]
)
stats_test = pd.DataFrame(test_result, columns=[dataset_in,
category_in,
'SpN', 'SpP', 'Kn', 'Kp',
'total']
).sort_values([dataset_in, category_in])
return stats_test
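# Hypothetical usage (column names follow the defaults above; the item column is illustrative):
#   df columns: 'userid', 'dataset', 'category', 'Rank', plus the tested item column
#   stats = spearman_kendall_test(df, item='colocation_entropy', increasing=True)
# The returned frame reports, per (dataset, category), how many users show a
# significant monotonic trend ('SpN'/'Kn') and the corresponding fractions ('SpP'/'Kp').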
def two_side_t_test(df, item, alpha=0.01, method='paired', difference=False,
category_in='category',
dataset_in='dataset',
userid_in='userid'
):
"""
    Perform a two-sided t-test, either independent or paired-sample.
:param df: dataframe, it should include the column 'item'
:param item: string, column of target's label
:param category_in: string, column of category's label
:param userid_in: string, column of userid's label
:param dataset_in: string, column of dataset's label
:param alpha: significant level
:param method: string, using 'paired' or not
:param difference: bool, test for difference or for same
:return: a nested list filled with dataframe of test results, and a list of datasets' names
"""
category = sorted(list(set(df[category_in].tolist())))
dataset = sorted(list(set(df[dataset_in].tolist())))
n_cat = len(category)
    if method == 'paired':
func = ttest_rel
else:
func = ttest_ind
stats_list = []
for ds in dataset:
df_temp = df[df[dataset_in] == ds]
ur_ds = df_temp[userid_in].unique().tolist()
n_users = len(ur_ds)
result = []
for cat1 in category:
for cat2 in category:
count = 0
for user in ur_ds:
df_cat1 = df_temp[(df_temp[category_in] == cat1) & (df_temp[userid_in] == user)][item]
df_cat2 = df_temp[(df_temp[category_in] == cat2) & (df_temp[userid_in] == user)][item]
stats, p = func(df_cat1, df_cat2)
if difference:
if (p < alpha) | (np.isnan(p)):
count += 1
else:
if (p > alpha) | (np.isnan(p)):
count += 1
result.append(count / n_users)
result = np.array(result).reshape(n_cat, n_cat)
result = | pd.DataFrame(result, columns=category, index=category) | pandas.DataFrame |
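# Hypothetical usage (illustrative names; the visible body is truncated at this point):
#   result_frames, datasets = two_side_t_test(df, item='predictability', method='paired')
# Based on the docstring, each entry of the nested list is a category-by-category
# matrix holding the fraction of users whose two samples test as the same (or as
# different, when difference=True), one matrix per dataset.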
import pandas as pd
import numpy as np
import matplotlib
from datetime import datetime as dt
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, LSTM, Dropout, Activation, Bidirectional
from tensorflow.keras.preprocessing.sequence import TimeseriesGenerator
from tensorflow.keras.callbacks import EarlyStopping
from sklearn.metrics import mean_squared_error
def readCSV(state_name):
df = | pd.read_csv('prediction_model/state_fb1.csv') | pandas.read_csv |
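# Minimal sketch (an assumption, not the original script's model): one plausible way
# the imports above get wired together for univariate forecasting -- scale the series,
# window it with TimeseriesGenerator, and fit a small LSTM network.
def build_lstm_model(n_lag, n_features=1):
    # Hypothetical architecture; layer sizes and dropout are illustrative only.
    model = Sequential([
        LSTM(64, activation='relu', input_shape=(n_lag, n_features)),
        Dropout(0.2),
        Dense(1),
    ])
    model.compile(optimizer='adam', loss='mse')
    return model
# Example (hypothetical) training call:
#   scaled = MinMaxScaler().fit_transform(values.reshape(-1, 1))
#   gen = TimeseriesGenerator(scaled, scaled, length=n_lag, batch_size=32)
#   build_lstm_model(n_lag).fit(gen, epochs=50,
#                               callbacks=[EarlyStopping(monitor='loss', patience=5)])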
import itertools
from typing import Any, Callable, Sequence, Tuple
import dill as pickle
import jax.numpy as np
import numpy as onp
import pandas as pd
from jax import grad, jit, ops, random
from jax.experimental.optimizers import Optimizer, adam
from pzflow import distributions
from pzflow.bijectors import Bijector_Info, InitFunction, Pytree
from pzflow.utils import build_bijector_from_info, gaussian_error_model
class Flow:
"""A normalizing flow that models tabular data.
Attributes
----------
data_columns : tuple
List of DataFrame columns that the flow expects/produces.
conditional_columns : tuple
List of DataFrame columns on which the flow is conditioned.
info : Any
Object containing any kind of info included with the flow.
Often describes the data the flow is trained on.
latent
The latent distribution of the normalizing flow.
        Has its own sample and log_prob methods.
"""
def __init__(
self,
data_columns: Sequence[str] = None,
bijector: Tuple[InitFunction, Bijector_Info] = None,
conditional_columns: Sequence[str] = None,
latent=None,
data_error_model: Callable = None,
condition_error_model: Callable = None,
autoscale_conditions: bool = True,
seed: int = 0,
info: Any = None,
file: str = None,
_dictionary: dict = None,
):
"""Instantiate a normalizing flow.
Note that while all of the init parameters are technically optional,
you must provide either data_columns and bijector OR file.
In addition, if a file is provided, all other parameters must be None.
Parameters
----------
data_columns : Sequence[str], optional
Tuple, list, or other container of column names.
These are the columns the flow expects/produces in DataFrames.
bijector : Bijector Call, optional
A Bijector call that consists of the bijector InitFunction that
initializes the bijector and the tuple of Bijector Info.
Can be the output of any Bijector, e.g. Reverse(), Chain(...), etc.
conditional_columns : Sequence[str], optional
Names of columns on which to condition the normalizing flow.
latent : distribution, optional
The latent distribution for the normalizing flow. Can be any of
the distributions from pzflow.distributions. If not provided,
a normal distribution is used with the number of dimensions
inferred.
data_error_model : Callable, optional
A callable that defines the error model for data variables.
data_error_model must take key, X, Xerr, nsamples as arguments where:
key is a jax rng key, e.g. jax.random.PRNGKey(0)
X is a 2 dimensional array of data variables, where the order
of variables matches the order of the columns in data_columns
Xerr is the corresponding 2 dimensional array of errors
nsamples is the number of samples to draw from the error distribution
data_error_model must return an array of samples with the shape
(X.shape[0], nsamples, X.shape[1]).
If data_error_model is not provided, a Gaussian error model is assumed.
condition_error_model : Callable, optional
A callable that defines the error model for conditional variables.
condition_error_model must take key, X, Xerr, nsamples as arguments where:
key is a jax rng key, e.g. jax.random.PRNGKey(0)
X is a 2 dimensional array of conditional variables, where the order
of variables matches the order of the columns in conditional_columns
Xerr is the corresponding 2 dimensional array of errors
nsamples is the number of samples to draw from the error distribution
condition_error_model must return an array of samples with the shape
(X.shape[0], nsamples, X.shape[1]).
If condition_error_model is not provided, a Gaussian error model is assumed.
autoscale_conditions : bool, default=True
Sets whether or not conditions are automatically standard scaled when
passed to a conditional flow. I recommend you leave this as True.
seed : int, default=0
The random seed for initial parameters
info : Any, optional
An object to attach to the info attribute.
file : str, optional
Path to file from which to load a pretrained flow.
If a file is provided, all other parameters must be None.
"""
# validate parameters
if (
data_columns is None
and bijector is None
and file is None
and _dictionary is None
):
raise ValueError("You must provide data_columns and bijector OR file.")
if data_columns is not None and bijector is None:
raise ValueError("Please also provide a bijector.")
if data_columns is None and bijector is not None:
raise ValueError("Please also provide data_columns.")
if any(
(
data_columns is not None,
bijector is not None,
conditional_columns is not None,
latent is not None,
data_error_model is not None,
condition_error_model is not None,
info is not None,
)
):
if file is not None:
raise ValueError(
"If providing a file, please do not provide any other parameters."
)
if _dictionary is not None:
raise ValueError(
"If providing a dictionary, please do not provide any other parameters."
)
if file is not None and _dictionary is not None:
raise ValueError("Only provide file or _dictionary, not both.")
# if file or dictionary is provided, load everything from it
if file is not None or _dictionary is not None:
save_dict = self._save_dict()
if file is not None:
with open(file, "rb") as handle:
save_dict.update(pickle.load(handle))
else:
save_dict.update(_dictionary)
if save_dict["class"] != self.__class__.__name__:
raise TypeError(
f"This save file isn't a {self.__class__.__name__}."
+ f"It is a {save_dict['class']}"
)
# load columns and dimensions
self.data_columns = save_dict["data_columns"]
self.conditional_columns = save_dict["conditional_columns"]
self._input_dim = len(self.data_columns)
self.info = save_dict["info"]
# load the latent distribution
self._latent_info = save_dict["latent_info"]
self.latent = getattr(distributions, self._latent_info[0])(
*self._latent_info[1]
)
# load the error models
self.data_error_model = save_dict["data_error_model"]
self.condition_error_model = save_dict["condition_error_model"]
# load the bijector
self._bijector_info = save_dict["bijector_info"]
init_fun, _ = build_bijector_from_info(self._bijector_info)
_, self._forward, self._inverse = init_fun(
random.PRNGKey(0), self._input_dim
)
self._params = save_dict["params"]
# load the conditional means and stds
self._condition_means = save_dict["condition_means"]
self._condition_stds = save_dict["condition_stds"]
# set whether or not to automatically standard scale any
# conditions passed to the normalizing flow
self._autoscale_conditions = save_dict["autoscale_conditions"]
# if no file is provided, use provided parameters
else:
self.data_columns = tuple(data_columns)
self._input_dim = len(self.data_columns)
self.info = info
if conditional_columns is None:
self.conditional_columns = None
self._condition_means = None
self._condition_stds = None
else:
self.conditional_columns = tuple(conditional_columns)
self._condition_means = np.zeros(len(self.conditional_columns))
self._condition_stds = np.ones(len(self.conditional_columns))
# set whether or not to automatically standard scale any
# conditions passed to the normalizing flow
self._autoscale_conditions = autoscale_conditions
# set up the latent distribution
if latent is None:
self.latent = distributions.Normal(self._input_dim)
else:
self.latent = latent
self._latent_info = self.latent.info
# set up the error models
if data_error_model is None:
self.data_error_model = gaussian_error_model
else:
self.data_error_model = data_error_model
if condition_error_model is None:
self.condition_error_model = gaussian_error_model
else:
self.condition_error_model = condition_error_model
# set up the bijector with random params
init_fun, self._bijector_info = bijector
bijector_params, self._forward, self._inverse = init_fun(
random.PRNGKey(seed), self._input_dim
)
self._params = (self.latent._params, bijector_params)
def _get_conditions(self, inputs: pd.DataFrame) -> np.ndarray:
"""Return an array of the bijector conditions."""
# if this isn't a conditional flow, just return empty conditions
if self.conditional_columns is None:
conditions = np.zeros((inputs.shape[0], 1))
# if this a conditional flow, return an array of the conditions
else:
columns = list(self.conditional_columns)
conditions = np.array(inputs[columns].values)
conditions = (conditions - self._condition_means) / self._condition_stds
return conditions
def _get_err_samples(
self,
key,
inputs: pd.DataFrame,
err_samples: int,
type: str = "data",
skip: str = None,
) -> np.ndarray:
"""Draw error samples for each row of inputs. """
X = inputs.copy()
# get list of columns
if type == "data":
columns = list(self.data_columns)
error_model = self.data_error_model
elif type == "conditions":
if self.conditional_columns is None:
return np.zeros((err_samples * X.shape[0], 1))
else:
columns = list(self.conditional_columns)
error_model = self.condition_error_model
else:
raise ValueError("type must be `data` or `conditions`.")
# make sure all relevant variables have error columns
for col in columns:
# if errors not provided for the column, fill in zeros
if f"{col}_err" not in inputs.columns and col != skip:
X[f"{col}_err"] = np.zeros(X.shape[0])
# if we are skipping this column, fill in nan's
elif col == skip:
X[col] = np.nan * np.zeros(X.shape[0])
X[f"{col}_err"] = np.nan * np.zeros(X.shape[0])
# pull out relevant columns
err_columns = [col + "_err" for col in columns]
X, Xerr = np.array(X[columns].values), np.array(X[err_columns].values)
# generate samples
Xsamples = error_model(key, X, Xerr, err_samples)
Xsamples = Xsamples.reshape(X.shape[0] * err_samples, X.shape[1])
# delete the column corresponding to skip
if skip is not None:
idx = columns.index(skip)
Xsamples = np.delete(Xsamples, idx, axis=1)
# if these are samples of conditions, standard scale them!
if type == "conditions":
Xsamples = (Xsamples - self._condition_means) / self._condition_stds
return Xsamples
def _log_prob(
self, params: Pytree, inputs: np.ndarray, conditions: np.ndarray
) -> np.ndarray:
"""Log prob for arrays."""
# calculate log_prob
u, log_det = self._forward(params[1], inputs, conditions=conditions)
log_prob = self.latent.log_prob(params[0], u) + log_det
# set NaN's to negative infinity (i.e. zero probability)
log_prob = np.nan_to_num(log_prob, nan=np.NINF)
return log_prob
def log_prob(
self, inputs: pd.DataFrame, err_samples: int = None, seed: int = None
) -> np.ndarray:
"""Calculates log probability density of inputs.
Parameters
----------
inputs : pd.DataFrame
Input data for which log probability density is calculated.
Every column in self.data_columns must be present.
If self.conditional_columns is not None, those must be present
as well. If other columns are present, they are ignored.
err_samples : int, default=None
Number of samples from the error distribution to average over for
the log_prob calculation. If provided, Gaussian errors are assumed,
and method will look for error columns in `inputs`. Error columns
must end in `_err`. E.g. the error column for the variable `u` must
be `u_err`. Zero error assumed for any missing error columns.
seed : int, default=None
Random seed for drawing the samples with Gaussian errors.
Returns
-------
np.ndarray
Device array of shape (inputs.shape[0],).
"""
if err_samples is None:
# convert data to an array with columns ordered
columns = list(self.data_columns)
X = np.array(inputs[columns].values)
# get conditions
conditions = self._get_conditions(inputs)
# calculate log_prob
return self._log_prob(self._params, X, conditions)
else:
# validate nsamples
assert isinstance(
err_samples, int
), "err_samples must be a positive integer."
assert err_samples > 0, "err_samples must be a positive integer."
# get Gaussian samples
seed = onp.random.randint(1e18) if seed is None else seed
key = random.PRNGKey(seed)
X = self._get_err_samples(key, inputs, err_samples, type="data")
C = self._get_err_samples(key, inputs, err_samples, type="conditions")
# calculate log_probs
log_probs = self._log_prob(self._params, X, C)
probs = np.exp(log_probs.reshape(-1, err_samples))
return np.log(probs.mean(axis=1))
def posterior(
self,
inputs: pd.DataFrame,
column: str,
grid: np.ndarray,
marg_rules: dict = None,
normalize: bool = True,
err_samples: int = None,
seed: int = None,
batch_size: int = None,
nan_to_zero: bool = True,
) -> np.ndarray:
"""Calculates posterior distributions for the provided column.
Calculates the conditional posterior distribution, assuming the
data values in the other columns of the DataFrame.
Parameters
----------
inputs : pd.DataFrame
Data on which the posterior distributions are conditioned.
Must have columns matching self.data_columns, *except*
for the column specified for the posterior (see below).
column : str
Name of the column for which the posterior distribution
is calculated. Must be one of the columns in self.data_columns.
However, whether or not this column is one of the columns in
`inputs` is irrelevant.
grid : np.ndarray
Grid on which to calculate the posterior.
marg_rules : dict, optional
Dictionary with rules for marginalizing over missing variables.
The dictionary must contain the key "flag", which gives the flag
that indicates a missing value. E.g. if missing values are given
the value 99, the dictionary should contain {"flag": 99}.
The dictionary must also contain {"name": callable} for any
variables that will need to be marginalized over, where name is
the name of the variable, and callable is a callable that takes
            the row of variables and returns a grid over which to marginalize
the variable. E.g. {"y": lambda row: np.linspace(0, row["x"], 10)}.
Note: the callable for a given name must *always* return an array
of the same length, regardless of the input row.
err_samples : int, default=None
Number of samples from the error distribution to average over for
the posterior calculation. If provided, Gaussian errors are assumed,
and method will look for error columns in `inputs`. Error columns
must end in `_err`. E.g. the error column for the variable `u` must
be `u_err`. Zero error assumed for any missing error columns.
seed : int, default=None
Random seed for drawing the samples with Gaussian errors.
batch_size : int, default=None
Size of batches in which to calculate posteriors. If None, all
posteriors are calculated simultaneously. Simultaneous calculation
is faster, but memory intensive for large data sets.
normalize : boolean, default=True
Whether to normalize the posterior so that it integrates to 1.
nan_to_zero : bool, default=True
Whether to convert NaN's to zero probability in the final pdfs.
Returns
-------
np.ndarray
Device array of shape (inputs.shape[0], grid.size).
"""
# get the index of the provided column, and remove it from the list
columns = list(self.data_columns)
idx = columns.index(column)
columns.remove(column)
nrows = inputs.shape[0]
batch_size = nrows if batch_size is None else batch_size
# make sure indices run 0 -> nrows
inputs = inputs.reset_index(drop=True)
if err_samples is not None:
# validate nsamples
assert isinstance(
err_samples, int
), "err_samples must be a positive integer."
assert err_samples > 0, "err_samples must be a positive integer."
# set the seed
seed = onp.random.randint(1e18) if seed is None else seed
key = random.PRNGKey(seed)
# empty array to hold pdfs
pdfs = np.zeros((nrows, len(grid)))
# if marginalization rules were passed, we will loop over the rules
# and repeatedly call this method
if marg_rules is not None:
# if the flag is NaN, we must use np.isnan to check for flags
if onp.isnan(marg_rules["flag"]):
def check_flags(data):
return onp.isnan(data)
# else we use np.isclose to check for flags
else:
def check_flags(data):
return onp.isclose(data, marg_rules["flag"])
# first calculate pdfs for unflagged rows
unflagged_idx = inputs[
~check_flags(inputs[columns]).any(axis=1)
].index.tolist()
unflagged_pdfs = self.posterior(
inputs=inputs.iloc[unflagged_idx],
column=column,
grid=grid,
err_samples=err_samples,
seed=seed,
batch_size=batch_size,
normalize=False,
nan_to_zero=nan_to_zero,
)
# save these pdfs in the big array
pdfs = ops.index_update(
pdfs,
ops.index[unflagged_idx, :],
unflagged_pdfs,
indices_are_sorted=True,
unique_indices=True,
)
# we will keep track of all the rows we've already calculated
# posteriors for
already_done = unflagged_idx
# now we will loop over the rules in marg_rules
for name, rule in marg_rules.items():
# ignore the flag, because that's not a column in the data
if name == "flag":
continue
# get the list of new rows for which we need to calculate posteriors
flagged_idx = inputs[check_flags(inputs[name])].index.tolist()
flagged_idx = list(set(flagged_idx).difference(already_done))
# if flagged_idx is empty, move on!
if len(flagged_idx) == 0:
continue
# get the marginalization grid for each row
marg_grids = (
inputs.iloc[flagged_idx]
.apply(rule, axis=1, result_type="expand")
.values
)
# make a new data frame with the marginalization grids replacing
# the values of the flag in the column
marg_inputs = pd.DataFrame(
np.repeat(
inputs.iloc[flagged_idx].values, marg_grids.shape[1], axis=0
),
columns=inputs.columns,
)
marg_inputs[name] = marg_grids.reshape(marg_inputs.shape[0], 1)
# remove the error column if it's present
marg_inputs.drop(f"{name}_err", axis=1, inplace=True, errors="ignore")
# calculate posteriors for these
marg_pdfs = self.posterior(
inputs=marg_inputs,
column=column,
grid=grid,
marg_rules=marg_rules,
err_samples=err_samples,
seed=seed,
batch_size=batch_size,
normalize=False,
nan_to_zero=nan_to_zero,
)
# sum over the marginalized dimension
marg_pdfs = marg_pdfs.reshape(
len(flagged_idx), marg_grids.shape[1], grid.size
)
marg_pdfs = marg_pdfs.sum(axis=1)
# save the new pdfs in the big array
pdfs = ops.index_update(
pdfs,
ops.index[flagged_idx, :],
marg_pdfs,
indices_are_sorted=True,
unique_indices=True,
)
# add these flagged indices to the list of rows already done
already_done += flagged_idx
# now for the main posterior calculation loop
else:
# loop through batches
for batch_idx in range(0, nrows, batch_size):
# get the data batch
                # and, if this is a conditional flow, the corresponding conditions
batch = inputs.iloc[batch_idx : batch_idx + batch_size]
# if not drawing samples, just grab batch and conditions
if err_samples is None:
conditions = self._get_conditions(batch)
batch = np.array(batch[columns].values)
# if only drawing condition samples...
elif len(self.data_columns) == 1:
conditions = self._get_err_samples(
key, batch, err_samples, type="conditions"
)
batch = np.repeat(batch[columns].values, err_samples, axis=0)
# if drawing data and condition samples...
else:
conditions = self._get_err_samples(
key, batch, err_samples, type="conditions"
)
batch = self._get_err_samples(
key, batch, err_samples, skip=column, type="data"
)
# make a new copy of each row for each value of the column
# for which we are calculating the posterior
batch = np.hstack(
(
np.repeat(batch[:, :idx], len(grid), axis=0,),
np.tile(grid, len(batch))[:, None],
np.repeat(batch[:, idx:], len(grid), axis=0,),
)
)
# make similar copies of the conditions
conditions = np.repeat(conditions, len(grid), axis=0)
# calculate probability densities
log_prob = self._log_prob(self._params, batch, conditions).reshape(
(-1, len(grid))
)
prob = np.exp(log_prob)
# if we were Gaussian sampling, average over the samples
if err_samples is not None:
prob = prob.reshape(-1, err_samples, len(grid))
prob = prob.mean(axis=1)
# add the pdfs to the bigger list
pdfs = ops.index_update(
pdfs,
ops.index[batch_idx : batch_idx + batch_size, :],
prob,
indices_are_sorted=True,
unique_indices=True,
)
if normalize:
# normalize so they integrate to one
pdfs = pdfs / np.trapz(y=pdfs, x=grid).reshape(-1, 1)
if nan_to_zero:
# set NaN's equal to zero probability
pdfs = np.nan_to_num(pdfs, nan=0.0)
return pdfs
def sample(
self,
nsamples: int = 1,
conditions: pd.DataFrame = None,
save_conditions: bool = True,
seed: int = None,
) -> pd.DataFrame:
"""Returns samples from the normalizing flow.
Parameters
----------
nsamples : int, default=1
The number of samples to be returned.
conditions : pd.DataFrame, optional
If this is a conditional flow, you must pass conditions for
each sample. nsamples will be drawn for each row in conditions.
save_conditions : bool, default=True
If true, conditions will be saved in the DataFrame of samples
that is returned.
seed : int, optional
Sets the random seed for the samples.
Returns
-------
pd.DataFrame
Pandas DataFrame of samples.
"""
# validate nsamples
assert isinstance(nsamples, int), "nsamples must be a positive integer."
assert nsamples > 0, "nsamples must be a positive integer."
if self.conditional_columns is not None and conditions is None:
raise ValueError(
f"Must provide the following conditions\n{self.conditional_columns}"
)
# if this isn't a conditional flow, get empty conditions
if self.conditional_columns is None:
conditions = np.zeros((nsamples, 1))
# otherwise get conditions and make `nsamples` copies of each
else:
conditions = self._get_conditions(conditions)
conditions = np.repeat(conditions, nsamples, axis=0)
# draw from latent distribution
u = self.latent.sample(self._params[0], conditions.shape[0], seed)
# take the inverse back to the data distribution
x = self._inverse(self._params[1], u, conditions=conditions)[0]
# if not conditional, or save_conditions is False, this is all we need
if self.conditional_columns is None or save_conditions is False:
x = | pd.DataFrame(x, columns=self.data_columns) | pandas.DataFrame |
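# Hypothetical usage sketch (not from the original source; the column names and the
# bijector choice are assumptions -- any (InitFunction, Bijector_Info) pair from
# pzflow.bijectors can be passed as `bijector`):
#   flow = Flow(data_columns=("redshift", "u", "g"), bijector=some_bijector)
#   samples = flow.sample(1000, seed=0)            # DataFrame with the data columns
#   logp = flow.log_prob(samples)                  # one log-density per row
#   pdfs = flow.posterior(samples, column="redshift",
#                         grid=np.linspace(0, 3, 100))   # shape (nrows, grid.size)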
from infomemes.utils import media_color_schema
from sklearn.cluster import DBSCAN
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import json
def read_sim_results(sim, step_filtered=0):
"""
Basic analysis of a simulation.
sim: simulation object or string with path to json file.
step_filtered: int
        Produces a boolean array 'filtered' with True for all media that were
active at any step > step_filtered.
"""
if isinstance(sim, str):
with open(sim, 'r') as f:
sim = json.loads(f.read())
# metadata
duration = sim['metadata']['duration']
media_reproduction_rate = sim['metadata']['media_reproduction_rate']
media_deactivation_rate = sim['metadata']['media_deactivation_rate']
covariance_punishment = sim['metadata']['covariance_punishment']
individuals_xy = np.array(sim['metadata']['individuals_xy'])
individual_renewal_rate = sim['metadata']['individual_renewal_rate']
individual_mui = sim['metadata']['individual_mui']
individual_mcr = sim['metadata']['individual_mcr']
max_reward = sim['metadata']['max_reward']
# data
activated = np.array(list(sim['data']['activated'].values()))
deactivated = np.array(list(sim['data']['deactivated'].values()))
active = np.array([t == duration for t in deactivated])
survival_times = np.array(deactivated) - np.array(activated)
position_x = np.array(list(sim['data']['position_x'].values()))
position_y = np.array(list(sim['data']['position_y'].values()))
cov_x = np.array(list(sim['data']['cov_x'].values()))
cov_y = np.array(list(sim['data']['cov_y'].values()))
cov_diagonal = cov_x + cov_y
cov_xy = np.array(list(sim['data']['cov_xy'].values()))
mpr = np.array(list(sim['data']['meme_production_rate'].values()))
filtered = np.array([deact > step_filtered for deact in deactivated])
else:
survival_times = []
position_x = []
position_y = []
cov_diagonal = []
cov_xy = []
mpr = []
for m in sim.all_media:
if m.active:
survival_times.append(sim.current_step - m.activated)
else:
survival_times.append(m.deactivated - m.activated)
position_x.append(m.x)
position_y.append(m.y)
cov_diagonal.append(m.cov[0, 0] + m.cov[1, 1])
cov_xy.append(m.cov[0, 1])
mpr.append(m.meme_production_rate)
results = {
'duration': duration,
'media_reproduction_rate': media_reproduction_rate,
'media_deactivation_rate': media_deactivation_rate,
'max_reward': max_reward,
'covariance_punishment': covariance_punishment,
'individuals_xy': individuals_xy,
'individual_renewal_rate': individual_renewal_rate,
'individual_mui': individual_mui,
'individual_mcr': individual_mcr,
'activated': activated[filtered],
'deactivated': deactivated[filtered],
'active': active[filtered],
'survival_times': survival_times[filtered].astype('int'),
'position_x': position_x[filtered],
'position_y': position_y[filtered],
'cov_x': cov_x[filtered],
'cov_y': cov_y[filtered],
'cov_diagonal': cov_diagonal[filtered],
'cov_xy': cov_xy[filtered],
'meme_production_rate': mpr[filtered],
'step_filtered': filtered
}
return results
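# Hypothetical usage (the path is illustrative, not from the original source):
#   res = read_sim_results('output/sim_0.json', step_filtered=100)
#   res['survival_times'].mean()   # mean lifetime among media active after step 100
#   res['active'].sum()            # number of media never deactivated during the run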
def all_sims_summary(sims_list, step_filtered=0):
"""
Parameters
----------
sims_list: list
List of json files with simulation results
step_filtered: int
        Produces a boolean array 'filtered' with True for all media that were
active at any step > step_filtered
Returns
-------
df_sims: Pandas DataFrame with results by simulation
df_media: Pandas DataFrame with results by media
df_clusters: Pandas DataFrame with results by cluster
"""
# Organize Simulations DataFrame
df_sims = pd.DataFrame({
'covariance_punishment': pd.Series([], dtype='float'),
'media_reproduction_rate': pd.Series([], dtype='float'),
'media_deactivation_rate': pd.Series([], dtype='float'),
'individual_mui': pd.Series([], dtype='float'),
'individual_renewal_rate': pd.Series([], dtype='float'),
'individual_mcr': pd.Series([], dtype='float'),
'max_reward': pd.Series([], dtype='float'),
'n_clusters': pd.Series([], dtype='Int64'),
'clusters_distances': pd.Series([], dtype='O'),
'media_dynamics': pd.Series([], dtype='category'),
'individual_dynamics': pd.Series([], dtype='category'),
'cp_category': pd.Series([], dtype='category'),
})
# Organize Media DataFrame
df_media = pd.DataFrame({
'simulation': pd.Series([], dtype='Int64'),
'activated': pd.Series([], dtype='Int64'),
'deactivated': | pd.Series([], dtype='Int64') | pandas.Series |
import unittest
import pandas as pd
import Normalizer
class Normalizer_spec(unittest.TestCase):
    def test_should_calculate_mean(self):
data = pd.Series([None,1.5,2.5,2.2,3.8])
result = Normalizer.mean(data)
self.assertEqual(2.5,result)
    def test_should_calculate_mean_absolute_deviation(self):
data = pd.Series([None,None,None,1.5,2.5,2.2,3.8])
result = Normalizer.mean_absolute_deviation(data)
self.assertEqual(0.64999999999999991,result)
    def test_should_calculate_std(self):
data = pd.Series([None,1,5,2,5,0,7,0,8])
result = Normalizer.standard_deviation(data)
self.assertEqual(2.9580398915498081,result)
    def test_should_calculate_z_score(self):
data = | pd.Series([None,1,-5,-9,1,0,7,0,8]) | pandas.Series |
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 14 07:48:19 2021
@author: <NAME>
"""
import pandas as pd
import numpy as np
# Helper functions
from preprocessing import process_text, convert_to_int, lda_preprocess
from genre_processing import clean_genre, group_genre
from pprint import pprint
# BERTopic
from sentence_transformers import SentenceTransformer
from bertopic import BERTopic
from umap import UMAP
# LDA imports
from gensim.corpora import Dictionary
from gensim.models import LdaModel, Phrases, CoherenceModel
import logging
# Define classes
class lda_model:
def __init__(self, nr_topics=20, nr_passes=50):
self.nr_topics = nr_topics
self.nr_passes = nr_passes
self.is_fitted = False
    @staticmethod
    def _create_lda_documents(docs):
"""
Convert the documents to a structure that aligns with the LdaModel.
Parameters
----------
docs : pd.Series, np.array or similar one dimensional structure
Contains the synopses of all the movies in the training data.
Returns
-------
documents : list of lists
A structure to work with the LdaModel from gensim.
"""
# Preprocess the documents to work with the LDA model
documents = lda_preprocess(docs)
# Create bigrams
bigram = Phrases(documents)
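        # Phrases joins detected bigrams with an underscore (e.g. the pair
        # "new", "york" becomes the token "new_york"); the loop below appends
        # those joined tokens alongside the original unigrams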
for idx in range(len(documents)):
for token in bigram[documents[idx]]:
if '_' in token:
# Token is a bigram, add to document.
documents[idx].append(token)
return documents
def _convert_lda_to_df(self, model_preds):
"""
        Save each document's predicted probabilities of belonging to a topic
        in a data frame.
Parameters
----------
model_preds : list of tuples
Structure as generated by predicting topics for new documents.
Returns
-------
lda_df : pd.DataFrame
A data frame with all possible topic predictions and each documents
probability of being in that topic.
"""
lda_df = pd.DataFrame(columns=range(self.nr_topics))
for doc in model_preds:
# Convert list of tuple to a dataframe
a = pd.DataFrame.from_dict({x[0]:x[1] for x in doc}, orient="index").T
# Add extra columns so that it contains all topics
a = a.reindex(columns=range(self.nr_topics), fill_value=0)
# Append to the dataframe
lda_df = lda_df.append(a)
lda_df.reset_index(inplace=True, drop=True)
return lda_df
def create_dictionary(self, X):
"""
        Create the dictionary and corpus needed for the LDA model and for
coherence measures.
Parameters
----------
X : pd.Series, np.array or similar one dimensional structure
Contains the synopses of all movies to examine.
Returns
-------
documents : list
The documents pre-processed to work with LdaModel.
dictionary : gensim.corpora.dictionary.Dictionary
Dictionary of all words and id mappings.
corpus : list
List of the documents as a bag of words.
id2word : mapping
A mapping from word id to the actual word.
"""
# Convert the input docs to LDA friendly documents
documents = self._create_lda_documents(X)
# Create a dictionary representation of the documents
dictionary = Dictionary(documents)
        # Transform documents to a bag of words representation (vectorized form)
corpus = [dictionary.doc2bow(doc) for doc in documents]
# Make a index to word dictionary.
temp = dictionary[0] # This is only to "load" the dictionary.
id2word = dictionary.id2token
# Update values in the object
self.documents = documents
self.dictionary = dictionary
self.corpus = corpus
self.id2word = id2word
return documents, dictionary, corpus, id2word
def fit(self, X):
"""
        Fit the LdaModel as specified by the input parameters. Also saves a logfile.
# The code for the LDA model was inspired by the gensim documentation:
# https://radimrehurek.com/gensim/auto_examples/tutorials/run_lda.html#pre-process-and-vectorize-the-documents
Parameters
----------
X : pd.Series, np.array or similar one dimensional structure
Contains the synopses of all the movies in the training data.
Returns
-------
None.
"""
try:
# Attempt to load the model if one already exists
model = LdaModel.load(f"../Models/gensim/model{self.nr_topics}_{self.nr_passes}pass")
except FileNotFoundError:
# Create the dictionary, corpus and id2word
self.create_dictionary(X)
# Create logging file
logging.basicConfig(filename='gensim.log', format='%(asctime)s:%(levelname)s:%(message)s', level=logging.INFO)
# Fit the model
model = LdaModel(
corpus=self.corpus,
num_topics=self.nr_topics,
passes=self.nr_passes,
alpha="auto",
eta="auto",
id2word=self.id2word,
random_state=0
)
# Save the resulting model
model.save(f"../Models/gensim/model{self.nr_topics}_{self.nr_passes}pass")
# Shutdown the logging file
logging.shutdown()
self.is_fitted = True
# Save the resulting model in the object
self.model = model
def predict(self, X_test):
"""
Predict the topics for previously unseen documents.
Parameters
----------
X_test : pd.Series, np.array or similar one dimensional structure
            Contains the synopses of all the movies in the test data.
Returns
-------
predicted_probs : pd.DataFrame
            A data frame consisting of the predicted probabilities of each topic
            for all documents in X_test.
"""
try:
# Try reading the file
predicted_probs = pd.read_csv(f"../Output/gensim/lda{self.nr_topics}_df.csv")
except FileNotFoundError:
# Preprocess the documents to work with the model
test_docs_lda = self._create_lda_documents(X_test)
# Create test corpus
test_corpus = [self.dictionary.doc2bow(doc) for doc in test_docs_lda]
            # Get the predicted probabilities of belonging to each topic
            model_preds = [doc for doc in self.model.get_document_topics(test_corpus)]
            # Convert the predictions to a data frame covering all topics
            predicted_probs = self._convert_lda_to_df(model_preds)
# Save to csv if it does not exist
predicted_probs.to_csv(f"../Output/gensim/lda{self.nr_topics}_df.csv", index=False)
return predicted_probs
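    # Rough usage sketch for this class (train_docs/test_docs are hypothetical
    # pandas Series of synopses):
    #   lda = lda_model(nr_topics=20, nr_passes=50)
    #   lda.fit(train_docs)
    #   topic_probs = lda.predict(test_docs)  # one column per topic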
def get_topics_per_class(self, y):
"""
Count the number of topics per class occurrence. The topics are here
        generated by argmax, in contrast with BERTopic in which they are chosen
        by HDBSCAN.
Parameters
----------
y : pd.Series, np.array or similar one dimensional structure
The genre labels for the plots.
Returns
-------
model_labels : pd.DataFrame
A data frame consisting of counts of class-topic combinations.
"""
try:
# Try reading the files
model_probs = pd.read_csv(f"../Output/gensim/model{self.nr_topics}_df.csv")
model_labels = pd.read_csv(f"../Output/gensim/model{self.nr_topics}_labels.csv")
except FileNotFoundError:
            # Convert the estimated probabilities for each topic (which may not
            # include all topics) to a data frame with all topics included
            model_probs = self._convert_lda_to_df(
                [doc for doc in self.model.get_document_topics(self.corpus)])
# If file doesn't exist: create it
model_probs.to_csv(f"../Output/gensim/model{self.nr_topics}_df.csv", index=False)
# Classify the topics by the maximum probability and calculate the
# size of each combination of Class (truth) and Topic (predicted argmax)
model_labels = pd.DataFrame([y, np.argmax(model_probs.values, axis=1)]).T.\
rename(columns={0: "Class", "Unnamed 0": "Topic"}).groupby(["Class", "Topic"]).\
size().reset_index().rename(columns={0: "Frequency"})
# If file doesn't exist: create it
model_labels.to_csv(f"../Output/gensim/model{self.nr_topics}_labels.csv", index=False)
return model_labels
def coherence_score(self, X):
"""
Calculate coherence metric for LDA models using NPMI.
Parameters
----------
X : pd.Series, np.array or similar one dimensional structure
Contains the documents in the training data.
Returns
-------
c_npmi : float
The coherence score for the generated topics.
"""
if not self.is_fitted:
self.create_dictionary(X)
# Calculate coherence score
        c_npmi = CoherenceModel(model=self.model, texts=self.documents,
                                corpus=self.corpus, dictionary=self.dictionary,
                                coherence='c_npmi').get_coherence()
return c_npmi
class BERT_model:
def __init__(self, min_topic_size=10):
"""
Create a new object of the custom BERT_model class used for this report.
Parameters
----------
min_topic_size : int, the default is 10.
The minimum size of the topics.
Returns
-------
None.
"""
# Path to the the BERT model
bert_path = 'BERT/all-MiniLM-L6-v2'
# Specify the embedding model
self.sentence_model = SentenceTransformer(bert_path)
        # Specify UMAP model with specified random state (otherwise default)
self.umap_model = UMAP(n_neighbors=15, n_components=5,
min_dist=0.0, metric='cosine', random_state=42)
# Save min_topic_size
self.min_topic_size = min_topic_size
# Topic model with greater topic size and auto topic reduction
self.topic_model = BERTopic(embedding_model=self.sentence_model,
calculate_probabilities=True,
n_gram_range=(1,2),
min_topic_size=self.min_topic_size,
nr_topics="auto",
umap_model=self.umap_model)
        # Placeholders
self.probs = None
self.topics = None
def fit(self, X, y=None):
"""
Fit the transform on the given data, with or without a class label.
Will attempt to read the files if they exist.
Parameters
----------
X : pd.Series, np.array or similar one dimensional structure
Contains the synopses of all the movies in the training data.
y : pd.Series, np.array or similar one dimensional structure
The genres of the movie synopses indexed by integers.
The default is None.
Returns
-------
None. Instead the topic model is updated and probabilities and topics
saved within the class.
"""
# Specify the model name
self.model_name = "sup"
if y is None:
self.model_name= "unsup"
try:
# Attempt to read the files
if y is None:
# Unsupervised model
topic_model = BERTopic.load(f"../Models/BERTopic/unsup_bigram_model_auto{self.min_topic_size}")
topic_df = pd.read_csv(f"../Output/BERTopic/unsup_bigram_topics_auto{self.min_topic_size}.csv")
else:
# Supervised model
topic_model = BERTopic.load(f"../Models/BERTopic/sup_bigram_model_auto{self.min_topic_size}")
topic_df = pd.read_csv(f"../Output/BERTopic/sup_bigram_topics_auto{self.min_topic_size}.csv")
# Split to corresponding entries
topics = np.array(topic_df["topic"])
probs = np.array(topic_df.drop("topic", axis=1))
# Update the topic model
self.topic_model = topic_model
except FileNotFoundError:
# If the file does not exist; create it
if y is None:
# Unsupervised model
topics, probs = self.topic_model.fit_transform(X)
self.topic_model.save(f"../Models/BERTopic/unsup_bigram_model_auto{self.min_topic_size}")
pd.DataFrame(probs).assign(topic=topics).\
to_csv(f"../Output/BERTopic/unsup_bigram_topics_auto{self.min_topic_size}.csv", index=False)
else:
# Supervised
topics, probs = self.topic_model.fit_transform(X, y=y)
self.topic_model.save(f"../Models/BERTopic/sup_bigram_model_auto{self.min_topic_size}")
pd.DataFrame(probs).assign(topic=topics).\
to_csv(f"../Output/BERTopic/sup_bigram_topics_auto{self.min_topic_size}.csv", index=False)
        # Save topics and probabilities
self.topics = topics
self.probs = probs
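    # Rough usage sketch (train_docs/train_labels are hypothetical inputs):
    #   bert = BERT_model(min_topic_size=50)
    #   bert.fit(train_docs)                  # unsupervised topics
    #   bert.fit(train_docs, y=train_labels)  # (semi-)supervised topics
    #   bert.get_keywords(n=5, n_topics=10)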
def get_keywords(self, n=5, n_topics=10):
"""
Print the top n keywords in the top n_topics topics.
Parameters
----------
n : int, the default is 5.
The number of keywords to print.
n_topics : int, the default is 10.
The number of topics to consider.
Returns
-------
None. Results are instead printed.
"""
for i in range(-1, n_topics):
pprint(f"Topic number {i}:")
pprint(dict(self.topic_model.get_topic(i)[:n]))
def get_topic_frequencies(self, n=11):
"""
Get the topics and their frequencies.
Parameters
----------
n : int, the default is 11.
How many topics to return.
Returns
-------
pd.DataFrame
Contains the topic index and their respective counts.
"""
return self.topic_model.get_topic_freq()[0:n]
def get_topics_per_class(self, X, y):
"""
Retrieve the topics generated per genre.
Parameters
----------
X : pd.Series, np.array or similar one dimensional structure
Contains the synopses of all the movies in the training data.
y : pd.Series, np.array or similar one dimensional structure
The genres of the movie synopses indexed by integers.
Returns
-------
topics_class : pd.DataFrame
            A dataframe over all the combinations of topics and genres, along
            with the probability of belonging to each topic.
"""
try:
# Attempt to read the file.
topics_class = | pd.read_csv(f"../Output/BERTopic/topic{self.min_topic_size}class_{self.model_name}.csv") | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 8 13:53:14 2018
@author: hejiew
"""
import os
import numpy as np
import pandas as pd
from sklearn import preprocessing
folder_name = './ck_processed'
file_list = os.listdir(folder_name)
new_folder = './resp_data'
if not os.path.exists(new_folder):
os.mkdir(new_folder)
n = 0
for file_name in file_list:
if file_name.find('.csv') == -1:
continue
if file_name.find('m1') == -1:
continue
full_name = os.path.join(folder_name, file_name)
_df = pd.read_csv(full_name, sep=' ',
engine='python')
_df = _df.dropna(axis='columns')
d = {'time': _df.values[:,0],
'value': _df.values[:,1]}
df = | pd.DataFrame(d) | pandas.DataFrame |
import json
import time
import requests
from requests.models import Response
import random
import pandas as pd
import csv
import openpyxl
#Pulls JSONs from URLs list and converts into a JSON array, then saves file
#random wait times to avoid rate limiting, remove if no rate limit
def test():
with open('info.json', 'a') as f:
f.write("{ \n")
f.write(' "info": [ \n')
URLs = []
for url in URLs:
succes = 0
        headers = {'Host': 'order.dominos.com',
                   'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:45.0) Gecko/20100101 Firefox/93.0',
                   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
                   'Accept-Language': 'pt-BR,pt;q=0.8,en-US;q=0.5,en;q=0.3',
                   'Accept-Encoding': 'gzip, deflate',
                   'Connection': 'keep-alive',
                   'Pragma': 'no-cache',
                   'Cache-Control': 'no-cache'}
        # Send the browser-like headers with the request (updating the response's
        # headers after the fact has no effect on the request itself)
        data = requests.get(url, allow_redirects=False, headers=headers)
#If not a successful connection, waits a random time, and continues down the list
if data.status_code != 200:
print(url)
print(data.status_code)
waittime = random.randint(2,8)
print('waiting ' + str(waittime) + " seconds...")
time.sleep(waittime)
continue
#On successful connection saves json into masterfile.
jsonOut = data.json()
with open('info.json', 'a') as f:
if url == URLs[-1]:
f.write(" ")
json.dump(jsonOut, f)
f.write("\n ] \n}")
print("Finished!")
continue
f.write(" ")
json.dump(jsonOut, f)
f.write(",\n")
waitsuc = random.randint(3,6)
print("Success! Waiting " + str(waitsuc) + " seconds...")
time.sleep(waitsuc)
def convertCsv():
with open('info.json', encoding="utf-8") as json_file:
data = json.load(json_file)
df = | pd.DataFrame(data['info']) | pandas.DataFrame |
import pandas as pd
data_frame = pd.read_csv("C:\\bank-additional-full.csv", sep=";")
cols = ['job', 'marital', 'education', 'default', 'housing', 'loan', 'contact', 'month', 'day_of_week', 'poutcome']
print("=====================================================================================")
data_1 = data_frame[cols]
print(data_1.head()) # type: DataFrame
print("=====================================================================================")
data_dummies = pd.get_dummies(data_1)
print(data_dummies.head()) # type: DataFrame
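# get_dummies one-hot encodes each categorical column, e.g. a 'marital' value of
# "married" becomes marital_married=1 with the other marital_* columns set to 0.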
print("=====================================================================================\n concatenate:")
# To concatenate along a certain axis, axis=1 for concatenating along columns.
result_df = pd.concat([data_dummies, data_frame], axis=1) # type: #DataFrame, row: 41188, col: 74
print(result_df.head())
print("type:", type(result_df))
print("shape:", result_df.shape)
print("=====================================================================================")
print(result_df.columns.values)
print("=====================================================================================")
# Applies function along input axis of DataFrame. If result_df['y']='yes', then result_df['output']=1.
result_df['output'] = result_df['y'].apply(lambda x: 1 if x == 'yes' else 0)
print(result_df['output'].head(10))
print("type:", type(result_df['output']))
print("==++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++==")
# x = result_df.loc[:, :'nr.employed']
# print(x.head())
y = | pd.DataFrame({'output': result_df.output}) | pandas.DataFrame |
# ms_mint/targets.py
import pandas as pd
import numpy as np
from pathlib import Path as P
from .standards import TARGETS_COLUMNS, DEPRECATED_LABELS
from .helpers import df_diff
from .tools import get_mz_mean_from_formulas
def read_targets(filenames, ms_mode="negative"):
"""
Extracts peak data from csv files that contain peak definitions.
CSV files must contain columns:
- 'peak_label': str, unique identifier
- 'mz_mean': float, center of mass to be extracted in [Da]
- 'mz_width': float, with of mass window in [ppm]
- 'rt_min': float, minimum retention time in [min]
- 'rt_max': float, maximum retention time in [min]
-----
Args:
- filenames: str or PosixPath or list of such with path to csv-file(s)
Returns:
pandas.DataFrame in targets format
"""
if isinstance(filenames, str):
filenames = [filenames]
targets = []
for fn in filenames:
if fn.endswith(".csv"):
df = pd.read_csv(fn)
elif fn.endswith(".xlsx"):
df = | pd.read_excel(fn) | pandas.read_excel |
import datetime as dt
import numpy as np
import pandas as pd
from pandas.testing import assert_series_equal, assert_frame_equal
import pytest
from solarforecastarbiter.datamodel import Observation
from solarforecastarbiter.validation import tasks, validator
from solarforecastarbiter.validation.quality_mapping import (
LATEST_VERSION_FLAG, DESCRIPTION_MASK_MAPPING,
DAILY_VALIDATION_FLAG)
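# Quality flags are bit masks, so the expected values in the tests below combine
# individual DESCRIPTION_MASK_MAPPING entries with LATEST_VERSION_FLAG via bitwise OR.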
@pytest.fixture()
def make_observation(single_site):
def f(variable):
return Observation(
name='test', variable=variable, interval_value_type='mean',
interval_length=pd.Timedelta('1hr'), interval_label='beginning',
site=single_site, uncertainty=0.1, observation_id='OBSID',
provider='Organization 1', extra_parameters='')
return f
@pytest.fixture()
def default_index(single_site):
return [pd.Timestamp('2019-01-01T08:00:00', tz=single_site.timezone),
pd.Timestamp('2019-01-01T09:00:00', tz=single_site.timezone),
pd.Timestamp('2019-01-01T10:00:00', tz=single_site.timezone),
pd.Timestamp('2019-01-01T11:00:00', tz=single_site.timezone),
pd.Timestamp('2019-01-01T13:00:00', tz=single_site.timezone)]
@pytest.fixture()
def daily_index(single_site):
out = pd.date_range(start='2019-01-01T08:00:00',
end='2019-01-01T19:00:00',
freq='1h',
tz=single_site.timezone)
return out.append(
pd.Index([pd.Timestamp('2019-01-02T09:00:00',
tz=single_site.timezone)]))
def test_validate_ghi(mocker, make_observation, default_index):
mocks = [mocker.patch.object(validator, f,
new=mocker.MagicMock(
wraps=getattr(validator, f)))
for f in ['check_timestamp_spacing',
'check_irradiance_day_night',
'check_ghi_limits_QCRad',
'check_ghi_clearsky',
'detect_clearsky_ghi']]
obs = make_observation('ghi')
data = pd.Series([10, 1000, -100, 500, 300], index=default_index)
flags = tasks.validate_ghi(obs, data)
for mock in mocks:
assert mock.called
expected = (pd.Series([0, 0, 0, 0, 1], index=data.index) *
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'],
pd.Series([1, 0, 0, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
pd.Series([0, 1, 1, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'],
pd.Series([0, 1, 0, 1, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'],
pd.Series(0, index=data.index) *
DESCRIPTION_MASK_MAPPING['CLEARSKY'])
for flag, exp in zip(flags, expected):
assert_series_equal(flag, exp | LATEST_VERSION_FLAG,
check_names=False)
def test_validate_mostly_clear(mocker, make_observation):
mocks = [mocker.patch.object(validator, f,
new=mocker.MagicMock(
wraps=getattr(validator, f)))
for f in ['check_timestamp_spacing',
'check_irradiance_day_night',
'check_ghi_limits_QCRad',
'check_ghi_clearsky',
'detect_clearsky_ghi']]
obs = make_observation('ghi').replace(interval_length=pd.Timedelta('5min'))
index = pd.date_range(start='2019-04-01T11:00', freq='5min',
tz=obs.site.timezone, periods=11)
data = pd.Series([742, 749, 756, 763, 769, 774, 779, 784, 789, 793, 700],
index=index)
flags = tasks.validate_ghi(obs, data)
for mock in mocks:
assert mock.called
expected = (pd.Series(0, index=data.index) *
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'],
pd.Series(0, index=data.index) *
DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
pd.Series(0, index=data.index) *
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'],
pd.Series(0, index=data.index) *
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'],
pd.Series([1] * 10 + [0], index=data.index) *
DESCRIPTION_MASK_MAPPING['CLEARSKY'])
for flag, exp in zip(flags, expected):
assert_series_equal(flag, exp | LATEST_VERSION_FLAG,
check_names=False)
def test_apply_immediate_validation(
mocker, make_observation, default_index):
obs = make_observation('ghi')
data = pd.DataFrame(
[(0, 0), (100, 0), (200, 0), (-1, 1), (1500, 0)],
index=default_index,
columns=['value', 'quality_flag'])
val = tasks.apply_immediate_validation(obs, data)
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] | LATEST_VERSION_FLAG |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] |
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED']
]
assert_frame_equal(val, out)
def test_apply_immediate_validation_already_validated(
mocker, make_observation, default_index):
obs = make_observation('ghi')
data = pd.DataFrame(
[(0, 18), (100, 18), (200, 18), (-1, 19), (1500, 18)],
index=default_index,
columns=['value', 'quality_flag'])
val = tasks.apply_immediate_validation(obs, data)
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] | LATEST_VERSION_FLAG |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] |
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED']
]
assert_frame_equal(val, out)
@pytest.mark.parametrize('var', ['air_temperature', 'wind_speed', 'dni', 'dhi',
'poa_global', 'relative_humidity'])
def test_apply_immediate_validation_other(
mocker, make_observation, default_index, var):
mock = mocker.MagicMock()
mocker.patch.dict(
'solarforecastarbiter.validation.tasks.IMMEDIATE_VALIDATION_FUNCS',
{var: mock})
obs = make_observation(var)
data = pd.DataFrame(
[(0, 0), (100, 0), (200, 0), (-1, 1), (1500, 0)],
index=default_index,
columns=['value', 'quality_flag'])
tasks.apply_immediate_validation(obs, data)
assert mock.called
@pytest.mark.parametrize('var', ['availability', 'curtailment', 'event',
'net_load'])
def test_apply_immediate_validation_defaults(
mocker, make_observation, default_index, var):
mock = mocker.spy(tasks, 'validate_defaults')
obs = make_observation(var)
data = pd.DataFrame(
[(0, 0), (100, 0), (200, 0), (-1, 1), (1500, 0)],
index=default_index,
columns=['value', 'quality_flag'])
tasks.apply_immediate_validation(obs, data)
assert mock.called
def test_fetch_and_validate_observation_ghi(mocker, make_observation,
default_index):
obs = make_observation('ghi')
data = pd.DataFrame(
[(0, 0), (100, 0), (200, 0), (-1, 1), (1500, 0)],
index=default_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] | LATEST_VERSION_FLAG |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] |
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED']
]
assert post_mock.call_count == 2
assert_frame_equal(post_mock.call_args_list[0][0][1], out[:-1])
assert_frame_equal(post_mock.call_args_list[1][0][1], out[-1:])
def test_fetch_and_validate_observation_ghi_nones(
mocker, make_observation, default_index):
obs = make_observation('ghi')
data = pd.DataFrame(
[(None, 1)] * 5, index=default_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
out = data.copy()
base = (
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] |
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'] |
LATEST_VERSION_FLAG
)
out['quality_flag'] = [
base | DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
base,
base,
base,
base | DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY']
]
assert post_mock.call_count == 2
assert_frame_equal(post_mock.call_args_list[0][0][1], out[:-1])
assert_frame_equal(post_mock.call_args_list[1][0][1], out[-1:])
def test_fetch_and_validate_observation_not_listed(mocker, make_observation,
default_index):
obs = make_observation('curtailment')
data = pd.DataFrame(
[(0, 0), (100, 0), (200, 0), (-1, 1), (1500, 0)],
index=default_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | LATEST_VERSION_FLAG,
LATEST_VERSION_FLAG,
LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] | LATEST_VERSION_FLAG]
assert post_mock.call_count == 2
assert_frame_equal(post_mock.call_args_list[0][0][1], out[:-1])
assert_frame_equal(post_mock.call_args_list[1][0][1], out[-1:])
def test_validate_dni(mocker, make_observation, default_index):
mocks = [mocker.patch.object(validator, f,
new=mocker.MagicMock(
wraps=getattr(validator, f)))
for f in ['check_timestamp_spacing',
'check_irradiance_day_night',
'check_dni_limits_QCRad']]
obs = make_observation('dni')
data = pd.Series([10, 1000, -100, 500, 500], index=default_index)
flags = tasks.validate_dni(obs, data)
for mock in mocks:
assert mock.called
expected = (pd.Series([0, 0, 0, 0, 1], index=data.index) *
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'],
pd.Series([1, 0, 0, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
pd.Series([0, 0, 1, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'])
for flag, exp in zip(flags, expected):
assert_series_equal(flag, exp | LATEST_VERSION_FLAG,
check_names=False)
def test_fetch_and_validate_observation_dni(mocker, make_observation,
default_index):
obs = make_observation('dni')
data = pd.DataFrame(
[(0, 0), (100, 0), (200, 0), (-1, 1), (1500, 0)],
index=default_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] | LATEST_VERSION_FLAG |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED']]
assert post_mock.call_count == 2
assert_frame_equal(post_mock.call_args_list[0][0][1], out[:-1])
assert_frame_equal(post_mock.call_args_list[1][0][1], out[-1:])
def test_validate_dhi(mocker, make_observation, default_index):
mocks = [mocker.patch.object(validator, f,
new=mocker.MagicMock(
wraps=getattr(validator, f)))
for f in ['check_timestamp_spacing',
'check_irradiance_day_night',
'check_dhi_limits_QCRad']]
obs = make_observation('dhi')
data = pd.Series([10, 1000, -100, 200, 200], index=default_index)
flags = tasks.validate_dhi(obs, data)
for mock in mocks:
assert mock.called
expected = (pd.Series([0, 0, 0, 0, 1], index=data.index) *
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'],
pd.Series([1, 0, 0, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
pd.Series([0, 1, 1, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'])
for flag, exp in zip(flags, expected):
assert_series_equal(flag, exp | LATEST_VERSION_FLAG,
check_names=False)
def test_fetch_and_validate_observation_dhi(mocker, make_observation,
default_index):
obs = make_observation('dhi')
data = pd.DataFrame(
[(0, 0), (100, 0), (200, 0), (-1, 1), (1500, 0)],
index=default_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] | LATEST_VERSION_FLAG |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED']]
assert post_mock.call_count == 2
assert_frame_equal(post_mock.call_args_list[0][0][1], out[:-1])
assert_frame_equal(post_mock.call_args_list[1][0][1], out[-1:])
def test_validate_poa_global(mocker, make_observation, default_index):
mocks = [mocker.patch.object(validator, f,
new=mocker.MagicMock(
wraps=getattr(validator, f)))
for f in ['check_timestamp_spacing',
'check_irradiance_day_night',
'check_poa_clearsky']]
obs = make_observation('poa_global')
data = pd.Series([10, 1000, -400, 300, 300], index=default_index)
flags = tasks.validate_poa_global(obs, data)
for mock in mocks:
assert mock.called
expected = (pd.Series([0, 0, 0, 0, 1], index=data.index) *
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'],
pd.Series([1, 0, 0, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
pd.Series([0, 1, 0, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'])
for flag, exp in zip(flags, expected):
assert_series_equal(flag, exp | LATEST_VERSION_FLAG,
check_names=False)
def test_fetch_and_validate_observation_poa_global(mocker, make_observation,
default_index):
obs = make_observation('poa_global')
data = pd.DataFrame(
[(0, 0), (100, 0), (200, 0), (-1, 1), (1500, 0)],
index=default_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] | LATEST_VERSION_FLAG |
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED']]
assert post_mock.call_count == 2
assert_frame_equal(post_mock.call_args_list[0][0][1], out[:-1])
assert_frame_equal(post_mock.call_args_list[1][0][1], out[-1:])
def test_validate_air_temp(mocker, make_observation, default_index):
mocks = [mocker.patch.object(validator, f,
new=mocker.MagicMock(
wraps=getattr(validator, f)))
for f in ['check_timestamp_spacing',
'check_irradiance_day_night',
'check_temperature_limits']]
obs = make_observation('air_temperature')
data = pd.Series([10, 1000, -400, 30, 20], index=default_index)
flags = tasks.validate_air_temperature(obs, data)
for mock in mocks:
assert mock.called
expected = (pd.Series([0, 0, 0, 0, 1], index=data.index) *
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'],
pd.Series([1, 0, 0, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
pd.Series([0, 1, 1, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'])
for flag, exp in zip(flags, expected):
assert_series_equal(flag, exp | LATEST_VERSION_FLAG,
check_names=False)
def test_fetch_and_validate_observation_air_temperature(
mocker, make_observation, default_index):
obs = make_observation('air_temperature')
data = pd.DataFrame(
[(0, 0), (200, 0), (20, 0), (-1, 1), (1500, 0)],
index=default_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['OK'] |
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] |
LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | LATEST_VERSION_FLAG]
assert post_mock.call_count == 2
assert_frame_equal(post_mock.call_args_list[0][0][1], out[:-1])
assert_frame_equal(post_mock.call_args_list[1][0][1], out[-1:])
def test_validate_wind_speed(mocker, make_observation, default_index):
mocks = [mocker.patch.object(validator, f,
new=mocker.MagicMock(
wraps=getattr(validator, f)))
for f in ['check_timestamp_spacing',
'check_irradiance_day_night',
'check_wind_limits']]
obs = make_observation('wind_speed')
data = | pd.Series([10, 1000, -400, 3, 20], index=default_index) | pandas.Series |
# EIA_CBECS_Land.py (flowsa)
# !/usr/bin/env python3
# coding=utf-8
import pandas as pd
import numpy as np
import io
from flowsa.common import *
from flowsa.flowbyfunctions import assign_fips_location_system
import yaml
"""
MANUFACTURING ENERGY CONSUMPTION SURVEY (MECS)
https://www.eia.gov/consumption/manufacturing/data/2014/
Last updated: 8 Sept. 2020
"""
def eia_mecs_URL_helper(build_url, config, args):
"""
    Takes the build url and performs substitutions based on the EIA MECS year
    and data tables of interest. Returns the list of finished urls.
"""
# initiate url list
urls = []
# for all tables listed in the source config file...
for table in config['tables']:
# start with build url
url = build_url
# replace '__year__' in build url
url = url.replace('__year__', args['year'])
# 2014 files are in .xlsx format; 2010 files are in .xls format
if(args['year'] == '2010'):
url = url[:-1]
# replace '__table__' in build url
url = url.replace('__table__', table)
# add to list of urls
urls.append(url)
return urls
def eia_mecs_land_call(url, cbesc_response, args):
# Convert response to dataframe
df_raw_data = pd.io.excel.read_excel(io.BytesIO(cbesc_response.content), sheet_name='Table 9.1')
df_raw_rse = pd.io.excel.read_excel(io.BytesIO(cbesc_response.content), sheet_name='RSE 9.1')
if (args["year"] == "2014"):
df_rse = pd.DataFrame(df_raw_rse.loc[12:93]).reindex()
df_data = pd.DataFrame(df_raw_data.loc[16:97]).reindex()
df_description = pd.DataFrame(df_raw_data.loc[16:97]).reindex()
# skip rows and remove extra rows at end of dataframe
df_description.columns = ["NAICS Code(a)", "Subsector and Industry",
"Approximate Enclosed Floorspace of All Buildings Onsite (million sq ft)",
"Establishments(b) (counts)", "Average Enclosed Floorspace per Establishment (sq ft)",
"Approximate Number of All Buildings Onsite (counts)",
"Average Number of Buildings Onsite per Establishment (counts)",
"n8", "n9", "n10", "n11", "n12"]
df_data.columns = ["NAICS Code(a)", "Subsector and Industry",
"Approximate Enclosed Floorspace of All Buildings Onsite (million sq ft)",
"Establishments(b) (counts)", "Average Enclosed Floorspace per Establishment (sq ft)",
"Approximate Number of All Buildings Onsite (counts)",
"Average Number of Buildings Onsite per Establishment (counts)",
"n8", "n9", "n10", "n11", "n12"]
df_rse.columns = ["NAICS Code(a)", "Subsector and Industry",
"Approximate Enclosed Floorspace of All Buildings Onsite (million sq ft)",
"Establishments(b) (counts)", "Average Enclosed Floorspace per Establishment (sq ft)",
"Approximate Number of All Buildings Onsite (counts)",
"Average Number of Buildings Onsite per Establishment (counts)",
"n8", "n9", "n10", "n11", "n12"]
#Drop unused columns
df_description = df_description.drop(columns=["Approximate Enclosed Floorspace of All Buildings Onsite (million sq ft)",
"Establishments(b) (counts)", "Average Enclosed Floorspace per Establishment (sq ft)",
"Approximate Number of All Buildings Onsite (counts)",
"Average Number of Buildings Onsite per Establishment (counts)",
"n8", "n9", "n10", "n11", "n12"])
df_data = df_data.drop(columns=["Subsector and Industry", "n8", "n9", "n10", "n11", "n12"])
df_rse = df_rse.drop(columns=["Subsector and Industry", "n8", "n9", "n10", "n11", "n12"])
else:
df_rse = pd.DataFrame(df_raw_rse.loc[14:97]).reindex()
df_data = pd.DataFrame(df_raw_data.loc[16:99]).reindex()
df_description = pd.DataFrame(df_raw_data.loc[16:99]).reindex()
df_description.columns = ["NAICS Code(a)", "Subsector and Industry",
"Approximate Enclosed Floorspace of All Buildings Onsite (million sq ft)",
"Establishments(b) (counts)", "Average Enclosed Floorspace per Establishment (sq ft)",
"Approximate Number of All Buildings Onsite (counts)",
"Average Number of Buildings Onsite per Establishment (counts)"]
df_data.columns = ["NAICS Code(a)", "Subsector and Industry",
"Approximate Enclosed Floorspace of All Buildings Onsite (million sq ft)",
"Establishments(b) (counts)", "Average Enclosed Floorspace per Establishment (sq ft)",
"Approximate Number of All Buildings Onsite (counts)",
"Average Number of Buildings Onsite per Establishment (counts)"]
df_rse.columns = ["NAICS Code(a)", "Subsector and Industry",
"Approximate Enclosed Floorspace of All Buildings Onsite (million sq ft)",
"Establishments(b) (counts)", "Average Enclosed Floorspace per Establishment (sq ft)",
"Approximate Number of All Buildings Onsite (counts)",
"Average Number of Buildings Onsite per Establishment (counts)"]
# Drop unused columns
df_description = df_description.drop(
columns=["Approximate Enclosed Floorspace of All Buildings Onsite (million sq ft)",
"Establishments(b) (counts)", "Average Enclosed Floorspace per Establishment (sq ft)",
"Approximate Number of All Buildings Onsite (counts)",
"Average Number of Buildings Onsite per Establishment (counts)"])
df_data = df_data.drop(columns=["Subsector and Industry"])
df_rse = df_rse.drop(columns=["Subsector and Industry"])
df_data = df_data.melt(id_vars=["NAICS Code(a)"],
var_name="FlowName",
value_name="FlowAmount")
df_rse = df_rse.melt(id_vars=["NAICS Code(a)"],
var_name="FlowName",
value_name="Spread")
df = pd.merge(df_data, df_rse)
df = | pd.merge(df, df_description) | pandas.merge |
import pandas as pd
import os
import time
try: from ethnicolr import census_ln, pred_census_ln, pred_wiki_name, pred_fl_reg_name
except ImportError:
	os.system('pip install ethnicolr')
	from ethnicolr import census_ln, pred_census_ln, pred_wiki_name, pred_fl_reg_name
import seaborn as sns
import matplotlib.pylab as plt
import scipy
from itertools import permutations
import numpy as np
import matplotlib.gridspec as gridspec
from igraph import VertexClustering
from itertools import combinations
plt.rcParams['pdf.fonttype'] = 42
plt.rcParams['ps.fonttype'] = 42
plt.rcParams['font.sans-serif'] = "Palatino"
plt.rcParams['font.serif'] = "Palatino"
plt.rcParams['mathtext.fontset'] = 'custom'
plt.rcParams['mathtext.it'] = 'Palatino:italic'
plt.rcParams['mathtext.bf'] = 'Palatino:bold'
plt.rcParams['mathtext.cal'] = 'Palatino'
from matplotlib.ticker import FormatStrFormatter
from matplotlib import ticker
from sklearn.ensemble import RandomForestClassifier,RandomForestRegressor
from sklearn.neural_network import MLPClassifier,MLPRegressor
from sklearn.linear_model import RidgeClassifierCV
from sklearn.multioutput import MultiOutputRegressor
from sklearn.linear_model import RidgeCV
from sklearn.decomposition import PCA
from statsmodels.stats.multitest import multipletests
import multiprocessing
from multiprocessing import Pool
import tqdm
import igraph
from scipy.stats import pearsonr
global paper_df
global main_df
global g
global graphs
global pal
global homedir
global method
global node_2_a
global a_2_node
global a_2_paper
global control
global matrix_idxs
global prs
# matrix_idxs = {'white_M':0,'white_W':1,'white_U':2,'api_M':3,'api_W':4,'api_U':5,'hispanic_M':6,'hispanic_W':7,'hispanic_U':8,'black_M':9,'black_W':10,'black_U':11}
pal = np.array([[72,61,139],[82,139,139],[180,205,205],[205,129,98]])/255.
# global us_only
# us_only = True
"""
AF = author names, with the format LastName, FirstName; LastName, FirstName; etc..
SO = journal
DT = document type (review or article)
CR = reference list
TC = total citations received (at time of downloading about a year ago)
PD = month of publication
PY = year of publication
DI = DOI
"""
import argparse
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
return v
parser = argparse.ArgumentParser()
parser.add_argument('-homedir',action='store',dest='homedir',default='/Users/maxwell/Dropbox/Bertolero_Bassett_Projects/citations/')
parser.add_argument('-method',action='store',dest='method',default='wiki')
parser.add_argument('-continent',type=str2bool,action='store',dest='continent',default=False)
parser.add_argument('-continent_only',type=str2bool,action='store',dest='continent_only',default=False)
parser.add_argument('-control',type=str2bool,action='store',dest='control',default=False)
parser.add_argument('-within_poc',type=str2bool,action='store',dest='within_poc',default=False)
parser.add_argument('-walk_length',type=str,action='store',dest='walk_length',default='cited')
parser.add_argument('-walk_papers',type=str2bool,action='store',dest='walk_papers',default=False)
r = parser.parse_args()
locals().update(r.__dict__)
globals().update(r.__dict__)
wiki_2_race = {"Asian,GreaterEastAsian,EastAsian":'api', "Asian,GreaterEastAsian,Japanese":'api',
"Asian,IndianSubContinent":'api', "GreaterAfrican,Africans":'black', "GreaterAfrican,Muslim":'black',
"GreaterEuropean,British":'white', "GreaterEuropean,EastEuropean":'white',
"GreaterEuropean,Jewish":'white', "GreaterEuropean,WestEuropean,French":'white',
"GreaterEuropean,WestEuropean,Germanic":'white', "GreaterEuropean,WestEuropean,Hispanic":'hispanic',
"GreaterEuropean,WestEuropean,Italian":'white', "GreaterEuropean,WestEuropean,Nordic":'white'}
matrix_idxs = {'white_M':0,'api_M':1,'hispanic_M':2,'black_M':3,'white_W':4,'api_W':5,'hispanic_W':6,'black_W':7}
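# Author categories: indices 0-3 are men and 4-7 are women, each ordered
# white, Asian (api), Hispanic, Black, matching the keys above.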
def log_p_value(p):
if p == 0.0:
p = "-log10($\it{p}$)>250"
elif p > 0.001:
p = np.around(p,3)
p = "$\it{p}$=%s"%(p)
else:
p = (-1) * np.log10(p)
p = "-log10($\it{p}$)=%s"%(np.around(p,0).astype(int))
return p
def convert_r_p(r,p):
return "$\it{r}$=%s\n%s"%(np.around(r,2),log_p_value(p))
def nan_pearsonr(x,y):
xmask = np.isnan(x)
ymask = np.isnan(y)
mask = (xmask==False) & (ymask==False)
return pearsonr(x[mask],y[mask])
def mean_confidence_interval(data, confidence=0.95):
a = 1.0 * np.array(data)
n = len(a)
m, se = np.mean(a), scipy.stats.sem(a)
h = se * scipy.stats.t.ppf((1 + confidence) / 2., n-1)
return m, m-h, m+h
def make_df(method=method):
"""
this makes the actual data by pulling the race from the census or wiki data
"""
# if os.path.exists('/%s/data/result_df_%s.csv'%(homedir,method)):
# df = pd.read_csv('/%s/data/result_df_%s.csv'%(homedir,method))
# return df
main_df = pd.read_csv('/%s/article_data/NewArticleData2019_filtered.csv'%(homedir),header=0)
result_df = pd.DataFrame(columns=['fa_race','la_race','citation_count'])
store_fa_race = []
store_la_race = []
store_citations = []
store_year = []
store_journal = []
store_fa_g = []
store_la_g = []
store_fa_category = []
store_la_category = []
for entry in tqdm.tqdm(main_df.iterrows(),total=len(main_df)):
store_year.append(entry[1]['PY'])
store_journal.append(entry[1]['SO'])
fa = entry[1].AF.split(';')[0]
la = entry[1].AF.split(';')[-1]
fa_lname,fa_fname = fa.split(', ')
la_lname,la_fname = la.split(', ')
try:store_citations.append(len(entry[1].cited.split(',')))
except:store_citations.append(0)
##wiki
if method =='wiki':
names = [{'lname': fa_lname,'fname':fa_fname}]
fa_df = pd.DataFrame(names,columns=['lname','fname'])
fa_race = wiki_2_race[pred_wiki_name(fa_df,'lname','fname').race.values[0]]
names = [{'lname': la_lname,'fname':la_fname}]
la_df = pd.DataFrame(names,columns=['lname','fname'])
la_race = wiki_2_race[pred_wiki_name(la_df,'lname','fname').race.values[0]]
if method =='florida':
names = [{'lname': fa_lname,'fname':fa_fname}]
fa_df = pd.DataFrame(names,columns=['lname','fname'])
fa_race = pred_fl_reg_name(fa_df,'lname','fname').race.values[0].split('_')[-1]
names = [{'lname': la_lname,'fname':la_fname}]
la_df = pd.DataFrame(names,columns=['lname','fname'])
la_race = pred_fl_reg_name(la_df,'lname','fname').race.values[0].split('_')[-1]
#census
if method =='census':
names = [{'name': fa_lname},{'name':la_lname}]
la_df = pd.DataFrame(names)
r = pred_census_ln(la_df,'name')
fa_race,la_race= r.race.values
if method =='combined':
##wiki
names = [{'lname': fa_lname,'fname':fa_fname}]
fa_df = pd.DataFrame(names,columns=['fname','lname'])
fa_race_wiki = wiki_2_race[pred_wiki_name(fa_df,'fname','lname').race.values[0]]
names = [{'lname': la_lname,'fname':la_fname}]
la_df = pd.DataFrame(names,columns=['fname','lname'])
la_race_wiki = wiki_2_race[pred_wiki_name(la_df,'fname','lname').race.values[0]]
names = [{'name': fa_lname},{'name':la_lname}]
la_df = pd.DataFrame(names)
r = pred_census_ln(la_df,'name')
fa_race_census,la_race_census= r.race.values
if la_race_census != la_race_wiki:
if la_race_wiki == 'white':
la_race = la_race_census
if la_race_census == 'white':
la_race = la_race_wiki
elif (la_race_census != 'white') & (la_race_wiki != 'white'): la_race = la_race_wiki
elif la_race_census == la_race_wiki: la_race = la_race_wiki
if fa_race_census != fa_race_wiki:
if fa_race_wiki == 'white':
fa_race = fa_race_census
if fa_race_census == 'white':
fa_race = fa_race_wiki
elif (fa_race_census != 'white') & (fa_race_wiki != 'white'): fa_race = fa_race_wiki
elif fa_race_census == fa_race_wiki: fa_race = fa_race_wiki
store_la_race.append(la_race)
store_fa_race.append(fa_race)
store_fa_g.append(entry[1].AG[0])
store_la_g.append(entry[1].AG[1])
store_fa_category.append('%s_%s' %(fa_race,entry[1].AG[0]))
store_la_category.append('%s_%s' %(la_race,entry[1].AG[1]))
result_df['fa_race'] = store_fa_race
result_df['la_race'] = store_la_race
result_df['fa_g'] = store_fa_g
result_df['la_g'] = store_la_g
result_df['journal'] = store_journal
result_df['year'] = store_year
result_df['citation_count'] = store_citations
result_df['fa_category'] = store_fa_category
result_df['la_category'] = store_la_category
# result_df.citation_count = result_df.citation_count.values.astype(int)
result_df.to_csv('/%s/data/result_df_%s.csv'%(homedir,method),index=False)
return result_df
def make_pr_df(method=method):
"""
this makes the actual data by pulling the race from the census or wiki data
"""
main_df = pd.read_csv('/%s/article_data/NewArticleData2019.csv'%(homedir),header=0)
prs = np.zeros((main_df.shape[0],8,8))
gender_base = {}
for year in np.unique(main_df.PY.values):
ydf = main_df[main_df.PY==year].AG
fa = np.array([x[0] for x in ydf.values])
la = np.array([x[1] for x in ydf.values])
fa_m = len(fa[fa=='M'])/ len(fa[fa!='U'])
fa_w = len(fa[fa=='W'])/ len(fa[fa!='U'])
		la_m = len(la[la=='M'])/ len(la[la!='U'])
		la_w = len(la[la=='W'])/ len(la[la!='U'])
gender_base[year] = [fa_m,fa_w,la_m,la_w]
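		# gender_base[year] holds [P(first author M), P(first author W),
		#                          P(last author M), P(last author W)],
		# estimated from authors with known (non-'U') gender labels in that year.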
asian = [0,1,2]
black = [3,4]
white = [5,6,7,8,9,11,12]
hispanic = [10]
if method =='wiki_black':
black = [3]
for entry in tqdm.tqdm(main_df.iterrows(),total=len(main_df)):
fa = entry[1].AF.split(';')[0]
la = entry[1].AF.split(';')[-1]
fa_lname,fa_fname = fa.split(', ')
la_lname,la_fname = la.split(', ')
fa_g = entry[1].AG[0]
la_g = entry[1].AG[1]
paper_matrix = np.zeros((2,8))
# 1/0
##wiki
if method =='wiki' or method == 'wiki_black':
names = [{'lname': fa_lname,'fname':fa_fname}]
fa_df = pd.DataFrame(names,columns=['lname','fname'])
fa_race = pred_wiki_name(fa_df,'lname','fname').values[0][3:]
fa_race = [np.sum(fa_race[white]),np.sum(fa_race[asian]),np.sum(fa_race[hispanic]),np.sum(fa_race[black])]
names = [{'lname': la_lname,'fname':la_fname}]
la_df = pd.DataFrame(names,columns=['lname','fname'])
la_race = pred_wiki_name(la_df,'lname','fname').values[0][3:]
la_race = [np.sum(la_race[white]),np.sum(la_race[asian]),np.sum(la_race[hispanic]),np.sum(la_race[black])]
# #census
if method =='census':
names = [{'name': fa_lname},{'name':la_lname}]
la_df = pd.DataFrame(names)
r = pred_census_ln(la_df,'name')
fa_race = [r.iloc[0]['white'],r.iloc[0]['api'],r.iloc[0]['hispanic'],r.iloc[0]['black']]
la_race = [r.iloc[1]['white'],r.iloc[1]['api'],r.iloc[1]['hispanic'],r.iloc[1]['black']]
if method =='florida':
names = [{'lname': fa_lname,'fname':fa_fname}]
fa_df = pd.DataFrame(names,columns=['lname','fname'])
asian, hispanic, black, white = pred_fl_reg_name(fa_df,'lname','fname').values[0][3:]
fa_race = [white,asian,hispanic,black]
names = [{'lname': la_lname,'fname':la_fname}]
la_df = pd.DataFrame(names,columns=['lname','fname'])
asian, hispanic, black, white = pred_fl_reg_name(la_df,'lname','fname').values[0][3:]
la_race = [white,asian,hispanic,black]
if method == 'combined':
names = [{'lname': fa_lname,'fname':fa_fname}]
fa_df = pd.DataFrame(names,columns=['fname','lname'])
fa_race_wiki = pred_wiki_name(fa_df,'lname','fname').values[0][3:]
fa_race_wiki = [np.sum(fa_race_wiki[white]),np.sum(fa_race_wiki[asian]),np.sum(fa_race_wiki[hispanic]),np.sum(fa_race_wiki[black])]
names = [{'lname': la_lname,'fname':la_fname}]
la_df = pd.DataFrame(names,columns=['fname','lname'])
la_race_wiki = pred_wiki_name(la_df,'lname','fname').values[0][3:]
la_race_wiki = [np.sum(la_race_wiki[white]),np.sum(la_race_wiki[asian]),np.sum(la_race_wiki[hispanic]),np.sum(la_race_wiki[black])]
names = [{'name': fa_lname},{'name':la_lname}]
la_df = pd.DataFrame(names)
r = pred_census_ln(la_df,'name')
fa_race_census = [r.iloc[0]['white'],r.iloc[0]['api'],r.iloc[0]['hispanic'],r.iloc[0]['black']]
la_race_census = [r.iloc[1]['white'],r.iloc[1]['api'],r.iloc[1]['hispanic'],r.iloc[1]['black']]
if fa_race_census[0] < fa_race_wiki[0]: fa_race = fa_race_census
else: fa_race = fa_race_wiki
if la_race_census[0] < la_race_wiki[0]: la_race = la_race_census
else: la_race = la_race_wiki
		gender_b = gender_base[entry[1].PY]
if fa_g == 'M': paper_matrix[0] = np.outer([1,0],fa_race).flatten()
if fa_g == 'W': paper_matrix[0] = np.outer([0,1],fa_race).flatten()
if fa_g == 'U': paper_matrix[0] = np.outer([gender_b[0],gender_b[1]],fa_race).flatten()
if la_g == 'M': paper_matrix[1] = np.outer([1,0],la_race).flatten()
if la_g == 'W': paper_matrix[1] = np.outer([0,1],la_race).flatten()
if la_g == 'U': paper_matrix[1] = np.outer([gender_b[2],gender_b[3]],la_race).flatten()
paper_matrix = np.outer(paper_matrix[0],paper_matrix[1])
paper_matrix = paper_matrix / np.sum(paper_matrix)
prs[entry[0]] = paper_matrix
np.save('/%s/data/result_pr_df_%s.npy'%(homedir,method),prs)
def make_all_author_race():
"""
this makes the actual data by pulling the race from the census or wiki data,
but this version include middle authors, which we use for the co-authorship networks
"""
main_df = pd.read_csv('/%s/article_data/NewArticleData2019.csv'%(homedir),header=0)
names = []
lnames = []
fnames = []
for entry in main_df.iterrows():
for a in entry[1].AF.split('; '):
a_lname,a_fname = a.split(', ')
lnames.append(a_lname.strip())
fnames.append(a_fname.strip())
names.append(a)
df = pd.DataFrame(np.array([names,fnames,lnames]).swapaxes(0,1),columns=['name','fname','lname'])
df = df.drop_duplicates('name')
if method =='florida':
# 1/0
r = pred_fl_reg_name(df,'lname','fname')
		r = r.rename(columns={'nh_black':'black','nh_white':'white'})
r.to_csv('/%s/data/result_df_%s_all.csv'%(homedir,method),index=False)
if method =='census':
r = pred_census_ln(df,'lname')
r.to_csv('/%s/data/result_df_%s_all.csv'%(homedir,method),index=False)
all_races = []
r = dict(zip(df.name.values,df.race.values))
for idx,paper in tqdm.tqdm(main_df.iterrows(),total=main_df.shape[0]):
races = []
for a in paper.AF.split('; '):
a_lname,a_fname = a.split(', ')
races.append(r[a_lname.strip()])
all_races.append('_'.join(str(x) for x in races))
main_df['all_races'] = all_races
main_df.to_csv('/%s/data/all_data_%s.csv'%(homedir,method),index=False)
race2wiki = {'api': ["Asian,GreaterEastAsian,EastAsian","Asian,GreaterEastAsian,Japanese", "Asian,IndianSubContinent"],
'black':["GreaterAfrican,Africans", "GreaterAfrican,Muslim"],
'white':["GreaterEuropean,British", "GreaterEuropean,EastEuropean", "GreaterEuropean,Jewish", "GreaterEuropean,WestEuropean,French",
"GreaterEuropean,WestEuropean,Germanic", "GreaterEuropean,WestEuropean,Nordic", "GreaterEuropean,WestEuropean,Italian"],
'hispanic':["GreaterEuropean,WestEuropean,Hispanic"]}
if method =='wiki':
r = pred_wiki_name(df,'lname','fname')
for race in ['api','black','hispanic','white']:
r[race] = 0.0
for e in race2wiki[race]:
r[race] = r[race] + r[e]
for race in ['api','black','hispanic','white']:
for e in race2wiki[race]:
r = r.drop(columns=[e])
r.to_csv('/%s/data/result_df_%s_all.csv'%(homedir,method),index=False)
all_races = []
for idx,paper in tqdm.tqdm(main_df.iterrows(),total=main_df.shape[0]):
races = []
for a in paper.AF.split('; '):
races.append(r[r.name==a].race.values[0])
all_races.append('_'.join(str(x) for x in races))
main_df['all_races'] = all_races
main_df.to_csv('/%s/data/all_data_%s.csv'%(homedir,method),index=False)
if method =='combined':
r_wiki = pred_wiki_name(df,'lname','fname')
for race in ['api','black','hispanic','white']:
r_wiki[race] = 0.0
for e in race2wiki[race]:
r_wiki[race] = r_wiki[race] + r_wiki[e]
for race in ['api','black','hispanic','white']:
for e in race2wiki[race]:
r_wiki = r_wiki.drop(columns=[e])
r_census = pred_census_ln(df,'lname')
census = r_census.white < r_wiki.white
wiki = r_census.white > r_wiki.white
r = r_census.copy()
r[census] = r_census
r[wiki] = r_wiki
r.to_csv('/%s/data/result_df_%s_all.csv'%(homedir,method),index=False)
def figure_1_pr_authors():
df = pd.read_csv('/%s/data/result_df_%s_all.csv'%(homedir,method))
paper_df = pd.read_csv('/%s/article_data/NewArticleData2019.csv'%(homedir),header=0)
results = []
for year in np.unique(paper_df.PY.values):
print (year)
ydf = paper_df[paper_df.PY==year]
names = []
for p in ydf.iterrows():
for n in p[1].AF.split(';'):
names.append(n.strip())
names = np.unique(names)
result = np.zeros((len(names),4))
for idx,name in enumerate(names):
try:result[idx] = df[df.name==name].values[0][-4:]
except:result[idx] = np.nan
results.append(np.nansum(result,axis=0))
results = np.array(results)
plt.close()
sns.set(style='white',font='Palatino')
# pal = sns.color_palette("Set2")
# pal = sns.color_palette("vlag",4)
fig = plt.figure(figsize=(7.5,4),constrained_layout=False)
gs = gridspec.GridSpec(15, 14, figure=fig,wspace=.75,hspace=0,left=.1,right=.9,top=.9,bottom=.1)
ax1 = fig.add_subplot(gs[:15,:7])
ax1_plot = plt.stackplot(np.unique(paper_df.PY),np.flip(results.transpose()[[3,0,2,1]],axis=0), labels=['Black','Hispanic','Asian','White'],colors=np.flip(pal,axis=0), alpha=1)
handles, labels = plt.gca().get_legend_handles_labels()
labels.reverse()
handles.reverse()
leg = plt.legend(loc=2,frameon=False,labels=labels,handles=handles,fontsize=8)
for text in leg.get_texts():
plt.setp(text, color = 'black')
plt.margins(0,0)
plt.ylabel('sum of predicted author race')
plt.xlabel('publication year')
ax1.tick_params(axis='y', which='major', pad=0)
plt.title('a',{'fontweight':'bold'},'left',pad=2)
# 1/0
ax2 = fig.add_subplot(gs[:15,8:])
ax2_plot = plt.stackplot(np.unique(paper_df.PY),np.flip(np.divide(results.transpose()[[3,0,2,1]],np.sum(results,axis=1)),axis=0)*100, labels=['Black','Hispanic','Asian','White'],colors=np.flip(pal,axis=0),alpha=1)
handles, labels = plt.gca().get_legend_handles_labels()
labels.reverse()
handles.reverse()
leg = plt.legend(loc=2,frameon=False,labels=labels,handles=handles,fontsize=8)
for text in leg.get_texts():
plt.setp(text, color = 'white')
plt.margins(0,0)
plt.ylabel('percentage of predicted author race',labelpad=-5)
plt.xlabel('publication year')
ax2.yaxis.set_major_formatter(ticker.PercentFormatter())
ax2.tick_params(axis='y', which='major', pad=0)
plt.title('b',{'fontweight':'bold'},'left',pad=2)
plt.savefig('authors.pdf')
def figure_1_pr():
n_iters = 1000
df =pd.read_csv('/%s/article_data/NewArticleData2019.csv'%(homedir),header=0).rename({'PY':'year','SO':'journal'},axis='columns')
matrix = np.load('/%s/data/result_pr_df_%s.npy'%(homedir,method))
results = np.zeros((len(np.unique(df.year)),4))
if within_poc == False:
labels = ['white author & white author','white author & author of color','author of color & white author','author of color &\nauthor of color']
groups = [np.vectorize(matrix_idxs.get)(['white_M','white_W',]),
np.vectorize(matrix_idxs.get)(['api_M','api_W','hispanic_M','hispanic_W','black_M','black_W',])]
names = ['white-white','white-poc','poc-white','poc-poc']
plot_matrix = np.zeros((matrix.shape[0],len(groups),len(groups)))
plot_base_matrix = np.zeros((matrix.shape[0],len(groups),len(groups)))
for i in range(len(groups)):
for j in range(len(groups)):
plot_matrix[:,i,j] = np.nansum(matrix[:,groups[i]][:,:,groups[j]].reshape(matrix.shape[0],-1),axis=1)
for yidx,year in enumerate(np.unique(df.year)):
papers = df[df.year==year].index
r = np.mean(plot_matrix[papers],axis=0).flatten()
results[yidx,0] = r[0]
results[yidx,1] = r[1]
results[yidx,2] = r[2]
results[yidx,3] = r[3]
if within_poc == True:
names = ['white author','Asian author','Hispanic author','Black author']
groups = [[0,4],[1,5],[2,6],[3,7]]
labels = names
plot_matrix = np.zeros((matrix.shape[0],len(groups)))
for i in range(4):
plot_matrix[:,i] = plot_matrix[:,i] + np.nansum(np.nanmean(matrix[:,groups[i],:],axis=-1),axis=-1)
plot_matrix[:,i] = plot_matrix[:,i] + np.nansum(np.nanmean(matrix[:,:,groups[i]],axis=-1),axis=-1)
for yidx,year in enumerate(np.unique(df.year)):
papers = df[df.year==year].index
r = np.mean(plot_matrix[papers],axis=0).flatten()
results[yidx,0] = r[0]
results[yidx,1] = r[1]
results[yidx,2] = r[2]
results[yidx,3] = r[3]
plt.close()
sns.set(style='white',font='Palatino')
# pal = sns.color_palette("Set2")
# pal = sns.color_palette("vlag",4)
fig = plt.figure(figsize=(7.5,4),constrained_layout=False)
gs = gridspec.GridSpec(15, 16, figure=fig,wspace=.75,hspace=0,left=.1,right=.9,top=.9,bottom=.1)
ax1 = fig.add_subplot(gs[:15,:5])
plt.sca(ax1)
ax1_plot = plt.stackplot(np.unique(df.year),np.flip(results.transpose(),axis=0)*100, labels=np.flip(labels),colors=np.flip(pal,axis=0), alpha=1)
handles, labels = plt.gca().get_legend_handles_labels()
labels.reverse()
handles.reverse()
leg = plt.legend(loc=9,frameon=False,labels=labels,handles=handles,fontsize=8)
for text in leg.get_texts():
plt.setp(text, color = 'w')
plt.margins(0,0)
plt.ylabel('percentage of publications')
plt.xlabel('publication year')
ax1.tick_params(axis='x', which='major', pad=-1)
ax1.tick_params(axis='y', which='major', pad=0)
i,j,k,l = np.flip(results[0]*100)
i,j,k,l = [i,(i+j),(i+j+k),(i+j+k+l)]
i,j,k,l = [np.mean([0,i]),np.mean([i,j]),np.mean([j,k]),np.mean([k,l])]
# i,j,k,l = np.array([100]) - np.array([i,j,k,l])
plt.sca(ax1)
ax1.yaxis.set_major_formatter(ticker.PercentFormatter())
ax1.set_yticks([i,j,k,l])
ax1.set_yticklabels(np.flip(np.around(results[0]*100,0).astype(int)))
ax2 = ax1_plot[0].axes.twinx()
plt.sca(ax2)
i,j,k,l = np.flip(results[-1]*100)
i,j,k,l = [i,(i+j),(i+j+k),(i+j+k+l)]
i,j,k,l = [np.mean([0,i]),np.mean([i,j]),np.mean([j,k]),np.mean([k,l])]
plt.ylim(0,100)
ax2.yaxis.set_major_formatter(ticker.PercentFormatter())
ax2.set_yticks([i,j,k,l])
ax2.set_yticklabels(np.flip(np.around(results[-1]*100,0)).astype(int))
plt.xticks([1995., 2000., 2005., 2010., 2015., 2019],np.array([1995., 2000., 2005., 2010., 2015., 2019]).astype(int))
ax2.tick_params(axis='y', which='major', pad=0)
plt.title('a',{'fontweight':'bold'},'left',pad=2)
plot_df = pd.DataFrame(columns=['year','percentage','iteration'])
for yidx,year in enumerate(np.unique(df.year)):
for i in range(n_iters):
data = df[(df.year==year)]
papers = data.sample(int(len(data)),replace=True).index
r = np.mean(plot_matrix[papers],axis=0).flatten()
total = r.sum()
r = np.array(r[1:])/total
r = r.sum()
tdf = pd.DataFrame(np.array([r,year,i]).reshape(1,-1),columns=['percentage','year','iteration'])
plot_df = plot_df.append(tdf,ignore_index=True)
plot_df.percentage = plot_df.percentage.astype(float)
plot_df.iteration = plot_df.iteration.astype(int)
plot_df.percentage = plot_df.percentage.astype(float) * 100
pct_df = pd.DataFrame(columns=['year','percentage','iteration'])
plot_df = plot_df.sort_values('year')
for i in range(n_iters):
a = plot_df[(plot_df.iteration==i)].percentage.values
# change = np.diff(a) / a[:-1] * 100.
change = np.diff(a)
tdf = pd.DataFrame(columns=['year','percentage','iteration'])
tdf.year = range(1997,2020)
tdf.percentage = change[1:]
tdf.iteration = i
pct_df = pct_df.append(tdf,ignore_index=True)
pct_df = pct_df.dropna()
pct_df = pct_df[np.isinf(pct_df.percentage)==False]
ci = mean_confidence_interval(pct_df.percentage)
ci = np.around(ci,2)
print ("Across 1000 bootstraps, the mean percent increase per year was %s%% (95 CI:%s%%,%s%%)"%(ci[0],ci[1],ci[2]))
plt.text(.5,.48,"Increasing at %s%% per year\n(95%% CI:%s%%,%s%%)"%(ci[0],ci[1],ci[2]),{'fontsize':8,'color':'white'},horizontalalignment='center',verticalalignment='bottom',rotation=9,transform=ax2.transAxes)
axes = []
jidx = 3
for makea in range(5):
axes.append(fig.add_subplot(gs[jidx-3:jidx,6:10]))
jidx=jidx+3
for aidx,journal in enumerate(np.unique(df.journal)):
ax = axes[aidx]
plt.sca(ax)
if aidx == 2: ax.set_ylabel('percentage of publications')
if aidx == 4: ax.set_xlabel('publication\nyear',labelpad=-10)
results = np.zeros(( len(np.unique(df[(df.journal==journal)].year)),4))
for yidx,year in enumerate(np.unique(df[(df.journal==journal)].year)):
papers = df[(df.year==year)&(df.journal==journal)].index
r = np.mean(plot_matrix[papers],axis=0).flatten()
results[yidx,0] = r[0]
results[yidx,1] = r[1]
results[yidx,2] = r[2]
results[yidx,3] = r[3]
data = df[df.journal==journal]
if journal == 'NATURE NEUROSCIENCE':
for i in range(3): results = np.concatenate([[[0,0,0,0]],results],axis=0)
ax1_plot = plt.stackplot(np.unique(df.year),np.flip(results.transpose(),axis=0)*100, labels=np.flip(labels,axis=0),colors=np.flip(pal,axis=0), alpha=1)
plt.margins(0,0)
ax.set_yticks([])
if aidx != 4:
ax.set_xticks([])
else: plt.xticks(np.array([1996.5,2017.5]),np.array([1995.,2019]).astype(int))
plt.title(journal.title(), pad=-10,color='w',fontsize=8)
if aidx == 0: plt.text(0,1,'b',{'fontweight':'bold'},horizontalalignment='left',verticalalignment='bottom',transform=ax.transAxes)
journals = np.unique(df.journal)
plot_df = pd.DataFrame(columns=['journal','year','percentage','iteration'])
for j in journals:
for yidx,year in enumerate(np.unique(df.year)):
for i in range(n_iters):
data = df[(df.year==year)&(df.journal==j)]
papers = data.sample(int(len(data)),replace=True).index
r = np.mean(plot_matrix[papers],axis=0).flatten()
total = r.sum()
r = np.array(r[1:])/total
r = r.sum()
tdf = pd.DataFrame(np.array([j,r,year,i]).reshape(1,-1),columns=['journal','percentage','year','iteration'])
plot_df = plot_df.append(tdf,ignore_index=True)
plot_df.percentage = plot_df.percentage.astype(float)
plot_df.iteration = plot_df.iteration.astype(int)
plot_df.percentage = plot_df.percentage.astype(float) * 100
pct_df = pd.DataFrame(columns=['journal','year','percentage','iteration'])
plot_df = plot_df.sort_values('year')
for i in range(n_iters):
for j in journals:
a = plot_df[(plot_df.iteration==i)&(plot_df.journal==j)].percentage.values
# change = np.diff(a) / a[:-1] * 100.
change = np.diff(a)
tdf = pd.DataFrame(columns=['journal','year','percentage','iteration'])
tdf.year = range(1997,2020)
tdf.percentage = change[1:]
tdf.journal = j
tdf.iteration = i
pct_df = pct_df.append(tdf,ignore_index=True)
pct_df = pct_df.dropna()
pct_df = pct_df[np.isinf(pct_df.percentage)==False]
ci = pct_df.groupby(['journal']).percentage.agg(mean_confidence_interval).values
axes = []
jidx = 3
for makea in range(5):
axes.append(fig.add_subplot(gs[jidx-3:jidx,11:]))
jidx=jidx+3
for i,ax,journal,color in zip(range(5),axes,journals,sns.color_palette("rocket_r", 5)):
plt.sca(ax)
ax.clear()
#
# plot_df[np.isnan(plot_df.percentage)] = 0.0
if i == 0: plt.text(0,1,'c',{'fontweight':'bold'},horizontalalignment='left',verticalalignment='bottom',transform=ax.transAxes)
lp = sns.lineplot(data=plot_df[plot_df.journal==journal],y='percentage',x='year',color=color,ci='sd')
plt.margins(0,0)
thisdf = plot_df[plot_df.journal==journal]
minp = int(np.around(thisdf.mean()['percentage'],0))
thisdf = thisdf[thisdf.year==thisdf.year.max()]
maxp = int(np.around(thisdf.mean()['percentage'],0))
plt.text(-0.01,.5,'%s'%(minp),horizontalalignment='right',verticalalignment='top', transform=ax.transAxes,fontsize=10)
plt.text(1.01,.9,'%s'%(maxp),horizontalalignment='left',verticalalignment='top', transform=ax.transAxes,fontsize=10)
ax.set_yticks([])
# ax.set_xticks([])
ax.set_ylabel('')
plt.margins(0,0)
ax.set_yticks([])
if i == 2:
ax.set_ylabel('percentage of publications',labelpad=12)
if i != 4: ax.set_xticks([])
else: plt.xticks(np.array([1.5,22.5]),np.array([1995.,2019]).astype(int))
mean_pc,min_pc,max_pc = np.around(ci[i],2)
if i == 4: ax.set_xlabel('publication\nyear',labelpad=-10)
else: ax.set_xlabel('')
plt.text(.99,0,'95%' + "CI: %s<%s<%s"%(min_pc,mean_pc,max_pc),horizontalalignment='right',verticalalignment='bottom', transform=ax.transAxes,fontsize=8)
if journal == 'NATURE NEUROSCIENCE':
plt.xlim(-3,21)
plt.savefig('/%s/figures/figure1_pr_%s_%s.pdf'%(homedir,method,within_poc))
def validate():
black_names = pd.read_csv('%s/data/Black scientists - Faculty.csv'%(homedir))['Name'].values[1:]
fnames = []
lnames = []
all_names =[]
for n in black_names:
try:
fn,la = n.split(' ')[:2]
fnames.append(fn.strip())
lnames.append(la.strip())
all_names.append('%s_%s'%(fn.strip(),la.strip()))
except:continue
black_df = pd.DataFrame(np.array([all_names,fnames,lnames]).swapaxes(0,1),columns=['name','fname','lname'])
main_df = pd.read_csv('/%s/article_data/NewArticleData2019.csv'%(homedir),header=0)
names = []
lnames = []
fnames = []
for entry in main_df.iterrows():
for a in entry[1].AF.split('; '):
a_lname,a_fname = a.split(', ')
lnames.append(a_lname.strip())
fnames.append(a_fname.strip())
names.append('%s_%s'%(a_fname,a_lname))
main_df = pd.DataFrame(np.array([names,fnames,lnames]).swapaxes(0,1),columns=['name','fname','lname'])
main_df = main_df.drop_duplicates('name')
if method == 'wiki':
black_r = pred_wiki_name(black_df,'lname','fname')
all_r = pred_wiki_name(main_df,'lname','fname')
asian = [0,1,2]
black = [3,4]
white = [5,6,7,8,9,11,12]
hispanic = [10]
all_df = pd.DataFrame(columns=['probability','sample'])
all_df['probability'] = all_r.as_matrix()[:,4:][:,black].sum(axis=1)
all_df['sample'] = 'papers'
black_df = pd.DataFrame(columns=['probability','sample'])
black_df['probability'] = black_r.as_matrix()[:,4:][:,black].sum(axis=1)
black_df['sample'] = 'Black-in-STEM'
if method == 'florida':
black_r = pred_fl_reg_name(black_df,'lname','fname')
all_r = pred_fl_reg_name(main_df,'lname','fname')
asian = [0,1,2]
black = [3,4]
white = [5,6,7,8,9,11,12]
hispanic = [10]
all_df = pd.DataFrame(columns=['probability','sample'])
all_df['probability'] = all_r.values[:,-2]
all_df['sample'] = 'papers'
black_df = pd.DataFrame(columns=['probability','sample'])
black_df['probability'] = black_r.values[:,-2]
black_df['sample'] = 'Black-in-STEM'
if method == 'census':
black_r = pred_census_ln(black_df,'lname')
all_r = pred_census_ln(main_df,'lname')
all_df = pd.DataFrame(columns=['probability','sample'])
all_df['probability'] = all_r.values[:,-3]
all_df['sample'] = 'papers'
black_df = pd.DataFrame(columns=['probability','sample'])
black_df['probability'] = black_r.values[:,-3]
black_df['sample'] = 'Black-in-STEM'
data = all_df.append(black_df,ignore_index=True)
data.probability = data.probability.astype(float)
plt.close()
sns.set(style='white',font='Palatino')
fig = plt.figure(figsize=(7.5,3),constrained_layout=True)
gs = gridspec.GridSpec(6,6, figure=fig)
ax1 = fig.add_subplot(gs[:6,:3])
plt.sca(ax1)
sns.histplot(data=data,x='probability',hue="sample",stat='density',common_norm=False,bins=20)
ax2 = fig.add_subplot(gs[:6,3:])
plt.sca(ax2)
sns.histplot(data=data,x='probability',hue="sample",stat='density',common_norm=False,bins=20)
plt.ylim(0,2.5)
plt.savefig('Black-in-STEM_%s.pdf'%(method))
plt.close()
sns.set(style='white',font='Palatino')
fig = plt.figure(figsize=(7.5,3),constrained_layout=True)
gs = gridspec.GridSpec(6,6, figure=fig)
ax1 = fig.add_subplot(gs[:6,:3])
plt.sca(ax1)
sns.histplot(data=data[data['sample']=='papers'],x='probability',stat='density',common_norm=False,bins=20)
ax2 = fig.add_subplot(gs[:6,3:])
plt.sca(ax2)
sns.histplot(data=data[data['sample']=='Black-in-STEM'],x='probability',hue="sample",stat='density',common_norm=False,bins=20)
# plt.ylim(0,2.5)
plt.savefig('Black-in-STEM_2.pdf')
def make_pr_control():
"""
control for features of citing article
"""
# 1) the year of publication
# 2) the journal in which it was published
# 3) the number of authors
# 4) whether the paper was a review article
# 5) the seniority of the paper’s first and last authors.
# 6) paper location
df = pd.read_csv('/%s/article_data/NewArticleData2019.csv'%(homedir),header=0)
prs = np.load('/%s/data/result_pr_df_%s.npy'%(homedir,method))
cont = pd.read_csv('/%s/article_data/CountryAndContData.csv'%(homedir))
df = df.merge(cont,how='outer',left_index=True, right_index=True)
df = df.merge(pd.read_csv('/%s/article_data/SeniorityData.csv'%(homedir)),left_index=True, right_index=True)
reg_df = pd.DataFrame(columns=['year','n_authors','journal','paper_type','senior','location'])
for entry in tqdm.tqdm(df.iterrows(),total=len(df)):
idx = entry[0]
paper = entry[1]
year = entry[1].PY
n_authors = len(paper.AF.split(';'))
journal = entry[1].SO
paper_type = paper.DT
senior = entry[1].V4
try: loc = entry[1]['FirstListed.Cont'].split()[0]
except: loc = 'None'
reg_df.loc[len(reg_df)] = [year,n_authors,journal,paper_type,senior,loc]
reg_df["n_authors"] = pd.to_numeric(reg_df["n_authors"])
reg_df["year"] = pd.to_numeric(reg_df["year"])
reg_df["senior"] = pd.to_numeric(reg_df["senior"])
skl_df = pd.get_dummies(reg_df).values
ridge = MultiOutputRegressor(RidgeCV(alphas=[1e-5,1e-4,1e-3, 1e-2, 1e-1, 1,10,25,50,75,100])).fit(skl_df,prs.reshape(prs.shape[0],-1))
ridge_probabilities = ridge.predict(skl_df)
ridge_probabilities = np.divide((ridge_probabilities), np.sum(ridge_probabilities,axis=1).reshape(-1,1))
ridge_probabilities = ridge_probabilities.reshape(ridge_probabilities.shape[0],8,8)
np.save('/%s/data/probabilities_pr_%s.npy'%(homedir,method),ridge_probabilities)
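# --- Illustrative sketch (not part of the original pipeline; toy data and names are assumptions,
# and it reuses the MultiOutputRegressor / RidgeCV imports assumed by the code above) ---
# The control regresses each paper's flattened 8x8 expectation matrix on one-hot-encoded
# paper features, then renormalises every predicted matrix so it sums to 1.
def _example_feature_controlled_expectations():
    toy_features = pd.DataFrame({'year': [1999, 2005, 2019], 'journal': ['A', 'B', 'A']})
    X = pd.get_dummies(toy_features).values                 # papers x dummy-coded features
    toy_prs = np.random.dirichlet(np.ones(64), size=3)      # papers x flattened 8x8 matrices
    model = MultiOutputRegressor(RidgeCV(alphas=[1e-2, 1e-1, 1, 10])).fit(X, toy_prs)
    pred = model.predict(X)
    pred = np.divide(pred, np.sum(pred, axis=1).reshape(-1, 1))  # renormalise each matrix
    return pred.reshape(-1, 8, 8)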
def make_pr_control_jn():
"""
control for features of citing article
"""
# 1) the year of publication
# 2) the journal in which it was published
# 3) the number of authors
# 4) whether the paper was a review article
# 5) the seniority of the paper’s first and last authors.
# 6) paper location
# 7) paper sub-field
df = pd.read_csv('/%s/article_data/NewArticleData2019.csv'%(homedir),header=0)
prs = np.load('/%s/data/result_pr_df_%s.npy'%(homedir,method))
cont = pd.read_csv('/%s/article_data/CountryAndContData.csv'%(homedir))
df = df.merge(cont,how='outer',left_index=True, right_index=True)
df = df.merge(pd.read_csv('/%s/article_data/SeniorityData.csv'%(homedir)),left_index=True, right_index=True)
df = df.rename(columns={'DI':'doi'})
df['category'] = 'none'
sub = pd.read_csv('/%s/article_data/JoNcategories_no2019.csv'%(homedir))
for cat,doi in zip(sub.category,sub.doi):
df.iloc[np.where(df.doi==doi)[0],-1] = cat
reg_df = pd.DataFrame(columns=['year','n_authors','journal','paper_type','senior','location','category'])
for entry in tqdm.tqdm(df.iterrows(),total=len(df)):
idx = entry[0]
paper = entry[1]
year = entry[1].PY
n_authors = len(paper.AF.split(';'))
journal = entry[1].SO
paper_type = paper.DT
senior = entry[1].V4
cat = entry[1].category
try: loc = entry[1]['FirstListed.Cont'].split()[0]
except: loc = 'None'
reg_df.loc[len(reg_df)] = [year,n_authors,journal,paper_type,senior,loc,cat]
reg_df["n_authors"] = pd.to_numeric(reg_df["n_authors"])
reg_df["year"] = pd.to_numeric(reg_df["year"])
reg_df["senior"] = pd.to_numeric(reg_df["senior"])
skl_df = pd.get_dummies(reg_df).values
ridge = MultiOutputRegressor(RidgeCV(alphas=[1e-5,1e-4,1e-3, 1e-2, 1e-1, 1,10,25,50,75,100])).fit(skl_df,prs.reshape(prs.shape[0],-1))
ridge_probabilities = ridge.predict(skl_df)
ridge_probabilities = np.divide((ridge_probabilities), np.sum(ridge_probabilities,axis=1).reshape(-1,1))
ridge_probabilities = ridge_probabilities.reshape(ridge_probabilities.shape[0],8,8)
np.save('/%s/data/probabilities_pr_%s_jn.npy'%(homedir,method),ridge_probabilities)
df = df.rename(columns={'DI':'doi'})
df['category'] = 'none'
def write_matrix():
main_df = pd.read_csv('/%s/data/ArticleDataNew.csv'%(homedir))
prs = np.load('/%s/data/result_pr_df_%s.npy'%(homedir,method))
small_matrix = np.zeros((2,2))
matrix_idxs = {'white':0,'api':1,'hispanic':2,'black':3}
small_idxs = {'white':0,'api':1,'hispanic':1,'black':1}
for fa_r in ['white','api','hispanic','black']:
for la_r in ['white','api','hispanic','black']:
small_matrix[small_idxs[fa_r],small_idxs[la_r]] += np.sum(prs[:,matrix_idxs[fa_r],matrix_idxs[la_r]],axis=0)
np.save('/Users/maxwell/Documents/GitHub/unbiasedciter/expected_matrix_%s.npy'%(method),np.sum(prs,axis=0))
np.save('//Users/maxwell/Documents/GitHub/unbiasedciter/expected_small_matrix_%s.npy'%(method),small_matrix)
def convert_df():
main_df = | pd.read_csv('/%s/article_data/NewArticleData2019.csv'%(homedir),header=0) | pandas.read_csv |
import numpy as np
import pandas as pd
from scipy import interpolate
import pickle # to serialise objects
from scipy import stats
import seaborn as sns
from sklearn import metrics
from sklearn.model_selection import train_test_split
sns.set(style='whitegrid', palette='muted', font_scale=1.5)
RANDOM_SEED = 42
dataset_train = pd.read_csv('final_training_set_8people.csv')
training_set = pd.DataFrame(dataset_train.iloc[:,:].values)
training_set.columns = ["User","Activity", "Timeframe", "X axis", "Y axis", "Z axis"]
X = training_set.iloc[:, 3]
X = X.astype(float)
X = (X*1000000).astype('int64')
Y = training_set.iloc[:, 4]
Y = Y.astype(float)
Y = (Y*1000000).astype('int64')
Z = training_set.iloc[:, 5]
Z = Z.astype(float)
Z = (Z*1000000).astype('int64')
Old_T = (training_set.iloc[:, 2]).astype(float)
Old_T = (Old_T * 1000000)
Old_T = Old_T.astype('int64')
New_T = np.arange(0, 12509996000, 50000)
New_T = New_T.astype('int64')
# find interpolation function
interpolate_function = interpolate.interp1d(Old_T, X, axis = 0, fill_value="extrapolate")
X_Final = interpolate_function((New_T))
interpolate_function = interpolate.interp1d(Old_T, Y, axis = 0, fill_value="extrapolate")
Y_Final = interpolate_function((New_T))
interpolate_function = interpolate.interp1d(Old_T, Z, axis = 0, fill_value="extrapolate")
Z_Final = interpolate_function((New_T))
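# Note: New_T advances in steps of 50,000 microseconds (0.05 s), so the three axes are
# resampled onto a uniform ~20 Hz timeline; fill_value="extrapolate" also covers any
# new timestamps that fall outside the range of the original recording.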
#Combining data into one pandas dataframe
Dataset = pd.DataFrame()
Dataset['X_Final'] = X_Final
Dataset['Y_Final'] = Y_Final
Dataset['Z_Final'] = Z_Final
Dataset['New_Timeframe'] = New_T
Dataset = Dataset/1e6
Dataset = Dataset[['New_Timeframe', 'X_Final', 'Y_Final', 'Z_Final']]
Dataset['New_Activity'] = ""
#Dataset = Dataset.astype('int64')
Dataset = Dataset[['New_Activity', 'New_Timeframe', 'X_Final', 'Y_Final', 'Z_Final']]
#function to fill in new dataset with related activity
Dataset = Dataset.to_numpy()
training_set = training_set.to_numpy()
time = 0
temp = training_set[0][1]
var_to_assign = ""
last_row = 0
new_row = 0
for i in range(len(training_set)-1):
if(training_set[i][1] == temp):
continue
if (training_set[i][1] != temp):
var_to_assign = temp
temp = training_set[i][1]
time = training_set[i][2]
a1 = [x for x in Dataset[:, 1] if x <= time]
new_row = len(a1)
Dataset[last_row:new_row+1, 0] = var_to_assign
last_row = new_row
continue
#converting both arrays back to Dataframes
Dataset = pd.DataFrame(Dataset)
Dataset.columns = ['New_Activity', 'New_Timeframe', 'X_Final', 'Y_Final', 'Z_Final']
training_set = pd.DataFrame(training_set)
training_set.columns = ["User","Activity", "Timeframe", "X axis", "Y axis", "Z axis"]
#Filling empty Dataset values
#Checking to see which index values are empty
df_missing = pd.DataFrame()
df_missing = Dataset[Dataset.isnull().any(axis=1)]
#Filling all empty values with preceding values
Dataset['New_Activity'].fillna(method = 'ffill', inplace = True)
Dataset = Dataset[:-7]
#to confirm no empty dataframes are present
df_empty = pd.DataFrame()
df_empty = Dataset[Dataset['New_Activity']=='']
#Combining smaller classes into larger/main classes
Dataset = Dataset.to_numpy()
for i in range(0, len(Dataset)-1):
if Dataset[i][0] == "a_loadwalk" or Dataset[i][0] == "a_jump":
Dataset[i][0] = "a_walk"
if Dataset[i][0] == "p_squat" or Dataset[i][0] == "p_kneel" or Dataset[i][0] == "p_lie" or Dataset[i][0] == "t_lie_sit" or Dataset[i][0] == "t_sit_lie" or Dataset[i][0] == "t_sit_stand":
Dataset[i][0] = "p_sit"
if Dataset[i][0] == "p_bent" or Dataset[i][0] == "t_bend" or Dataset[i][0] == "t_kneel_stand" or Dataset[i][0] == "t_stand_kneel" or Dataset[i][0] == "t_stand_sit" or Dataset[i][0] == "t_straighten" or Dataset[i][0] == "t_turn":
Dataset[i][0] = "p_stand"
if Dataset[i][0] == "unknown":
Dataset[i][0] = Dataset[i-1][0]
Dataset = pd.DataFrame(Dataset)
Dataset.columns = ['New_Activity', 'New_Timeframe', 'X_Final', 'Y_Final', 'Z_Final']
#Encoding the Activity
from sklearn.preprocessing import LabelEncoder
Label = LabelEncoder()
Dataset['Label'] = Label.fit_transform(Dataset['New_Activity'])
Label_Encoder_mapping = dict(zip(Label.classes_, Label.transform(Label.classes_)))
#Adding Standardized Scaling to data
X = Dataset[['X_Final', 'Y_Final', 'Z_Final']]
y = Dataset[['Label']]
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
X = scaler.fit_transform(X)
scaled_X = pd.DataFrame(data=X, columns = ['X_Final', 'Y_Final', 'Z_Final'])
scaled_X['Label'] = y.values
#Feature Generation and Data Transformation
TIME_STEPS = 200
N_FEATURES = 3
STEP = 20
segments = []
labels = []
for i in range(0, len(Dataset) - TIME_STEPS, STEP): #To give the starting point of each batch
xs = scaled_X['X_Final'].values[i: i + TIME_STEPS]
ys = scaled_X['Y_Final'].values[i: i + TIME_STEPS]
zs = scaled_X['Z_Final'].values[i: i + TIME_STEPS]
label = stats.mode(scaled_X['Label'][i: i + TIME_STEPS]) #this statement returns mode and count
label = label[0][0] #to get the value of the mode
segments.append([xs, ys, zs])
labels.append(label)
#reshaping our data
reshaped_segments = np.asarray(segments, dtype = np.float32).reshape(-1, TIME_STEPS, N_FEATURES)
labels = np.asarray(labels)
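# At this point reshaped_segments has shape (n_windows, TIME_STEPS, N_FEATURES):
# sliding windows of 200 samples over the 3 axes, advanced STEP=20 samples at a time,
# and labels holds one modal activity label per window.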
"""#Using one hot encoding
l = pd.DataFrame(labels)
l_one_hot = pd.get_dummies(l)
labels_columns = l_one_hot.idxmax(axis = 1)
labels = np.asarray(pd.get_dummies(labels), dtype = np.float32)
"""
#labels.shape
X_train = reshaped_segments
y_train = labels
#Importing Test Set
#Importing Test DataSet
Test_set = pd.read_csv('final_test_set_2people.csv')
Test_set.drop(['Unnamed: 0'], axis = 1, inplace = True)
#combing smaller classes to bigger classes
Test_set = Test_set.to_numpy()
for i in range(0, len(Test_set)-1):
if Test_set[i][1] == "a_loadwalk" or Test_set[i][1] == "a_jump":
Test_set[i][1] = "a_walk"
if Test_set[i][1] == "p_squat" or Test_set[i][1] == "p_kneel" or Test_set[i][1] == "p_lie" or Test_set[i][1] == "t_lie_sit" or Test_set[i][1] == "t_sit_lie" or Test_set[i][1] == "t_sit_stand":
Test_set[i][1] = "p_sit"
if Test_set[i][1] == "p_bent" or Test_set[i][1] == "t_bend" or Test_set[i][1] == "t_kneel_stand" or Test_set[i][1] == "t_stand_kneel" or Test_set[i][1] == "t_stand_sit" or Test_set[i][1] == "t_straighten" or Test_set[i][1] == "t_turn":
Test_set[i][1] = "p_stand"
if Test_set[i][0] == " " or Test_set[i][0] == "unknown":
Test_set[i][0] = Test_set[i-1][0]
Test_set = pd.DataFrame(Test_set)
Test_set.columns = ["User","New_Activity", "Timeframe", "X axis", "Y axis", "Z axis"]
#Filling empty Dataset values
#Checking to see which index values are empty
df_missing = | pd.DataFrame() | pandas.DataFrame |
import sys
import json
import hjson
import numpy as np
import pandas as pd
import asyncio
import os
import pprint
from hashlib import sha256
from multiprocessing import Pool
from time import time
from passivbot import *
from bybit import create_bot as create_bot_bybit
from bybit import fetch_trades as bybit_fetch_trades
from bybit import calc_cross_long_liq_price as bybit_calc_cross_long_liq_price
from bybit import calc_cross_shrt_liq_price as bybit_calc_cross_shrt_liq_price
from binance import create_bot as create_bot_binance
from binance import fetch_trades as binance_fetch_trades
from binance import calc_cross_long_liq_price as binance_calc_cross_long_liq_price
from binance import calc_cross_shrt_liq_price as binance_calc_cross_shrt_liq_price
from typing import Iterator
def prep_ticks(df: pd.DataFrame) -> np.ndarray:
dfc = df[df.price != df.price.shift(1)] # drop consecutive same price trades
dfc.index = np.arange(len(dfc))
if 'side' in dfc.columns:
# bybit
buyer_maker = dfc.side == 'Sell'
buyer_maker.name = 'buyer_maker'
elif 'is_buyer_maker' in dfc.columns:
# binance
buyer_maker = dfc.is_buyer_maker
buyer_maker.name = 'buyer_maker'
else:
raise Exception('trades of unknown format')
dfcc = pd.concat([dfc.price, buyer_maker, dfc.timestamp], axis=1)
return dfcc.values
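# --- Illustrative usage sketch (the csv path is an assumption, not from the original code) ---
# prep_ticks expects the raw trades DataFrame fetched from the exchange and returns an
# (n, 3) array of [price, buyer_maker, timestamp] with consecutive same-price trades
# dropped, which is exactly what backtest() consumes below.
def _example_prep_ticks_usage(trades_csv_path: str) -> np.ndarray:
    raw_trades = pd.read_csv(trades_csv_path)  # must contain price, timestamp and side / is_buyer_maker
    return prep_ticks(raw_trades)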
def backtest(ticks: np.ndarray, settings: dict):
# ticks formatting [price: float, buyer_maker: bool, timestamp: float]
ss = settings
pos_size, pos_price, reentry_price, reentry_qty, liq_price = 0.0, 0.0, 0.0, 0.0, 0.0
closest_long_liq, closest_shrt_liq = 1.0, 1.0
stop_loss_liq_diff_price, stop_loss_pos_price_diff_price, stop_loss_price = 0.0, 0.0, 0.0
actual_balance = ss['starting_balance']
apparent_balance = actual_balance * ss['balance_pct']
pnl_plus_fees_cumsum, loss_cumsum, profit_cumsum, fee_paid_cumsum = 0.0, 0.0, 0.0, 0.0
if ss['inverse']:
min_qty_f = calc_min_qty_inverse
long_pnl_f = calc_long_pnl_inverse
shrt_pnl_f = calc_shrt_pnl_inverse
cost_f = calc_cost_inverse
pos_margin_f = calc_margin_cost_inverse
max_pos_size_f = calc_max_pos_size_inverse
min_entry_qty_f = calc_min_entry_qty_inverse
long_liq_price_f = lambda bal, psize, pprice: \
bybit_calc_cross_long_liq_price(bal, psize, pprice, ss['max_leverage'])
shrt_liq_price_f = lambda bal, psize, pprice: \
bybit_calc_cross_shrt_liq_price(bal, psize, pprice, ss['max_leverage'])
else:
min_qty_f = calc_min_qty_linear
long_pnl_f = calc_long_pnl_linear
shrt_pnl_f = calc_shrt_pnl_linear
cost_f = calc_cost_linear
pos_margin_f = calc_margin_cost_linear
max_pos_size_f = calc_max_pos_size_linear
min_entry_qty_f = calc_min_entry_qty_linear
long_liq_price_f = lambda bal, psize, pprice: \
binance_calc_cross_long_liq_price(bal, psize, pprice, ss['leverage'])
shrt_liq_price_f = lambda bal, psize, pprice: \
binance_calc_cross_shrt_liq_price(bal, psize, pprice, ss['leverage'])
break_on = {e[0]: eval(e[1]) for e in settings['break_on'] if e[0].startswith('ON:')}
ema = ticks[0][0]
ema_alpha = 2 / (ss['ema_span'] + 1)
ema_alpha_ = 1 - ema_alpha
prev_trade_ts = 0
min_trade_delay_millis = ss['latency_simulation_ms'] if 'latency_simulation_ms' in ss else 1000
trades = []
ob = [min(ticks[0][0], ticks[1][0]),
max(ticks[0][0], ticks[1][0])]
for k, t in enumerate(ticks):
did_trade = False
if t[1]:
# maker buy, taker sell
if pos_size == 0.0:
# create long pos
if ss['do_long']:
price = calc_no_pos_bid_price(ss['price_step'], ss['ema_spread'], ema, ob[0])
if t[0] < price and ss['do_long']:
did_trade = True
qty = min_entry_qty_f(ss['qty_step'], ss['min_qty'], ss['min_cost'],
ss['entry_qty_pct'], ss['leverage'], apparent_balance,
price)
trade_type, trade_side = 'entry', 'long'
pnl = 0.0
fee_paid = -cost_f(qty, price) * ss['maker_fee']
elif pos_size > 0.0:
closest_long_liq = min(calc_diff(liq_price, t[0]), closest_long_liq)
if t[0] <= liq_price and closest_long_liq < 0.2:
# long liquidation
print('\nlong liquidation')
return []
if t[0] < reentry_price:
# add to long pos
did_trade, qty, price = True, reentry_qty, reentry_price
trade_type, trade_side = 'reentry', 'long'
pnl = 0.0
fee_paid = -cost_f(qty, price) * ss['maker_fee']
# check if long stop loss triggered
if t[0] <= stop_loss_liq_diff_price:
stop_loss_price = ob[1]
stop_loss_type = 'stop_loss_liq_diff'
elif t[0] <= stop_loss_pos_price_diff_price:
stop_loss_price = ob[1]
stop_loss_type = 'stop_loss_pos_price_diff'
else:
stop_loss_price = 0.0
else:
if t[0] <= pos_price:
# close shrt pos
min_close_qty = calc_min_close_qty(
ss['qty_step'], ss['min_qty'], ss['min_close_qty_multiplier'],
min_entry_qty_f(ss['qty_step'], ss['min_qty'], ss['min_cost'],
ss['entry_qty_pct'], ss['leverage'], apparent_balance,
t[0])
)
qtys, prices = calc_shrt_closes(ss['price_step'],
ss['qty_step'],
min_close_qty,
ss['min_markup'],
ss['max_markup'],
pos_size,
pos_price,
ob[0],
ss['n_close_orders'])
if t[0] < prices[0]:
did_trade, qty, price = True, qtys[0], prices[0]
trade_type, trade_side = 'close', 'shrt'
pnl = shrt_pnl_f(pos_price, price, qty)
fee_paid = -cost_f(qty, price) * ss['maker_fee']
elif t[0] < stop_loss_price:
# shrt stop loss
did_trade = True
qty = calc_pos_reduction_qty(ss['qty_step'], ss['stop_loss_pos_reduction'],
pos_size)
price = stop_loss_price
trade_type, trade_side = stop_loss_type, 'shrt'
pnl = shrt_pnl_f(pos_price, price, qty)
fee_paid = -cost_f(qty, price) * ss['maker_fee']
ob[0] = t[0]
else:
# maker sell, taker buy
if pos_size == 0.0:
# create shrt pos
if ss['do_shrt']:
price = calc_no_pos_ask_price(ss['price_step'], ss['ema_spread'], ema, ob[1])
if t[0] > price:
did_trade = True
qty = -min_entry_qty_f(ss['qty_step'], ss['min_qty'], ss['min_cost'],
ss['entry_qty_pct'], ss['leverage'],
apparent_balance, price)
trade_type, trade_side = 'entry', 'shrt'
pnl = 0.0
fee_paid = -cost_f(-qty, price) * ss['maker_fee']
elif pos_size < 0.0:
closest_shrt_liq = min(calc_diff(liq_price, t[0]), closest_shrt_liq)
if t[0] >= liq_price and closest_shrt_liq < 0.2:
# shrt liquidation
print('\nshrt liquidation')
return []
if t[0] > reentry_price:
# add to shrt pos
did_trade, qty, price = True, reentry_qty, reentry_price
trade_type, trade_side = 'reentry', 'shrt'
pnl = 0.0
fee_paid = -cost_f(-qty, price) * ss['maker_fee']
# check if shrt stop loss triggered
if t[0] >= stop_loss_liq_diff_price:
stop_loss_price = ob[0]
stop_loss_type = 'stop_loss_liq_diff'
elif t[0] >= stop_loss_pos_price_diff_price:
stop_loss_price = ob[0]
stop_loss_type = 'stop_loss_pos_price_diff'
else:
stop_loss_price = 0.0
else:
# close long pos
if t[0] >= pos_price:
min_close_qty = calc_min_close_qty(
ss['qty_step'], ss['min_qty'], ss['min_close_qty_multiplier'],
min_entry_qty_f(ss['qty_step'], ss['min_qty'], ss['min_cost'],
ss['entry_qty_pct'], ss['leverage'], apparent_balance,
t[0])
)
qtys, prices = calc_long_closes(ss['price_step'],
ss['qty_step'],
min_close_qty,
ss['min_markup'],
ss['max_markup'],
pos_size,
pos_price,
ob[1],
ss['n_close_orders'])
if t[0] > prices[0]:
did_trade, qty, price = True, qtys[0], prices[0]
trade_type, trade_side = 'close', 'long'
pnl = long_pnl_f(pos_price, price, -qty)
fee_paid = - cost_f(-qty, price) * ss['maker_fee']
elif stop_loss_price > 0.0 and t[0] > stop_loss_price:
# long stop loss
did_trade = True
qty = -calc_pos_reduction_qty(ss['qty_step'], ss['stop_loss_pos_reduction'],
pos_size)
price = stop_loss_price
trade_type, trade_side = stop_loss_type, 'long'
pnl = long_pnl_f(pos_price, price, qty)
fee_paid = -cost_f(-qty, price) * ss['maker_fee']
ob[1] = t[0]
ema = ema * ema_alpha_ + t[0] * ema_alpha
if did_trade:
if t[2] - prev_trade_ts < min_trade_delay_millis:
if trade_type == 'reentry':
# because of live bot's multiple open orders,
# allow consecutive reentries whose timestamp diff < min delay
if trades[-1]['type'] != 'reentry':
continue
else:
continue
prev_trade_ts = t[2]
new_pos_size = round_(pos_size + qty, 0.0000000001)
if 'entry' in trade_type:
pos_price = pos_price * abs(pos_size / new_pos_size) + \
price * abs(qty / new_pos_size) if new_pos_size else np.nan
pos_size = new_pos_size
actual_balance = max(ss['starting_balance'], actual_balance + pnl + fee_paid)
apparent_balance = actual_balance * ss['balance_pct']
if pos_size == 0.0:
liq_price = 0.0
elif pos_size > 0.0:
liq_price = long_liq_price_f(actual_balance, pos_size, pos_price)
else:
liq_price = shrt_liq_price_f(actual_balance, pos_size, pos_price)
if liq_price < 0.0:
liq_price = 0.0
progress = k / len(ticks)
pnl_plus_fee = pnl + fee_paid
pnl_plus_fees_cumsum += pnl_plus_fee
if trade_type.startswith('stop_loss'):
loss_cumsum += pnl
else:
profit_cumsum += pnl
fee_paid_cumsum += fee_paid
total_gain = (pnl_plus_fees_cumsum + settings['starting_balance']) / settings['starting_balance']
n_days_ = (t[2] - ticks[0][2]) / (1000 * 60 * 60 * 24)
try:
adg = total_gain ** (1 / n_days_) if (n_days_ > 0.0 and total_gain > 0.0) else 0.0
except:
adg = 0.0
avg_gain_per_tick = \
(actual_balance / settings['starting_balance']) ** (1 / (len(trades) + 1))
millis_since_prev_trade = t[2] - trades[-1]['timestamp'] if trades else 0.0
trades.append({'trade_id': k, 'side': trade_side, 'type': trade_type, 'price': price,
'qty': qty, 'pos_price': pos_price, 'pos_size': pos_size,
'liq_price': liq_price, 'pnl': pnl, 'fee_paid': fee_paid,
'pnl_plus_fee': pnl_plus_fee, 'fee_paid_cumsum': fee_paid_cumsum,
'apparent_balance': apparent_balance, 'actual_balance': actual_balance,
'profit_cumsum': profit_cumsum, 'loss_cumsum': loss_cumsum,
'pnl_plus_fees_cumsum': pnl_plus_fees_cumsum,
'average_daily_gain': adg, 'timestamp': t[2],
'closest_long_liq': closest_long_liq,
'closest_shrt_liq': closest_shrt_liq,
'closest_liq': min(closest_long_liq, closest_shrt_liq),
'avg_gain_per_tick': avg_gain_per_tick,
'millis_since_prev_trade': millis_since_prev_trade,
'progress': progress})
closest_long_liq, closest_shrt_liq = 1.0, 1.0
for key, condition in break_on.items():
if condition(trades, ticks, k):
print('break on', key)
return []
if pos_size > 0.0:
stop_loss_liq_diff_price = liq_price * (1 + ss['stop_loss_liq_diff'])
stop_loss_pos_price_diff_price = pos_price * (1 - ss['stop_loss_pos_price_diff'])
stop_loss_price = 0.0
reentry_price = min(
ob[0],
calc_long_reentry_price(ss['price_step'], ss['grid_spacing'],
ss['grid_coefficient'], apparent_balance,
pos_margin_f(ss['leverage'], pos_size, pos_price),
pos_price)
)
reentry_price = max(ss['price_step'], reentry_price)
min_qty_ = min_qty_f(ss['qty_step'], ss['min_qty'], ss['min_cost'], reentry_price)
reentry_qty = calc_reentry_qty(ss['qty_step'],
ss['ddown_factor'],
min_qty_,
max_pos_size_f(ss['leverage'], apparent_balance,
reentry_price),
pos_size)
if reentry_qty < min_qty_:
reentry_price = ss['price_step']
trades[-1]['reentry_price'] = reentry_price
elif pos_size < 0.0:
stop_loss_liq_diff_price = liq_price * (1 - ss['stop_loss_liq_diff']) \
if liq_price > 0.0 else pos_price * 10000
stop_loss_pos_price_diff_price = pos_price * (1 + ss['stop_loss_pos_price_diff'])
stop_loss_price = 0.0
reentry_price = max([
ss['price_step'],
ob[1],
calc_shrt_reentry_price(ss['price_step'], ss['grid_spacing'],
ss['grid_coefficient'], apparent_balance,
pos_margin_f(ss['leverage'], pos_size, pos_price),
pos_price)
])
min_qty_ = min_qty_f(ss['qty_step'], ss['min_qty'], ss['min_cost'], reentry_price)
reentry_qty = -calc_reentry_qty(ss['qty_step'],
ss['ddown_factor'],
min_qty_,
max_pos_size_f(ss['leverage'], apparent_balance,
reentry_price),
pos_size)
if -reentry_qty < min_qty_:
reentry_price = 9e12
trades[-1]['reentry_price'] = reentry_price
else:
trades[-1]['reentry_price'] = np.nan
line = f"\r{progress:.3f} pnl plus fees cumsum {pnl_plus_fees_cumsum:.8f} "
line += f"profit cumsum {profit_cumsum:.5f} "
line += f"loss cumsum {loss_cumsum:.5f} "
line += f"actual_bal {actual_balance:.4f} "
line += f"apparent_bal {apparent_balance:.4f} "
#line += f"qty {calc_min_entry_qty_(apparent_balance, ob[0]):.4f} "
#line += f"adg {trades[-1]['average_daily_gain']:.3f} "
#line += f"max pos pct {abs(pos_size) / calc_max_pos_size(apparent_balance, t[0]):.3f} "
line += f"pos size {pos_size:.4f} "
print(line, end=' ')
return trades
def calc_new_val(val, range_, m):
choice_span = (range_[1] - range_[0]) * m / 2
biased_mid_point = max(range_[0] + choice_span, min(val, range_[1] - choice_span))
choice_range = (biased_mid_point - choice_span, biased_mid_point + choice_span)
new_val = np.random.choice(np.linspace(choice_range[0], choice_range[1], 200))
return round_(new_val, range_[2])
def get_new_candidate(ranges: dict, best: dict, m=0.2):
new_candidate = {}
for key in best:
if key not in ranges:
continue
if type(best[key]) == tuple:
new_candidate[key] = tuple(sorted([calc_new_val(e, ranges[key], m) for e in best[key]]))
else:
new_candidate[key] = calc_new_val(best[key], ranges[key], m)
return {k_: new_candidate[k_] for k_ in sorted(new_candidate)}
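# --- Illustrative sketch of mutating a candidate (the ranges and keys are assumptions) ---
# Each entry in `ranges` is (low, high, step); m controls how far the random walk may
# move away from the current best value. Keys missing from `ranges` are simply dropped.
def _example_get_new_candidate():
    ranges = {'grid_spacing': (0.001, 0.02, 0.0001), 'ddown_factor': (0.1, 3.0, 0.01)}
    best = {'grid_spacing': 0.005, 'ddown_factor': 1.0, 'symbol': 'BTCUSD'}
    return get_new_candidate(ranges, best, m=0.2)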
def get_downloaded_trades(filepath: str, age_limit_millis: float) -> (pd.DataFrame, dict):
if os.path.isdir(filepath):
filenames = sorted([f for f in os.listdir(filepath) if f.endswith('.csv')],
key=lambda x: int(x[:x.find('_')].replace('.cs', '').replace('v', '')))
chunks = []
chunk_lengths = {}
for f in filenames[::-1]:
chunk = pd.read_csv(filepath + f).set_index('trade_id')
chunk_lengths[f] = len(chunk)
print('\rloaded chunk of trades', f, ts_to_date(chunk.timestamp.iloc[0] / 1000),
end=' ')
chunks.append(chunk)
if chunk.timestamp.iloc[0] < age_limit_millis:
break
if chunks:
df = | pd.concat(chunks, axis=0) | pandas.concat |
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 31 10:44:05 2019
@author: WT
"""
import os
import networkx as nx
import numpy as np
import pandas as pd
import torch
from .preprocessing_funcs import load_pickle, save_as_pickle, generate_text_graph
import logging
logging.basicConfig(format='%(asctime)s [%(levelname)s]: %(message)s', \
datefmt='%m/%d/%Y %I:%M:%S %p', level=logging.INFO)
logger = logging.getLogger(__file__)
def load_datasets(args, train_test_split=0):
"""Loads dataset and graph if exists, else create and process them from raw data
Returns --->
f: torch tensor input of GCN (Identity matrix)
X: input of GCN (Identity matrix)
A_hat: transformed adjacency matrix A
selected: indexes of selected labelled nodes for training
test_idxs: indexes of not-selected nodes for inference/testing
labels_selected: labels of selected labelled nodes for training
labels_not_selected: labels of not-selected labelled nodes for inference/testing
"""
logger.info("Loading data...")
df_data_path = "./data/df_data.pkl"
graph_path = "./data/text_graph.pkl"
if not os.path.isfile(df_data_path) or not os.path.isfile(graph_path):
logger.info("Building datasets and graph from raw data... Note this will take quite a while...")
generate_text_graph(args.train_data, args.infer_data, args.max_vocab_len)
df_data = load_pickle("df_data.pkl")
G_dict = load_pickle("text_graph.pkl")
G = G_dict["graph"]
infer_idx_start = G_dict["infer_idx_start"]
del G_dict
logger.info("Building adjacency and degree matrices...")
A = nx.to_numpy_matrix(G, weight="weight"); A = A + np.eye(G.number_of_nodes())
degrees = []
for d in G.degree(weight=None):
if d[1] == 0:
degrees.append(0)
else:
degrees.append(d[1]**(-0.5))
degrees = np.diag(degrees)
X = np.eye(G.number_of_nodes()) # Features are just identity matrix
A_hat = degrees@A@degrees
f = X # (n X n) X (n X n) x (n X n) X (n X n) input of net
if train_test_split == 1:
logger.info("Splitting labels for training and inferring...")
### stratified test samples
test_idxs = []
for b_id in df_data["label"].unique():
dum = df_data[df_data["label"] == b_id]
if len(dum) >= 4:
test_idxs.extend(list(np.random.choice(dum.index, size=round(args.test_ratio*len(dum)), replace=False)))
save_as_pickle("test_idxs.pkl", test_idxs)
# select only certain labelled nodes for semi-supervised GCN
selected = []
for i in range(len(df_data)):
if i not in test_idxs:
selected.append(i)
save_as_pickle("selected.pkl", selected)
else:
logger.info("Preparing training labels...")
test_idxs = [i for i in range(infer_idx_start, len(df_data))]
selected = [i for i in range(infer_idx_start)]
save_as_pickle("selected.pkl", selected)
save_as_pickle("test_idxs.pkl", test_idxs)
f_selected = f[selected]; f_selected = torch.from_numpy(f_selected).float()
f_not_selected = f[test_idxs]; f_not_selected = torch.from_numpy(f_not_selected).float()
labels_selected = list(df_data.loc[selected]['label'])
if train_test_split == 1:
labels_not_selected = list(df_data.loc[test_idxs]['label'])
else:
labels_not_selected = []
f = torch.from_numpy(f).float()
save_as_pickle("labels_selected.pkl", labels_selected)
save_as_pickle("labels_not_selected.pkl", labels_not_selected)
logger.info("Split into %d train and %d test lebels." % (len(labels_selected), len(labels_not_selected)))
return f, X, A_hat, selected, labels_selected, labels_not_selected, test_idxs
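# --- Illustrative sketch of the adjacency normalisation used above (the toy graph is an assumption) ---
# A_hat = D^(-1/2) (A + I) D^(-1/2), the standard GCN renormalisation trick.
def _example_a_hat():
    G = nx.path_graph(3)
    A = nx.to_numpy_matrix(G, weight="weight") + np.eye(G.number_of_nodes())
    degrees = np.diag([d[1] ** (-0.5) for d in G.degree(weight=None)])
    return degrees @ A @ degrees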
def load_state(net, optimizer, scheduler, model_no=0, load_best=False):
""" Loads saved model and optimizer states if exists """
logger.info("Initializing model and optimizer states...")
base_path = "./data/"
checkpoint_path = os.path.join(base_path,"test_checkpoint_%d.pth.tar" % model_no)
best_path = os.path.join(base_path,"test_model_best_%d.pth.tar" % model_no)
start_epoch, best_pred, checkpoint = 0, 0, None
if (load_best == True) and os.path.isfile(best_path):
checkpoint = torch.load(best_path)
logger.info("Loaded best model.")
elif os.path.isfile(checkpoint_path):
checkpoint = torch.load(checkpoint_path)
logger.info("Loaded checkpoint model.")
if checkpoint != None:
start_epoch = checkpoint['epoch']
best_pred = checkpoint['best_acc']
net.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
scheduler.load_state_dict(checkpoint['scheduler'])
logger.info("Loaded model and optimizer.")
return start_epoch, best_pred
def load_results(model_no=0):
""" Loads saved results if exists """
losses_path = "./data/test_losses_per_epoch_%d.pkl" % model_no
accuracy_path = "./data/test_accuracy_per_epoch_%d.pkl" % model_no
train_accuracy_path = "./data/train_accuracy_per_epoch_%d.pkl" % model_no
if os.path.isfile(losses_path) and os.path.isfile(accuracy_path) and os.path.isfile(train_accuracy_path):
losses_per_epoch = load_pickle("test_losses_per_epoch_%d.pkl" % model_no)
accuracy_per_epoch = load_pickle("test_accuracy_per_epoch_%d.pkl" % model_no)
train_accuracy_per_epoch = load_pickle("train_accuracy_per_epoch_%d.pkl" % model_no)
logger.info("Loaded results buffer")
else:
losses_per_epoch, train_accuracy_per_epoch, accuracy_per_epoch = [], [], []
return losses_per_epoch, train_accuracy_per_epoch, accuracy_per_epoch
def evaluate(output, labels_e):
if len(labels_e) == 0:
return 0
else:
_, labels = output.max(1); labels = labels.cpu().numpy() if labels.is_cuda else labels.numpy()
return sum([(e) for e in labels_e] == labels)/len(labels)
def infer(f, test_idxs, net):
logger.info("Evaluating on inference data...")
net.eval()
with torch.no_grad():
pred_labels = net(f)
if pred_labels.is_cuda:
pred_labels = list(pred_labels[test_idxs].max(1)[1].cpu().numpy())
else:
pred_labels = list(pred_labels[test_idxs].max(1)[1].numpy())
pred_labels = [i for i in pred_labels]
test_idxs = [i - test_idxs[0] for i in test_idxs]
df_results = | pd.DataFrame(columns=["index", "predicted_label"]) | pandas.DataFrame |
"""QUANTAXIS 扩展"""
from collections import deque
import QUANTAXIS as QA
import numpy as np
import pandas as pd
class QA_Performance_Ext(QA.QA_Performance):
@property
def pnl_fifo_ext(self):
"""修改基类中的 `pnl_fif`。
基类中的方法计算错误,并没有使用先进先出的逻辑。参见代码中的具体描述
Returns: 在原本返回成交配对DataFrame的结果上,
增加了返回尚未配对成功的dict
(主键为股票代码,value为deque队列,其中包含元组[交易日期,交易数量,交易金额])
"""
X = dict(
zip(
self.target.code,
[deque() for i in range(len(self.target.code))]
)
)
pair_table = []
for _, data in self.target.history_table.iterrows():
while True:
if len(X[data.code]) == 0:
X[data.code].append(
(data.datetime,
data.amount,
data.price)
)
break
else:
l = X[data.code].popleft()
if (l[1] * data.amount) < 0:
# offsetting an existing long position, or offsetting an existing short position
if abs(l[1]) > abs(data.amount):
temp = (l[0], l[1] + data.amount, l[2])
X[data.code].appendleft(temp)
if data.amount < 0:
pair_table.append(
[
data.code,
data.datetime,
l[0],
abs(data.amount),
data.price,
l[2]
]
)
break
else:
pair_table.append(
[
data.code,
l[0],
data.datetime,
abs(data.amount),
data.price,
l[2]
]
)
break
elif abs(l[1]) < abs(data.amount):
data.amount = data.amount + l[1]
if data.amount < 0:
pair_table.append(
[
data.code,
data.datetime,
l[0],
l[1],
data.price,
l[2]
]
)
else:
pair_table.append(
[
data.code,
l[0],
data.datetime,
l[1],
data.price,
l[2]
]
)
else:
if data.amount < 0:
pair_table.append(
[
data.code,
data.datetime,
l[0],
abs(data.amount),
data.price,
l[2]
]
)
break
else:
pair_table.append(
[
data.code,
l[0],
data.datetime,
abs(data.amount),
data.price,
l[2]
]
)
break
else:
# The main fix is here: the original ordering of these two operations was wrong.
X[data.code].append(
(data.datetime,
data.amount,
data.price)
)
X[data.code].appendleft(l)
break
pair_title = [
'code',
'sell_date',
'buy_date',
'amount',
'sell_price',
'buy_price'
]
pnl = pd.DataFrame(pair_table, columns=pair_title).set_index('code')
pnl = pnl.assign(pnl_ratio=(pnl.sell_price / pnl.buy_price) - 1).assign(
buy_date= | pd.to_datetime(pnl.buy_date) | pandas.to_datetime |
from pathlib import Path
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_series_equal
from src.contact_models.contact_model_functions import _draw_nr_of_contacts
from src.contact_models.contact_model_functions import _draw_potential_vacation_contacts
from src.contact_models.contact_model_functions import (
_identify_ppl_affected_by_vacation,
)
from src.contact_models.contact_model_functions import (
calculate_non_recurrent_contacts_from_empirical_distribution,
)
from src.contact_models.contact_model_functions import go_to_daily_work_meeting
from src.contact_models.contact_model_functions import go_to_weekly_meeting
from src.contact_models.contact_model_functions import meet_daily_other_contacts
from src.contact_models.contact_model_functions import reduce_contacts_on_condition
from src.shared import draw_groups
@pytest.fixture
def params():
params = pd.DataFrame()
params["category"] = ["work_non_recurrent"] * 2 + ["other_non_recurrent"] * 2
params["subcategory"] = [
"symptomatic_multiplier",
"positive_test_multiplier",
] * 2
params["name"] = ["symptomatic_multiplier", "positive_test_multiplier"] * 2
params["value"] = [0.0, 0.0, 0.0, 0.0]
params.set_index(["category", "subcategory", "name"], inplace=True)
return params
@pytest.fixture
def states():
"""states DataFrame for testing purposes.
Columns:
- date: 2020-04-01 - 2020-04-30
- id: 50 individuals, with 30 observations each. id goes from 0 to 49.
- immune: bool
- infectious: bool
- age_group: ordered Categorical, either 10-19 or 40-49.
- region: unordered Categorical, ['Overtjssel', 'Drenthe', 'Gelderland']
- n_has_infected: int, 0 to 3.
- cd_infectious_false: int, -66 to 8.
- occupation: Categorical. "working" or "in school".
- cd_symptoms_false: int, positive for the first 20 individuals, negative after.
"""
this_modules_path = Path(__file__).resolve()
states = pd.read_parquet(this_modules_path.parent / "1.parquet")
old_to_new = {old: i for i, old in enumerate(sorted(states["id"].unique()))}
states["id"].replace(old_to_new, inplace=True)
states["age_group"] = pd.Categorical(
states["age_group"], ["10 - 19", "40 - 49"], ordered=True
)
states["age_group"] = states["age_group"].cat.rename_categories(
{"10 - 19": "10-19", "40 - 49": "40-49"}
)
states["region"] = pd.Categorical(
states["region"], ["Overtjssel", "Drenthe", "Gelderland"], ordered=False
)
states["date"] = pd.to_datetime(states["date"], format="%Y-%m-%d", unit="D")
states["n_has_infected"] = states["n_has_infected"].astype(int)
states["cd_infectious_false"] = states["cd_infectious_false"].astype(int)
states["occupation"] = states["age_group"].replace(
{"10-19": "in school", "40-49": "working"}
)
states["cd_symptoms_false"] = list(range(1, 21)) + list(range(-len(states), -20))
states["symptomatic"] = states["cd_symptoms_false"] >= 0
states["knows_infectious"] = False
states["knows_immune"] = False
states["cd_received_test_result_true"] = -100
states["knows_currently_infected"] = states.eval(
"knows_infectious | (knows_immune & symptomatic) "
"| (knows_immune & (cd_received_test_result_true >= -13))"
)
states["quarantine_compliance"] = 1.0
return states
@pytest.fixture
def a_thursday(states):
a_thursday = states[states["date"] == "2020-04-30"].copy()
a_thursday["cd_symptoms_false"] = list(range(1, 21)) + list(
range(-len(a_thursday), -20)
)
a_thursday["symptomatic"] = a_thursday["cd_symptoms_false"] >= 0
a_thursday["work_recurrent_weekly"] = draw_groups(
df=a_thursday,
query="occupation == 'working'",
assort_bys=["region"],
n_per_group=20,
seed=484,
)
return a_thursday
@pytest.fixture
def no_reduction_params():
params = pd.DataFrame()
params["subcategory"] = ["symptomatic_multiplier", "positive_test_multiplier"]
params["name"] = params["subcategory"]
params["value"] = 1.0
params = params.set_index(["subcategory", "name"])
return params
# ----------------------------------------------------------------------------
def test_go_to_weekly_meeting_wrong_day(a_thursday):
a_thursday["group_col"] = [1, 2, 1, 2, 3, 3, 3] + [-1] * (len(a_thursday) - 7)
contact_params = pd.DataFrame()
group_col_name = "group_col"
day_of_week = "Saturday"
seed = 3931
res = go_to_weekly_meeting(
a_thursday, contact_params, group_col_name, day_of_week, seed
)
expected = pd.Series(False, index=a_thursday.index)
assert_series_equal(res, expected, check_names=False)
def test_go_to_weekly_meeting_right_day(a_thursday, no_reduction_params):
a_thursday["group_col"] = [1, 2, 1, 2, 3, 3, 3] + [-1] * (len(a_thursday) - 7)
res = go_to_weekly_meeting(
states=a_thursday,
params=no_reduction_params,
group_col_name="group_col",
day_of_week="Thursday",
seed=3931,
)
expected = pd.Series(False, index=a_thursday.index)
expected[:7] = True
assert_series_equal(res, expected, check_names=False)
def test_go_to_daily_work_meeting_weekend(states, no_reduction_params):
a_saturday = states[states["date"] == pd.Timestamp("2020-04-04")].copy()
a_saturday["work_saturday"] = [True, True] + [False] * (len(a_saturday) - 2)
a_saturday["work_daily_group_id"] = 333
res = go_to_daily_work_meeting(a_saturday, no_reduction_params, seed=None)
expected = pd.Series(False, index=a_saturday.index)
expected[:2] = True
assert_series_equal(res, expected, check_names=False)
def test_go_to_daily_work_meeting_weekday(a_thursday, no_reduction_params):
a_thursday["work_daily_group_id"] = [1, 2, 1, 2, 3, 3, 3] + [-1] * (
len(a_thursday) - 7
)
res = go_to_daily_work_meeting(a_thursday, no_reduction_params, seed=None)
expected = pd.Series(False, index=a_thursday.index)
# not every one we assigned a group id is a worker
expected.iloc[:7] = [True, True, False, True, True, False, True]
| assert_series_equal(res, expected, check_names=False) | pandas.testing.assert_series_equal |
import os
import pandas as pd
import numpy as np
def load_trade_xls(filepath: str, exchange: str, symbol: str, sid: int, eid: int, subffix='trade'):
filename = f'{exchange}-{symbol}-{str(sid)}-{str(eid)}-{subffix}.xlsx'
print(filename)
ndf = pd.read_excel(os.path.join(filepath, str(symbol), filename))
return ndf
def load_trades_xls(filepath: str, exchange: str, symbol: str, sid: int, eid: int, subffix='trade', step=100000):
buf = []
sid = int(np.floor(sid / step) * step)
eid = int(np.floor(eid / step) * step)
idlist = list(range(sid, eid, step))
for id in idlist:
ndf = load_trade_xls(filepath, exchange, symbol, id+1, id+step, subffix)
buf.append(ndf)
alldf = pd.DataFrame(columns=buf[0].columns)
alldf = alldf.append(buf, ignore_index=True, )
return alldf
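# --- Illustrative usage sketch (directory, exchange and id range are assumptions) ---
# load_trades_xls stitches together the 100k-row excel chunks that cover [sid, eid):
# trades = load_trades_xls('./data', 'BINANCE', 'btcusdt', sid=1, eid=300000)
# trades = datetime_tick(trades)  # adds a UTC 'Date' column parsed from the ms timestamps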
def datetime_tick(tick: pd.DataFrame):
tick['Date'] = | pd.to_datetime(tick['time'], unit='ms', utc=True) | pandas.to_datetime |
"""
Functions for preprocessing data for training purposes
Author: <NAME>
April 2019
"""
import pandas as pd
import numpy as np
import gensim
def preprocess(text, min_token_length = 0, join = False):
""" Method for preprocessing text
Args:
text: string of text
min_token_length: integer value indicating min number of characters in a token
join: boolean indicating if function should join the list of tokens into the string or not
Returns:
list of cleaned words or joined string
"""
if type(text) != str:
return []
result = []
for token in gensim.utils.simple_preprocess(text, min_len = min_token_length):
if len(token) > 2:
result.append(token)
if join:
return ' '.join(result)
return result
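# --- Illustrative sketch (the example sentence is an assumption) ---
# preprocess('How do I register my baby?')            -> ['how', 'register', 'baby']
# preprocess('How do I register my baby?', join=True) -> 'how register baby'
# Tokens of two characters or fewer are dropped by the len(token) > 2 filter above.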
def create_dictionary(train_data, no_below = 1, no_above = 0.25, keep_n = 95000, min_token_length = 0):
""" Create dictionary of all words in our dataset that adhere to the following conditions:
Args:
train_data: dataframe with questions
no_below: integer = minimum number of occurrences in the dataset
no_above: float between 0 and 1 - proportion of sentences containing word
keep_n: max number of words in our vocabulary
min_token_length: minimum number of characters a token must have
Returns:
dictionary of words found in training set in "dict" format
"""
documents = train_data[['helpdesk_question']]
documents['index'] = documents.index
processed_docs = documents['helpdesk_question'].apply(preprocess, args = [min_token_length])
dictionary = gensim.corpora.Dictionary(processed_docs)
dictionary.filter_extremes(no_below=no_below, no_above=no_above, keep_n=keep_n)
dictionary_of_words = pd.DataFrame(pd.Series(dict(dictionary)))
dictionary_of_words['index'] = dictionary_of_words.index
return dictionary_of_words.set_index(0)['index'].to_dict()
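# --- Illustrative usage sketch (thresholds shown are the defaults above) ---
# train_data must contain a 'helpdesk_question' column of raw text:
# vocab = create_dictionary(train_data, no_below=1, no_above=0.25, keep_n=95000)
# vocab maps each kept token to its integer id in the filtered gensim Dictionary.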
def filter_words(text_list, dictionary):
""" Filter sentences to remove any words from that does not appear in our dictionary
Args:
text_list: list of words in a sentence
dictionary: dictionary of words in training set
Returns:
Filtered list of words in a sentence
"""
result = []
for t in text_list:
if dictionary.get(t) != None:
result.append(t)
return result
def preprocess_question(question, dictionary, minimum_token_length):
""" Create list of cleaned and filtered words for each sentence
Args:
question: string text
dictionary: dictionary of words in training set
Return:
Cleaned and filtered list of words
"""
return filter_words(preprocess(question, minimum_token_length), dictionary)
def create_lookup_tables(unique_words):
""" Create lookup tables for word_to_id and id_to_word
Args:
unique_words: dictionary of words in training set
Return:
word_to_id: dict with words as keys and corresponding ids as values
id_to_word: dict with ids as keys and corresponding words as values
"""
word_to_id = {} # word->id lookup
id_to_word = {} # id->word lookup
for index, word in enumerate(sorted(list(unique_words))):
word_to_id[word] = index + 1
id_to_word[index + 1] = word
return word_to_id, id_to_word
def transform_sequence_to_word_ids(seq, word_to_id):
""" Create list of word IDs for sequence of words, padded with zeroes and truncated to a fixed length
Args:
seq: list of words
word_to_id: dict with words as keys and corresponding ids as values
Return:
list of word IDs padded and truncated to length of 30 items
"""
seq_word_ids = []
for word in seq:
seq_word_ids.append([word_to_id[word]])
# pad sequence with zeros
for i in range(30 - len(seq_word_ids)):
seq_word_ids.append([0])
return seq_word_ids[:30]
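# --- Illustrative sketch of the id-encoding step (the toy vocabulary is an assumption) ---
def _example_encode_question():
    word_to_id, id_to_word = create_lookup_tables({'register', 'baby', 'clinic'})
    seq_word_ids = transform_sequence_to_word_ids(['register', 'baby'], word_to_id)
    # -> [[3], [1]] followed by 28 zero-padding entries, always padded/truncated to length 30
    return seq_word_ids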
def create_one_hot_vector_for_reply(reply, all_responses):
""" Constructs a one-hot vector for replies
Args:
reply: query item
all_responses: dict containing all the template responses with their corresponding IDs
Return:
a one-hot vector where the corresponding ID of the reply is the one-hot index
"""
Y = np.zeros(len(all_responses), dtype = int)
Y[all_responses[reply]] += 1
return Y
def label_preprocess(entry, responses):
""" Returns integer ID corresponding to response for easy comparison and classification
Args:
entry: query item
responses: dict containing all the template responses with their corresponding IDs
Return:
integer corresponding to each response
"""
if responses.get(entry) != None:
return responses[entry]
else:
return len(responses) #default unknown class
def sample_pairs_offline(df, sample_size = 10):
""" Offline sampling for sentence pairs
Args:
df: dataframe of questions and answers
sample_size: number of positive/negative samples per sentence
Returns:
a data frame of positive and negative pairs
"""
sentences_1 = []
sentences_2 = []
labels = []
    df['helpdesk_question_clean'] = df['helpdesk_question'].apply(preprocess, args = [0, True])
for group in df.groupby('helpdesk_reply'):
questions = list(group[1]['helpdesk_question_clean'])
low_resource = list(group[1]['low_resource'])
for i in range(len(questions)):
q = questions[i]
            if len(preprocess(q, 0)) > 0:
for s in list(group[1]['helpdesk_question_clean'].sample(sample_size)):
                    if s != q and len(preprocess(s, 0)) > 0:
if s > q:
sentences_1.append(s)
sentences_2.append(q)
labels.append(1) # positive
else:
sentences_1.append(q)
sentences_2.append(s)
labels.append(1) # positive
#sample negatives
negatives = df.loc[df['helpdesk_reply'] != group[0]]
samples = negatives['helpdesk_question_clean'].sample(sample_size)
if samples.shape[0] > 0:
for s in list(samples):
                    if len(preprocess(s, 0)) > 0:
if s > q:
sentences_1.append(s)
sentences_2.append(q)
labels.append(0) # negative
else:
sentences_1.append(q)
sentences_2.append(s)
labels.append(0) #negative
    data_pairs = pd.concat([pd.Series(sentences_1), pd.Series(sentences_2), pd.Series(labels)], axis=1)
    data_pairs.columns = ['sentence_1', 'sentence_2', 'label']  # column names are illustrative
    return data_pairs
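if __name__ == '__main__':
    # Minimal end-to-end sketch (hedged): the toy dataframe, its column values
    # and the no_above override are illustrative assumptions, not part of the
    # original module.
    train_data = pd.DataFrame({
        'helpdesk_question': ['How do I register my child?', 'When does school open?'],
        'helpdesk_reply': ['registration info', 'school dates'],
    })
    vocab = create_dictionary(train_data, no_above=1.0)
    word_to_id, id_to_word = create_lookup_tables(vocab.keys())
    tokens = preprocess_question('How do I register?', vocab, 0)
    print(tokens, transform_sequence_to_word_ids(tokens, word_to_id)[:5])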
import pandas as pd
import pytest
import numpy as np
from spatialHeterogeneity.metrics.heterogeneity.base_metrics import _shannon, _shannon_evenness, _simpson,\
_simpson_evenness, _gini_simpson, _richness, _hill_number, _renyi, _abundance, _quadratic_entropy
from collections import Counter
obs = [[1], [1,1], [1,2]]
_counts = [Counter(i) for i in obs]
_expected = [0, 0, 1]
@pytest.mark.parametrize('input, res', [(c,e) for c,e in zip(_counts, _expected)])
def test_shannon(input, res):
assert np.isclose(_shannon(input),res)
_expected = [0, 0, 1]
@pytest.mark.parametrize('input, res', [(c,e) for c,e in zip(_counts, _expected)])
def test_shannon_evenness(input, res):
assert np.isclose(_shannon_evenness(input), res)
_expected = [1, 1, 0.5]
@pytest.mark.parametrize('input, res', [(c,e) for c,e in zip(_counts, _expected)])
def test_simpson(input, res):
assert np.isclose(_simpson(input), res)
_expected = [1, 1, 1]
@pytest.mark.parametrize('input, res', [(c,e) for c,e in zip(_counts, _expected)])
def test_simpson_evenness(input, res):
assert np.isclose(_simpson_evenness(input), res)
_expected = [0, 0, 0.5]
@pytest.mark.parametrize('input, res', [(c,e) for c,e in zip(_counts, _expected)])
def test_gini_simpson(input, res):
assert np.isclose(_gini_simpson(input), res)
_expected = [1, 1, 2]
@pytest.mark.parametrize('input, res', [(c,e) for c,e in zip(_counts, _expected)])
def test_richness(input, res):
assert np.isclose(_richness(input),res)
_expected = [1, 1, np.sqrt(0.5)]
@pytest.mark.parametrize('input, res', [(c,e) for c,e in zip(_counts, _expected)])
def test_hill_number(input, res, q=2):
assert _hill_number(input, q=q) == res
_expected = [0, 0, -1/1*np.log2(0.5)]
@pytest.mark.parametrize('input, res', [(c,e) for c,e in zip(_counts, _expected)])
def test_renyi(input, res, q=2):
assert np.isclose(_renyi(input, q=q),res)
_expected = [pd.Series([1], index=[1]), pd.Series([1], index=[1]), pd.Series([0.5, 0.5], index=[1, 2])]
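# Hedged addition (not in the original file): a minimal test for _abundance,
# assuming it returns the relative abundance of each class as a pandas Series
# indexed by class label, matching the _expected values above.
@pytest.mark.parametrize('input, res', [(c, e) for c, e in zip(_counts, _expected)])
def test_abundance(input, res):
    result = _abundance(input)
    pd.testing.assert_series_equal(result.astype(float).sort_index(),
                                   res.astype(float).sort_index(),
                                   check_names=False)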
import numpy as np, pandas as pd, networkx as nx, multiprocessing as mp, sys, traceback, math
from scipy.sparse.linalg import inv
from scipy.sparse import csr_matrix, identity
from scipy.stats import norm, binom
from data_module import *
from DGP_module import *
from inference_module import *
##### User parameters #####
processes = 16 # number of parallel processes
network_model = 'RGG' # options: config, RGG
B = 10000 # number of simulations
alpha = 0.05 # significance level of t-test
num_schools = [1,2,4] # number of schools to include in sample
p = 0.5 # treatment probability
theta_LIM = np.array([-1,0.8,1,1]) # structural parameters for linear-in-means: intercept,
# endogenous, exogenous, and treatment effect
theta_TSI = np.array([-1,1.5,1,1]) # structural parameters for threshold model: intercept,
# endogenous, exogenous, and treatment effect
save_csv = True # save output in CSV
estimands_only = False # only simulate estimands
manual_estimands = False # use previously simulated estimands (hard-coded below)
half_sims = 0 # 1 = only 1st half of sims, 2 = only 2nd half, any other number = run all.
print('theta: {},{}'.format(theta_LIM,theta_TSI))
exp_nbhd = network_model != 'RGG' # exponential (True) or polynomial (False) neighborhood growth rates
##### Task per node #####
def one_sim(b, deg_seq, eligibles, estimates_only, estimand_LIM, estimand_TSI, oracle_SE_LIM, oracle_SE_TSI):
"""
Task to be parallelized: one simulation draw. Set estimates_only to True if you only want to return estimators.
"""
n = deg_seq.size
c = 2 if estimates_only else 1
seed = int(n*c*(B*10) + b)
np.random.seed(seed=seed)
if b%100 == 0:
print(' b = {}'.format(b))
sys.stdout.flush()
# simulate data
if network_model == 'config':
A = nx.configuration_model(deg_seq, seed=seed)
A = nx.Graph(A) # remove multi-edges
A.remove_edges_from(nx.selfloop_edges(A)) # remove self-loops
errors = np.random.normal(size=n)
elif network_model == 'RGG':
positions = np.random.uniform(size=(n,2))
A = gen_RGG(positions, (deg_seq.mean()/ball_vol(2,1)/n)**(1/2))
errors = np.random.normal(size=n) + (positions[:,0] - 0.5)
else:
raise ValueError('Not a valid choice of network model.')
A_mat = nx.to_scipy_sparse_matrix(A, nodelist=range(n), format='csc')
deg_seq_sim = np.squeeze(A_mat.dot(np.ones(n)[:,None]))
r,c = A_mat.nonzero()
rD_sp = csr_matrix(((1.0/np.maximum(deg_seq_sim,1))[r], (r,c)), shape=(A_mat.shape))
A_norm = A_mat.multiply(rD_sp) # row-normalized adjacency matrix
friends_eligible = np.squeeze(np.asarray(A_mat.dot(eligibles[:,None])))
D = np.zeros(n)
D[eligibles] = np.random.binomial(1,p,eligibles.sum()) # assign treatments to eligibles
LIM_inv = inv( identity(n,format='csc') - theta_LIM[1]*A_norm ) # (I - beta * \tilde{A})^{-1}; used to simulate linear in means model; csc better for inverse
Y_LIM = linear_in_means(D, A_norm, LIM_inv, errors, theta_LIM)
Y_TSI = threshold_model(D, A_norm, errors, theta_TSI)
friends_treated = np.squeeze(np.asarray(A_mat.dot(D[:,None]))) # num friends treated
# estimation
pop = (friends_eligible > 0) # indicators for inclusion in population, in this case only include units with eligible friends
pscores0 = binom(friends_eligible,p).pmf(0)
pscores1 = 1 - binom(friends_eligible,p).pmf(0)
ind1 = friends_treated > 0 # exposure mapping indicators for spillover effect
ind0 = 1 - ind1
Zs_LIM = make_Zs(Y_LIM,ind1,ind0,pscores1,pscores0,pop)
Zs_TSI = make_Zs(Y_TSI,ind1,ind0,pscores1,pscores0,pop)
estimate_LIM = Zs_LIM[pop].mean()
estimate_TSI = Zs_TSI[pop].mean()
if estimates_only:
return [estimate_LIM, estimate_TSI]
else:
# standard errors
[SE_LIM,SE_TSI],APL,bandwidth,[PSD_failure_LIM,PSD_failure_TSI] \
= network_SE([Zs_LIM,Zs_TSI], A, pop, 1, exp_nbhd, True) # network-robust SE
naive_SE_LIM = Zs_LIM[pop].std() / math.sqrt(pop.sum()) # iid SE
naive_SE_TSI = Zs_TSI[pop].std() / math.sqrt(pop.sum())
# t-test
numerator_LIM = np.abs(estimate_LIM - estimand_LIM)
numerator_TSI = np.abs(estimate_TSI - estimand_TSI)
ttest_LIM = numerator_LIM / SE_LIM > norm.ppf(1-alpha/2)
ttest_TSI = numerator_TSI / SE_TSI > norm.ppf(1-alpha/2)
naive_ttest_LIM = numerator_LIM / naive_SE_LIM > norm.ppf(1-alpha/2)
naive_ttest_TSI = numerator_TSI / naive_SE_TSI > norm.ppf(1-alpha/2)
oracle_ttest_LIM = numerator_LIM / oracle_SE_LIM > norm.ppf(1-alpha/2)
oracle_ttest_TSI = numerator_TSI / oracle_SE_TSI > norm.ppf(1-alpha/2)
return [estimate_LIM, estimate_TSI, ttest_LIM, ttest_TSI, oracle_ttest_LIM, oracle_ttest_TSI, naive_ttest_LIM, naive_ttest_TSI, SE_LIM, SE_TSI, oracle_SE_LIM, oracle_SE_TSI, naive_SE_LIM, naive_SE_TSI, ind1[pop].sum(), ind0[pop].sum(), APL, bandwidth, PSD_failure_LIM, PSD_failure_TSI]
##### Containers #####
estimates_LIM = np.zeros(len(num_schools)) # treatment effect estimates for linear-in-means model
estimates_TSI = np.zeros(len(num_schools)) # treatment effect estimates for threshold model
ttests_LIM = np.zeros(len(num_schools)) # t-test for linear-in-means model using our standard errors
ttests_TSI = np.zeros(len(num_schools)) # t-test for threshold model using our standard errors
oracle_ttests_LIM = np.zeros(len(num_schools)) # t-test using true standard errors
oracle_ttests_TSI = np.zeros(len(num_schools))
naive_ttests_LIM = np.zeros(len(num_schools)) # t-test using iid standard errors
naive_ttests_TSI = np.zeros(len(num_schools))
SEs_LIM = np.zeros(len(num_schools)) # our standard errors
SEs_TSI = np.zeros(len(num_schools))
oracle_SEs_LIM = np.zeros(len(num_schools))
oracle_SEs_TSI = np.zeros(len(num_schools))
naive_SEs_LIM = np.zeros(len(num_schools))
naive_SEs_TSI = np.zeros(len(num_schools))
eff_SS1 = np.zeros(len(num_schools)) # number of units assigned to first exposure mapping
eff_SS0 = np.zeros(len(num_schools))
APLs = np.zeros(len(num_schools)) # average path length
bandwidths = np.zeros(len(num_schools)) # bandwidth
Ns = np.zeros(len(num_schools)).astype('int') # population sizes
PSD_failures_LIM = np.zeros(len(num_schools))
PSD_failures_TSI = np.zeros(len(num_schools))
##### Main #####
# assemble network data
_,D,A,_,_,IDs = assemble_data()
deg_seq = np.array([i[1] for i in A.out_degree])
A = A.to_undirected()
eligibles = (D >= 0)
for i,ns in enumerate(num_schools):
# select schools
if ns == 1:
students = (IDs[:,1] == 24)
elif ns == 2:
students = (IDs[:,1] == 24) + (IDs[:,1] == 22)
else:
students = (IDs[:,1] == 24) + (IDs[:,1] == 22) + (IDs[:,1] == 60) + (IDs[:,1] == 56)
print('n = {}'.format(students.sum()))
Ns[i] = students.sum()
if deg_seq[students].sum() % 2 != 0:
deg_seq_pop = deg_seq[students].copy()
deg_seq_pop[0] += 1 # need even total degree for configuration model
else:
deg_seq_pop = deg_seq[students]
if manual_estimands:
# HARD CODE simulated estimands and oracle SEs
if ns == 4 and network_model == 'config':
estimands = np.array([0.3059139,0.0805116])
oracle_SEs = np.array([0.69383978,0.06043479])
else:
estimands = np.array([0,0])
oracle_SEs = np.array([1,1])
else:
# simulate estimands and oracle standard errors
def one_sim_wrapper(b):
try:
return one_sim(b, deg_seq_pop, eligibles[students], True, 0, 0, 0, 0)
except:
print('%s: %s' % (b, traceback.format_exc()))
sys.stdout.flush()
sims_range = range(B,2*B)
if half_sims == 1:
sims_range = range(B,B+int(B/2))
elif half_sims == 2:
sims_range = range(B+int(B/2),2*B)
pool = mp.Pool(processes=processes, maxtasksperchild=1)
parallel_output = pool.imap(one_sim_wrapper, sims_range, chunksize=25)
pool.close()
pool.join()
results = np.array([r for r in parallel_output])
if half_sims in [1,2]:
gd = '_1' if half_sims==1 else '_2'
table = pd.DataFrame(results)
table.to_csv('half_sims_oracle_' + str(ns) + gd + '.csv', float_format='%.10f', index=False, header=False)
estimands = results.mean(axis=0)
oracle_SEs = results.std(axis=0)
print('Estimands: {}'.format(estimands)) # use these to HARD CODE estimands above
print('Oracle SEs: {}'.format(oracle_SEs))
sys.stdout.flush()
if estimands_only:
results = np.zeros(26)
else:
# simulate main results
def one_sim_wrapper(b):
try:
return one_sim(b, deg_seq_pop, eligibles[students], False, estimands[0], estimands[1], oracle_SEs[0], oracle_SEs[1])
except:
print('%s: %s' % (b, traceback.format_exc()))
sys.stdout.flush()
sims_range = range(B)
if half_sims == 1:
sims_range = range(int(B/2))
elif half_sims == 2:
sims_range = range(int(B/2),B)
pool = mp.Pool(processes=processes, maxtasksperchild=1)
parallel_output = pool.imap(one_sim_wrapper, sims_range, chunksize=25)
pool.close()
pool.join()
results = np.array([r for r in parallel_output])
if half_sims in [1,2]:
gd = '_1' if half_sims==1 else '_2'
table = pd.DataFrame(results)
table.to_csv('half_sims_main_' + str(ns) + gd + '.csv', float_format='%.10f', index=False, header=False)
results = results.mean(axis=0)
if half_sims == 2:
results1 = pd.read_csv('half_sims_main_4_1.csv', header=None)
results2 = | pd.read_csv('half_sims_main_4_2.csv', header=None) | pandas.read_csv |
# -*- coding: utf-8 -*-
import gzip
import io
import warnings
import numpy as np
import pandas as pd
from sklearn.metrics import average_precision_score, precision_recall_curve
from madoka.utils import wrap_text_writer
from .base import Report
from .helper import classfication_report_data_frame
__all__ = [
'classification_report', 'ClassificationReport', 'BinaryClassifyReport',
]
def classification_report(truth, predict, proba, name='Classification Report'):
"""Create a classification report.
Parameters
----------
truth : numpy.ndarray
Ground truth (correct) target values.
predict : numpy.ndarray
Estimated target as returned by the classifier.
proba : numpy.ndarray
Estimated probabilities for each target to be each class.
name : str
Name of this report.
"""
if len(proba.shape) == 1 or proba.shape[1] <= 2:
return _BinaryClassificationReport(truth, predict, proba, name=name)
return ClassificationReport(truth, predict, proba, name=name)
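# Illustrative usage (hedged -- `y_true`, `y_pred` and `y_proba` are placeholder
# arrays of labels, predictions and per-class probabilities):
# >>> report = classification_report(y_true, y_pred, y_proba, name='validation')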
class ClassificationReport(Report):
"""Classification report."""
_ATTRIBUTE_LEGACY_MAPPING = {
'y_true': 'truth', 'y_pred': 'predict', 'y_proba': 'proba'
}
_PREDICT_FN = 'predict.csv.gz'
def __init__(self, truth, predict, proba, name=None):
super(ClassificationReport, self).__init__(name=name)
self.truth = truth
self.predict = predict
self.proba = proba
def _make_compressed_csv(self):
if getattr(self, '_compressed_csv', None) is None:
with io.BytesIO() as f:
# Note we force `mtime=0` here.
#
# This is because we need the compressed gz to be identical
# for the same CSV files, in order to use report resources
# manager.
with gzip.GzipFile(fileobj=f, mode='wb', mtime=0) as out:
# generate the probability columns
y_proba = self.proba
if len(y_proba.shape) == 2 and y_proba.shape[1] == 1:
y_proba = y_proba.reshape([-1])
if len(y_proba.shape) == 1:
proba_cols = [('p(class=1)', y_proba)]
else:
proba_cols = [('p(class=%d)' % i, y_proba[:, i])
for i in range(y_proba.shape[1])]
# generate the data frame
data = {'predict': self.predict, 'truth': self.truth}
for c, v in proba_cols:
data[c] = v
columns = ['predict', 'truth'] + [c for c, _ in proba_cols]
csv_df = | pd.DataFrame(data=data, columns=columns) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/5/18 9:53 AM
# @Author : R
# @File : TMDB_predict_2.py
# @Software: PyCharm
# coding: utf-8
# # Kaggle for TMDB
# In[1]:
import numpy as np
import pandas as pd
import warnings
from tqdm import tqdm
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import mean_squared_error
import xgboost as xgb
import lightgbm as lgb
import catboost as cat
from collections import Counter
warnings.filterwarnings('ignore')
# get_ipython().run_line_magic('matplotlib', 'inline')
# Data description
# id: unique identifier for each movie
# belongs_to_collection: JSON with the movie's TMDB collection id, name, poster URL and backdrop URL
# budget: movie budget; a value of 0 means unknown
# genres: list of genres as JSON, containing id and name
# homepage: URL of the movie's official homepage
# imdb_id: unique id of the movie in the IMDB database
# original_language: original language of the movie, a two-character string
# original_title: original title of the movie, which may differ from the name in belongs_to_collection
# overview: plot summary
# popularity: popularity of the movie, as a float
# poster_path: URL of the movie poster
# production_companies: JSON with the production companies' id and name
# production_countries: JSON with the production countries' two-letter code and full name
# release_date: release date of the movie
# runtime: movie duration
# spoken_languages: language versions of the movie, JSON
# status: whether the movie has been released
# tagline: movie tagline
# title: English title of the movie
# keywords: movie keywords, JSON
# cast: JSON list of the cast, including id, name, gender, etc.
# crew: information on the production crew, including director, writers, etc.
# revenue: total revenue, the value to predict
# # EDA
# EDA already done
# Feature engineering and prediction
# Two additional datasets are used:
# 1. TMDB Competition Additional Features: adds three new features: popularity2, rating, totalVotes
# 2. TMDB Competition Additional Training Data: 2000 extra training rows, without all attributes of the original training set
# In[52]:
# Feature Engineering & Prediction
def rmse(y, y_pred):
return np.sqrt(mean_squared_error(y, y_pred))
# Data preprocessing function, including converting non-numeric attributes to numeric ones
def prepare(df):
global json_cols
global train_dict
df[['release_month', 'release_day', 'release_year']] = df['release_date'].str.split('/', expand=True).replace(
np.nan, 0).astype(int)
df['release_year'] = df['release_year']
df.loc[(df['release_year'] <= 19) & (df['release_year'] < 100), "release_year"] += 2000
df.loc[(df['release_year'] > 19) & (df['release_year'] < 100), "release_year"] += 1900
    # Extract day-of-week and quarter information from the release date
releaseDate = pd.to_datetime(df['release_date'])
df['release_dayofweek'] = releaseDate.dt.dayofweek
df['release_quarter'] = releaseDate.dt.quarter
    # Fill missing values of the rating and totalVotes attributes
rating_na = df.groupby(["release_year", "original_language"])['rating'].mean().reset_index()
df[df.rating.isna()]['rating'] = df.merge(rating_na, how='left', on=["release_year", "original_language"])
vote_count_na = df.groupby(["release_year", "original_language"])['totalVotes'].mean().reset_index()
df[df.totalVotes.isna()]['totalVotes'] = df.merge(vote_count_na, how='left',
on=["release_year", "original_language"])
# df['rating'] = df['rating'].fillna(1.5)
# df['totalVotes'] = df['totalVotes'].fillna(6)
    # Build a new attribute, weightedRating
df['weightedRating'] = (df['rating'] * df['totalVotes'] + 6.367 * 1000) / (df['totalVotes'] + 1000)
    # Since money from different periods is not directly comparable, apply a simple "inflation" adjustment of 1.8% per year
df['originalBudget'] = df['budget']
df['inflationBudget'] = df['budget'] + df['budget'] * 1.8 / 100 * (
2018 - df['release_year']) # Inflation simple formula
df['budget'] = np.log1p(df['budget'])
    # Count the gender composition of the crew and cast
df['genders_0_crew'] = df['crew'].apply(lambda x: sum([1 for i in x if i['gender'] == 0]))
df['genders_1_crew'] = df['crew'].apply(lambda x: sum([1 for i in x if i['gender'] == 1]))
df['genders_2_crew'] = df['crew'].apply(lambda x: sum([1 for i in x if i['gender'] == 2]))
df['genders_0_cast'] = df['cast'].apply(lambda x: sum([1 for i in x if i['gender'] == 0]))
df['genders_1_cast'] = df['cast'].apply(lambda x: sum([1 for i in x if i['gender'] == 1]))
df['genders_2_cast'] = df['cast'].apply(lambda x: sum([1 for i in x if i['gender'] == 2]))
    # Statistics on belongs_to_collection, Keywords and cast
df['_collection_name'] = df['belongs_to_collection'].apply(lambda x: x[0]['name'] if x != {} else 0)
le = LabelEncoder()
le.fit(list(df['_collection_name'].fillna('')))
df['_collection_name'] = le.transform(df['_collection_name'].fillna('').astype(str))
df['_num_Keywords'] = df['Keywords'].apply(lambda x: len(x) if x != {} else 0)
df['_num_cast'] = df['cast'].apply(lambda x: len(x) if x != {} else 0)
df['_num_crew'] = df['crew'].apply(lambda x: len(x) if x != {} else 0)
df['_popularity_mean_year'] = df['popularity'] / df.groupby("release_year")["popularity"].transform('mean')
df['_budget_runtime_ratio'] = df['budget'] / df['runtime']
df['_budget_popularity_ratio'] = df['budget'] / df['popularity']
# df['_budget_year_ratio'] = df['budget'] / (df['release_year'] * df['release_year'])
# df['_releaseYear_popularity_ratio'] = df['release_year'] / df['popularity']
# df['_releaseYear_popularity_ratio2'] = df['popularity'] / df['release_year']
df['_popularity_totalVotes_ratio'] = df['totalVotes'] / df['popularity']
df['_rating_popularity_ratio'] = df['rating'] / df['popularity']
df['_rating_totalVotes_ratio'] = df['totalVotes'] / df['rating']
# df['_totalVotes_releaseYear_ratio'] = df['totalVotes'] / df['release_year']
df['_budget_rating_ratio'] = df['budget'] / df['rating']
df['_runtime_rating_ratio'] = df['runtime'] / df['rating']
df['_budget_totalVotes_ratio'] = df['budget'] / df['totalVotes']
    # Flag whether the movie has a homepage
df['has_homepage'] = 1
df.loc[pd.isnull(df['homepage']), "has_homepage"] = 0
    # Flag whether belongs_to_collection is missing
df['isbelongs_to_collectionNA'] = 0
df.loc[pd.isnull(df['belongs_to_collection']), "isbelongs_to_collectionNA"] = 1
    # Flag whether tagline is missing
df['isTaglineNA'] = 0
df.loc[df['tagline'] == 0, "isTaglineNA"] = 1
    # Flag whether the original_language is English
df['isOriginalLanguageEng'] = 0
df.loc[df['original_language'] == "en", "isOriginalLanguageEng"] = 1
    # Flag whether the title differs from the original title
df['isTitleDifferent'] = 1
df.loc[df['original_title'] == df['title'], "isTitleDifferent"] = 0
    # Flag whether the movie has been released
df['isMovieReleased'] = 1
df.loc[df['status'] != "Released", "isMovieReleased"] = 0
    # Flag whether the movie has an overview
df['isOverviewNA'] = 0
df.loc[pd.isnull(df['overview']), 'isOverviewNA'] = 1
    # Get the collection id
df['collection_id'] = df['belongs_to_collection'].apply(lambda x: np.nan if len(x) == 0 else x[0]['id'])
    # Length statistics for original_title and related attributes
df['original_title_letter_count'] = df['original_title'].str.len()
df['original_title_word_count'] = df['original_title'].str.split().str.len()
    # Length / word-count statistics for title, overview and tagline
df['title_word_count'] = df['title'].str.split().str.len()
df['overview_word_count'] = df['overview'].str.split().str.len()
df['tagline_word_count'] = df['tagline'].str.split().str.len()
df['len_title'] = df['title'].fillna('').apply(lambda x: len(str(x)))
    # Counts for genres, production companies, countries, cast, crew and spoken_languages
df['production_countries_count'] = df['production_countries'].apply(lambda x: len(x))
df['production_companies_count'] = df['production_companies'].apply(lambda x: len(x))
df['cast_count'] = df['cast'].apply(lambda x: len(x))
df['crew_count'] = df['crew'].apply(lambda x: len(x))
df['spoken_languages_count'] = df['spoken_languages'].apply(lambda x: len(x))
df['genres_count'] = df['genres'].apply(lambda x: len(x))
    # Group by year and compute means used for filling
df['meanruntimeByYear'] = df.groupby("release_year")["runtime"].aggregate('mean')
df['meanPopularityByYear'] = df.groupby("release_year")["popularity"].aggregate('mean')
df['meanBudgetByYear'] = df.groupby("release_year")["budget"].aggregate('mean')
df['meantotalVotesByYear'] = df.groupby("release_year")["totalVotes"].aggregate('mean')
df['meanTotalVotesByRating'] = df.groupby("rating")["totalVotes"].aggregate('mean')
df['medianBudgetByYear'] = df.groupby("release_year")["budget"].aggregate('median')
df['_popularity_theatrical_ratio'] = df['theatrical'] / df['popularity']
df['_budget_theatrical_ratio'] = df['budget'] / df['theatrical']
# runtime
df['runtime_cat_min_60'] = df['runtime'].apply(lambda x: 1 if (x <= 60) else 0)
df['runtime_cat_61_80'] = df['runtime'].apply(lambda x: 1 if (x > 60) & (x <= 80) else 0)
df['runtime_cat_81_100'] = df['runtime'].apply(lambda x: 1 if (x > 80) & (x <= 100) else 0)
df['runtime_cat_101_120'] = df['runtime'].apply(lambda x: 1 if (x > 100) & (x <= 120) else 0)
df['runtime_cat_121_140'] = df['runtime'].apply(lambda x: 1 if (x > 120) & (x <= 140) else 0)
df['runtime_cat_141_170'] = df['runtime'].apply(lambda x: 1 if (x > 140) & (x <= 170) else 0)
df['runtime_cat_171_max'] = df['runtime'].apply(lambda x: 1 if (x >= 170) else 0)
lang = df['original_language']
df_more_17_samples = [x[0] for x in Counter(pd.DataFrame(lang).stack()).most_common(17)]
for col in df_more_17_samples:
df[col] = df['original_language'].apply(lambda x: 1 if x == col else 0)
for col in range(1, 12):
df['month' + str(col)] = df['release_month'].apply(lambda x: 1 if x == col else 0)
# feature engeneering : Release date per quarter one hot encoding
for col in range(1, 4):
df['quarter' + str(col)] = df['release_quarter'].apply(lambda x: 1 if x == col else 0)
for col in range(1, 7):
df['dayofweek' + str(col)] = df['release_dayofweek'].apply(lambda x: 1 if x == col else 0)
    # Newly added attributes
df['is_release_day_of_1'] = 0
df.loc[df['release_day'] == 1, 'is_release_day_of_1'] = 1
df['is_release_day_of_15'] = 0
df.loc[df['release_day'] == 15, 'is_release_day_of_15'] = 1
    # More new attributes
# df['popularity2'] = np.log1p(df['popularity2'])
# df['popularity'] = np.log1p(df['popularity'])
# for col in range(1, 32):
# df['release_day' + str(col)] = df['release_day'].apply(lambda x: 1 if x == col else 0)
df['is_release_day_of_31'] = 0
df.loc[df['release_day'] == 31, 'is_release_day_of_15'] = 1
# popularity
# df['popularity_cat_25'] = df['popularity'].apply(lambda x: 1 if (x <= 25) else 0)
# df['popularity_cat_26_50'] = df['popularity'].apply(lambda x: 1 if (x > 25) & (x <= 50) else 0)
# df['popularity_cat_51_100'] = df['popularity'].apply(lambda x: 1 if (x > 50) & (x <= 100) else 0)
# df['popularity_cat_101_150'] = df['popularity'].apply(lambda x: 1 if (x > 100) & (x <= 150) else 0)
# df['popularity_cat_151_200'] = df['popularity'].apply(lambda x: 1 if (x > 150) & (x <= 200) else 0)
# df['popularity_cat_201_max'] = df['popularity'].apply(lambda x: 1 if (x >= 200) else 0)
#
# df['_runtime_totalVotes_ratio'] = df['runtime'] / df['totalVotes']
# df['_runtime_popularity_ratio'] = df['runtime'] / df['popularity']
#
# df['_rating_theatrical_ratio'] = df['theatrical'] / df['rating']
# df['_totalVotes_theatrical_ratio'] = df['theatrical'] / df['totalVotes']
# df['_budget_mean_year'] = df['budget'] / df.groupby("release_year")["budget"].transform('mean')
# df['_runtime_mean_year'] = df['runtime'] / df.groupby("release_year")["runtime"].transform('mean')
# df['_rating_mean_year'] = df['rating'] / df.groupby("release_year")["rating"].transform('mean')
# df['_totalVotes_mean_year'] = df['totalVotes'] / df.groupby("release_year")["totalVotes"].transform('mean')
    # For JSON attributes with multiple values, apply a one-hot-style encoding
for col in ['genres', 'production_countries', 'spoken_languages', 'production_companies', 'Keywords']:
df[col] = df[col].map(lambda x: sorted(
list(set([n if n in train_dict[col] else col + '_etc' for n in [d['name'] for d in x]])))).map(
lambda x: ','.join(map(str, x)))
temp = df[col].str.get_dummies(sep=',')
df = pd.concat([df, temp], axis=1, sort=False)
    # Drop non-numeric attributes and attributes from which no useful information has been extracted yet
df.drop(['genres_etc'], axis=1, inplace=True)
df = df.drop(['belongs_to_collection', 'genres', 'homepage', 'imdb_id', 'overview','runtime'
, 'poster_path', 'production_companies', 'production_countries', 'release_date', 'spoken_languages'
, 'status', 'title', 'Keywords', 'cast', 'crew', 'original_language', 'original_title', 'tagline',
'collection_id'
], axis=1)
    # Fill missing values
df.fillna(value=0.0, inplace=True)
return df
# Manually fix some rows of the training data
# This includes budget and revenue
# Identify cases where budget is far smaller than revenue and handle them
# Principle: fill with real values when they can be looked up, otherwise use the mean of movies of the same year and type
train = | pd.read_csv('train.csv') | pandas.read_csv |
# -*- coding: utf-8 -*-
import re
import calendar
import datetime, time
from datetime import timedelta
import urllib.request
import requests, json
from http.cookiejar import CookieJar
from bs4 import BeautifulSoup
import numpy as np
import pandas as pd
from pandas import DataFrame
import pandas.io.sql as pdsql
from matplotlib import dates
import sqlite3
DATABASE = '..\\DATA\\mymoneybot.sqlite'
def sqliteconn():
conn = sqlite3.connect(DATABASE)
return conn
def get_webpage(url, encoding=""):
cj = CookieJar()
opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cj))
opener.addheaders = [('User-agent', 'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36')]
respstr = ""
try:
op = opener.open(url)
sourcecode = op.read()
except Exception as e:
time.sleep(1)
op = opener.open(url)
sourcecode = op.read()
encodingmethod = op.info().get_param('charset')
if encodingmethod == None:
if encoding != "":
encodingmethod = encoding
if encoding != "":
encodingmethod = encoding
try:
respstr = sourcecode.decode(encoding=encodingmethod, errors='ignore')
except Exception as e:
respstr = sourcecode.decode(encoding="cp949", errors='ignore')
opener.close()
return respstr
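# Illustrative usage (hedged -- the URL is a placeholder):
# >>> html = get_webpage('https://example.com', encoding='utf8')
# >>> soup = BeautifulSoup(html, 'lxml')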
def get_company_fundamental_fnguide(code):
def g(x):
if type(x) == str:
return datetime.datetime.strptime(x, '%Y-%m-%d')
else:
return x
# url = "http://comp.fnguide.com/SVO2/ASP/SVD_main.asp?pGB=1&gicode=A%s&cID=&MenuYn=Y&ReportGB=&NewMenuID=11&stkGb=&strResearchYN=" % (code)
url = "http://asp01.fnguide.com/SVO2/ASP/SVD_Main.asp?pGB=1&gicode=A%s&NewMenuID=11&cID=50&MenuYn=N" % (code)
respstr = get_webpage(url, encoding="utf8")
# soup = BeautifulSoup(respstr)
soup = BeautifulSoup(respstr, "lxml")
    # Locate the section after the "<!--IFRS 별도/연간 -->" marker in the page source (IFRS separate / annual statements)
target_table = soup.find("div", class_="um_table", id="highlight_B_Y")
# print(target_table)
result = []
try:
target_table.find_all('tr')
except Exception as e:
return (DataFrame(), | DataFrame() | pandas.DataFrame |
from datetime import datetime, time
from itertools import product
import numpy as np
import pytest
import pytz
import pandas as pd
from pandas import (
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
date_range,
period_range,
to_datetime,
)
import pandas.util.testing as tm
import pandas.tseries.offsets as offsets
@pytest.fixture(params=product([True, False], [True, False]))
def close_open_fixture(request):
return request.param
class TestDataFrameTimeSeriesMethods:
def test_pct_change(self, datetime_frame):
rs = datetime_frame.pct_change(fill_method=None)
tm.assert_frame_equal(rs, datetime_frame / datetime_frame.shift(1) - 1)
rs = datetime_frame.pct_change(2)
filled = datetime_frame.fillna(method="pad")
tm.assert_frame_equal(rs, filled / filled.shift(2) - 1)
rs = datetime_frame.pct_change(fill_method="bfill", limit=1)
filled = datetime_frame.fillna(method="bfill", limit=1)
tm.assert_frame_equal(rs, filled / filled.shift(1) - 1)
rs = datetime_frame.pct_change(freq="5D")
filled = datetime_frame.fillna(method="pad")
tm.assert_frame_equal(
rs, (filled / filled.shift(freq="5D") - 1).reindex_like(filled)
)
def test_pct_change_shift_over_nas(self):
s = Series([1.0, 1.5, np.nan, 2.5, 3.0])
df = DataFrame({"a": s, "b": s})
chg = df.pct_change()
expected = Series([np.nan, 0.5, 0.0, 2.5 / 1.5 - 1, 0.2])
edf = DataFrame({"a": expected, "b": expected})
tm.assert_frame_equal(chg, edf)
@pytest.mark.parametrize(
"freq, periods, fill_method, limit",
[
("5B", 5, None, None),
("3B", 3, None, None),
("3B", 3, "bfill", None),
("7B", 7, "pad", 1),
("7B", 7, "bfill", 3),
("14B", 14, None, None),
],
)
def test_pct_change_periods_freq(
self, datetime_frame, freq, periods, fill_method, limit
):
# GH 7292
rs_freq = datetime_frame.pct_change(
freq=freq, fill_method=fill_method, limit=limit
)
rs_periods = datetime_frame.pct_change(
periods, fill_method=fill_method, limit=limit
)
tm.assert_frame_equal(rs_freq, rs_periods)
empty_ts = DataFrame(index=datetime_frame.index, columns=datetime_frame.columns)
rs_freq = empty_ts.pct_change(freq=freq, fill_method=fill_method, limit=limit)
rs_periods = empty_ts.pct_change(periods, fill_method=fill_method, limit=limit)
tm.assert_frame_equal(rs_freq, rs_periods)
def test_frame_ctor_datetime64_column(self):
rng = date_range("1/1/2000 00:00:00", "1/1/2000 1:59:50", freq="10s")
dates = np.asarray(rng)
df = DataFrame({"A": np.random.randn(len(rng)), "B": dates})
assert np.issubdtype(df["B"].dtype, np.dtype("M8[ns]"))
def test_frame_append_datetime64_column(self):
rng = date_range("1/1/2000 00:00:00", "1/1/2000 1:59:50", freq="10s")
df = DataFrame(index=np.arange(len(rng)))
df["A"] = rng
assert np.issubdtype(df["A"].dtype, np.dtype("M8[ns]"))
def test_frame_datetime64_pre1900_repr(self):
df = DataFrame({"year": date_range("1/1/1700", periods=50, freq="A-DEC")})
# it works!
repr(df)
def test_frame_append_datetime64_col_other_units(self):
n = 100
units = ["h", "m", "s", "ms", "D", "M", "Y"]
ns_dtype = np.dtype("M8[ns]")
for unit in units:
dtype = np.dtype("M8[{unit}]".format(unit=unit))
vals = np.arange(n, dtype=np.int64).view(dtype)
df = DataFrame({"ints": np.arange(n)}, index=np.arange(n))
df[unit] = vals
ex_vals = to_datetime(vals.astype("O")).values
assert df[unit].dtype == ns_dtype
assert (df[unit].values == ex_vals).all()
# Test insertion into existing datetime64 column
df = DataFrame({"ints": np.arange(n)}, index=np.arange(n))
df["dates"] = np.arange(n, dtype=np.int64).view(ns_dtype)
for unit in units:
dtype = np.dtype("M8[{unit}]".format(unit=unit))
vals = np.arange(n, dtype=np.int64).view(dtype)
tmp = df.copy()
tmp["dates"] = vals
ex_vals = to_datetime(vals.astype("O")).values
assert (tmp["dates"].values == ex_vals).all()
def test_asfreq(self, datetime_frame):
offset_monthly = datetime_frame.asfreq(offsets.BMonthEnd())
rule_monthly = datetime_frame.asfreq("BM")
tm.assert_almost_equal(offset_monthly["A"], rule_monthly["A"])
filled = rule_monthly.asfreq("B", method="pad") # noqa
# TODO: actually check that this worked.
# don't forget!
filled_dep = rule_monthly.asfreq("B", method="pad") # noqa
# test does not blow up on length-0 DataFrame
zero_length = datetime_frame.reindex([])
result = zero_length.asfreq("BM")
assert result is not zero_length
def test_asfreq_datetimeindex(self):
df = DataFrame(
{"A": [1, 2, 3]},
index=[datetime(2011, 11, 1), datetime(2011, 11, 2), datetime(2011, 11, 3)],
)
df = df.asfreq("B")
assert isinstance(df.index, DatetimeIndex)
ts = df["A"].asfreq("B")
assert isinstance(ts.index, DatetimeIndex)
def test_asfreq_fillvalue(self):
# test for fill value during upsampling, related to issue 3715
# setup
rng = pd.date_range("1/1/2016", periods=10, freq="2S")
ts = pd.Series(np.arange(len(rng)), index=rng)
df = pd.DataFrame({"one": ts})
# insert pre-existing missing value
df.loc["2016-01-01 00:00:08", "one"] = None
actual_df = df.asfreq(freq="1S", fill_value=9.0)
expected_df = df.asfreq(freq="1S").fillna(9.0)
expected_df.loc["2016-01-01 00:00:08", "one"] = None
tm.assert_frame_equal(expected_df, actual_df)
expected_series = ts.asfreq(freq="1S").fillna(9.0)
actual_series = ts.asfreq(freq="1S", fill_value=9.0)
tm.assert_series_equal(expected_series, actual_series)
@pytest.mark.parametrize(
"data,idx,expected_first,expected_last",
[
({"A": [1, 2, 3]}, [1, 1, 2], 1, 2),
({"A": [1, 2, 3]}, [1, 2, 2], 1, 2),
({"A": [1, 2, 3, 4]}, ["d", "d", "d", "d"], "d", "d"),
({"A": [1, np.nan, 3]}, [1, 1, 2], 1, 2),
({"A": [np.nan, np.nan, 3]}, [1, 1, 2], 2, 2),
({"A": [1, np.nan, 3]}, [1, 2, 2], 1, 2),
],
)
def test_first_last_valid(
self, float_frame, data, idx, expected_first, expected_last
):
N = len(float_frame.index)
mat = np.random.randn(N)
mat[:5] = np.nan
mat[-5:] = np.nan
frame = DataFrame({"foo": mat}, index=float_frame.index)
index = frame.first_valid_index()
assert index == frame.index[5]
index = frame.last_valid_index()
assert index == frame.index[-6]
# GH12800
empty = DataFrame()
assert empty.last_valid_index() is None
assert empty.first_valid_index() is None
# GH17400: no valid entries
frame[:] = np.nan
assert frame.last_valid_index() is None
assert frame.first_valid_index() is None
# GH20499: its preserves freq with holes
frame.index = date_range("20110101", periods=N, freq="B")
frame.iloc[1] = 1
frame.iloc[-2] = 1
assert frame.first_valid_index() == frame.index[1]
assert frame.last_valid_index() == frame.index[-2]
assert frame.first_valid_index().freq == frame.index.freq
assert frame.last_valid_index().freq == frame.index.freq
# GH 21441
df = DataFrame(data, index=idx)
assert expected_first == df.first_valid_index()
assert expected_last == df.last_valid_index()
@pytest.mark.parametrize("klass", [Series, DataFrame])
def test_first_valid_index_all_nan(self, klass):
# GH#9752 Series/DataFrame should both return None, not raise
obj = klass([np.nan])
assert obj.first_valid_index() is None
assert obj.iloc[:0].first_valid_index() is None
def test_first_subset(self):
ts = tm.makeTimeDataFrame(freq="12h")
result = ts.first("10d")
assert len(result) == 20
ts = tm.makeTimeDataFrame(freq="D")
result = ts.first("10d")
assert len(result) == 10
result = ts.first("3M")
expected = ts[:"3/31/2000"]
tm.assert_frame_equal(result, expected)
result = ts.first("21D")
expected = ts[:21]
tm.assert_frame_equal(result, expected)
result = ts[:0].first("3M")
tm.assert_frame_equal(result, ts[:0])
def test_first_raises(self):
# GH20725
df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
with pytest.raises(TypeError): # index is not a DatetimeIndex
df.first("1D")
def test_last_subset(self):
ts = tm.makeTimeDataFrame(freq="12h")
result = ts.last("10d")
assert len(result) == 20
ts = tm.makeTimeDataFrame(nper=30, freq="D")
result = ts.last("10d")
assert len(result) == 10
result = ts.last("21D")
expected = ts["2000-01-10":]
tm.assert_frame_equal(result, expected)
result = ts.last("21D")
expected = ts[-21:]
tm.assert_frame_equal(result, expected)
result = ts[:0].last("3M")
tm.assert_frame_equal(result, ts[:0])
def test_last_raises(self):
# GH20725
df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
with pytest.raises(TypeError): # index is not a DatetimeIndex
df.last("1D")
def test_at_time(self):
rng = date_range("1/1/2000", "1/5/2000", freq="5min")
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
rs = ts.at_time(rng[1])
assert (rs.index.hour == rng[1].hour).all()
assert (rs.index.minute == rng[1].minute).all()
assert (rs.index.second == rng[1].second).all()
result = ts.at_time("9:30")
expected = ts.at_time(time(9, 30))
tm.assert_frame_equal(result, expected)
result = ts.loc[time(9, 30)]
expected = ts.loc[(rng.hour == 9) & (rng.minute == 30)]
tm.assert_frame_equal(result, expected)
# midnight, everything
rng = date_range("1/1/2000", "1/31/2000")
ts = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts.at_time(time(0, 0))
tm.assert_frame_equal(result, ts)
# time doesn't exist
rng = date_range("1/1/2012", freq="23Min", periods=384)
ts = DataFrame(np.random.randn(len(rng), 2), rng)
rs = ts.at_time("16:00")
assert len(rs) == 0
@pytest.mark.parametrize(
"hour", ["1:00", "1:00AM", time(1), time(1, tzinfo=pytz.UTC)]
)
def test_at_time_errors(self, hour):
# GH 24043
dti = pd.date_range("2018", periods=3, freq="H")
df = pd.DataFrame(list(range(len(dti))), index=dti)
if getattr(hour, "tzinfo", None) is None:
result = df.at_time(hour)
expected = df.iloc[1:2]
tm.assert_frame_equal(result, expected)
else:
with pytest.raises(ValueError, match="Index must be timezone"):
df.at_time(hour)
def test_at_time_tz(self):
# GH 24043
dti = pd.date_range("2018", periods=3, freq="H", tz="US/Pacific")
df = pd.DataFrame(list(range(len(dti))), index=dti)
result = df.at_time(time(4, tzinfo=pytz.timezone("US/Eastern")))
expected = df.iloc[1:2]
tm.assert_frame_equal(result, expected)
def test_at_time_raises(self):
# GH20725
df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
with pytest.raises(TypeError): # index is not a DatetimeIndex
df.at_time("00:00")
@pytest.mark.parametrize("axis", ["index", "columns", 0, 1])
def test_at_time_axis(self, axis):
# issue 8839
rng = date_range("1/1/2000", "1/5/2000", freq="5min")
ts = DataFrame(np.random.randn(len(rng), len(rng)))
ts.index, ts.columns = rng, rng
indices = rng[(rng.hour == 9) & (rng.minute == 30) & (rng.second == 0)]
if axis in ["index", 0]:
expected = ts.loc[indices, :]
elif axis in ["columns", 1]:
expected = ts.loc[:, indices]
result = ts.at_time("9:30", axis=axis)
tm.assert_frame_equal(result, expected)
def test_between_time(self, close_open_fixture):
rng = date_range("1/1/2000", "1/5/2000", freq="5min")
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(0, 0)
etime = time(1, 0)
inc_start, inc_end = close_open_fixture
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
assert len(filtered) == exp_len
for rs in filtered.index:
t = rs.time()
if inc_start:
assert t >= stime
else:
assert t > stime
if inc_end:
assert t <= etime
else:
assert t < etime
result = ts.between_time("00:00", "01:00")
expected = ts.between_time(stime, etime)
tm.assert_frame_equal(result, expected)
# across midnight
rng = date_range("1/1/2000", "1/5/2000", freq="5min")
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(22, 0)
etime = time(9, 0)
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
assert len(filtered) == exp_len
for rs in filtered.index:
t = rs.time()
if inc_start:
assert (t >= stime) or (t <= etime)
else:
assert (t > stime) or (t <= etime)
if inc_end:
assert (t <= etime) or (t >= stime)
else:
assert (t < etime) or (t >= stime)
def test_between_time_raises(self):
# GH20725
df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
with pytest.raises(TypeError): # index is not a DatetimeIndex
df.between_time(start_time="00:00", end_time="12:00")
def test_between_time_axis(self, axis):
# issue 8839
rng = date_range("1/1/2000", periods=100, freq="10min")
ts = DataFrame(np.random.randn(len(rng), len(rng)))
stime, etime = ("08:00:00", "09:00:00")
exp_len = 7
if axis in ["index", 0]:
ts.index = rng
assert len(ts.between_time(stime, etime)) == exp_len
assert len(ts.between_time(stime, etime, axis=0)) == exp_len
if axis in ["columns", 1]:
ts.columns = rng
selected = ts.between_time(stime, etime, axis=1).columns
assert len(selected) == exp_len
def test_between_time_axis_raises(self, axis):
# issue 8839
rng = date_range("1/1/2000", periods=100, freq="10min")
mask = np.arange(0, len(rng))
rand_data = np.random.randn(len(rng), len(rng))
ts = DataFrame(rand_data, index=rng, columns=rng)
stime, etime = ("08:00:00", "09:00:00")
msg = "Index must be DatetimeIndex"
if axis in ["columns", 1]:
ts.index = mask
with pytest.raises(TypeError, match=msg):
ts.between_time(stime, etime)
with pytest.raises(TypeError, match=msg):
ts.between_time(stime, etime, axis=0)
if axis in ["index", 0]:
ts.columns = mask
with pytest.raises(TypeError, match=msg):
ts.between_time(stime, etime, axis=1)
def test_operation_on_NaT(self):
# Both NaT and Timestamp are in DataFrame.
df = pd.DataFrame({"foo": [pd.NaT, pd.NaT, pd.Timestamp("2012-05-01")]})
res = df.min()
exp = pd.Series([pd.Timestamp("2012-05-01")], index=["foo"])
tm.assert_series_equal(res, exp)
res = df.max()
exp = pd.Series([pd.Timestamp("2012-05-01")], index=["foo"])
tm.assert_series_equal(res, exp)
# GH12941, only NaTs are in DataFrame.
df = pd.DataFrame({"foo": [pd.NaT, pd.NaT]})
res = df.min()
exp = pd.Series([pd.NaT], index=["foo"])
tm.assert_series_equal(res, exp)
res = df.max()
exp = pd.Series([pd.NaT], index=["foo"])
tm.assert_series_equal(res, exp)
def test_datetime_assignment_with_NaT_and_diff_time_units(self):
# GH 7492
data_ns = np.array([1, "nat"], dtype="datetime64[ns]")
result = pd.Series(data_ns).to_frame()
result["new"] = data_ns
expected = pd.DataFrame(
{0: [1, None], "new": [1, None]}, dtype="datetime64[ns]"
)
tm.assert_frame_equal(result, expected)
# OutOfBoundsDatetime error shouldn't occur
data_s = np.array([1, "nat"], dtype="datetime64[s]")
result["new"] = data_s
expected = pd.DataFrame(
{0: [1, None], "new": [1e9, None]}, dtype="datetime64[ns]"
)
tm.assert_frame_equal(result, expected)
def test_frame_to_period(self):
K = 5
dr = date_range("1/1/2000", "1/1/2001")
pr = period_range("1/1/2000", "1/1/2001")
df = DataFrame(np.random.randn(len(dr), K), index=dr)
df["mix"] = "a"
pts = df.to_period()
exp = df.copy()
exp.index = pr
tm.assert_frame_equal(pts, exp)
pts = df.to_period("M")
tm.assert_index_equal(pts.index, exp.index.asfreq("M"))
df = df.T
pts = df.to_period(axis=1)
exp = df.copy()
exp.columns = pr
tm.assert_frame_equal(pts, exp)
pts = df.to_period("M", axis=1)
tm.assert_index_equal(pts.columns, exp.columns.asfreq("M"))
msg = "No axis named 2 for object type <class 'pandas.core.frame.DataFrame'>"
with pytest.raises(ValueError, match=msg):
df.to_period(axis=2)
@pytest.mark.parametrize("fn", ["tz_localize", "tz_convert"])
def test_tz_convert_and_localize(self, fn):
l0 = date_range("20140701", periods=5, freq="D")
l1 = date_range("20140701", periods=5, freq="D")
int_idx = Index(range(5))
if fn == "tz_convert":
l0 = l0.tz_localize("UTC")
l1 = l1.tz_localize("UTC")
for idx in [l0, l1]:
l0_expected = getattr(idx, fn)("US/Pacific")
l1_expected = getattr(idx, fn)("US/Pacific")
df1 = DataFrame(np.ones(5), index=l0)
df1 = getattr(df1, fn)("US/Pacific")
tm.assert_index_equal(df1.index, l0_expected)
# MultiIndex
# GH7846
df2 = DataFrame(np.ones(5), MultiIndex.from_arrays([l0, l1]))
df3 = getattr(df2, fn)("US/Pacific", level=0)
assert not df3.index.levels[0].equals(l0)
tm.assert_index_equal(df3.index.levels[0], l0_expected)
tm.assert_index_equal(df3.index.levels[1], l1)
assert not df3.index.levels[1].equals(l1_expected)
df3 = getattr(df2, fn)("US/Pacific", level=1)
tm.assert_index_equal(df3.index.levels[0], l0)
assert not df3.index.levels[0].equals(l0_expected)
tm.assert_index_equal(df3.index.levels[1], l1_expected)
assert not df3.index.levels[1].equals(l1)
            df4 = DataFrame(np.ones(5), MultiIndex.from_arrays([int_idx, l0]))
import pandas as pd
from PyQt5.QtCore import Qt, pyqtSignal
from PyQt5.QtWidgets import QTableWidget, QTableWidgetItem, QPlainTextEdit, QSlider, QWidget, QVBoxLayout, QLabel, \
QHBoxLayout, QPushButton
from util.langUtil import check_if_valid_timestr
def get_datatable_sheet(table: QTableWidget):
map = []
for i in range(table.rowCount()):
row = []
for u in range(table.columnCount()):
if table.item(i, u):
row.append(table.item(i, u).text())
else:
row.append("")
map.append(row)
    # Skip rows that are not completely filled in, as well as rows whose interval/period strings are invalid
map = [row for row in map if row[0] and row[1] and row[2]
and check_if_valid_timestr(row[1]) and check_if_valid_timestr(row[2])]
data = {
'symbol': [row[0] for row in map],
'interval': [row[1] for row in map],
'period': [row[2] for row in map],
}
df = | pd.DataFrame(data) | pandas.DataFrame |
## import things
import ray
import pandas as pd
from prophet import Prophet
## data pre-processing
df = | pd.read_csv('./yellow_tripdata_2021-01.csv') | pandas.read_csv |
# This is a test file intended to be used with pytest
# pytest automatically runs all the function starting with "test_"
# see https://docs.pytest.org for more information
import os
import sys
import numpy as np
import pandas as pd
## Add stuff to the path to enable exec outside of DSS
plugin_root = os.path.dirname(os.path.dirname(os.path.dirname((os.path.dirname(os.path.dirname(os.path.realpath(__file__)))))))
sys.path.append(os.path.join(plugin_root, 'python-lib'))
import dku_timeseries
JUST_BEFORE_SPRING_DST = pd.Timestamp('20190131 01:59:00').tz_localize('CET')
JUST_BEFORE_FALL_DST = pd.Timestamp('20191027 02:59:00').tz_localize('CET',
ambiguous=True) # It's ambiguous because there are 2 instants with these dates! We select the first
TIME_COL = 'time_col'
DATA_COL = 'data_col'
GROUP_COL = 'group_col'
### Helpers to create test data, should be fixtures at some point I guess
def _make_df_with_one_col(column_data, period=pd.DateOffset(seconds=1), start_time=JUST_BEFORE_SPRING_DST):
    time = pd.date_range(start_time, None, len(column_data), period)
df = pd.DataFrame({TIME_COL: time, DATA_COL: column_data})
return df
def _make_window_aggregator_params():
params = dku_timeseries.WindowAggregatorParams(window_width=3)
return params
def _make_window_aggregator():
params = _make_window_aggregator_params()
return dku_timeseries.WindowAggregator(params)
def _make_extrema_extraction_params():
window = _make_window_aggregator()
params = dku_timeseries.ExtremaExtractorParams(window)
return params
def _make_extrema_extractor():
params = _make_extrema_extraction_params()
return dku_timeseries.ExtremaExtractor(params)
### Test cases
class TestExtremaExtraction:
def test_empty_df(self):
df = _make_df_with_one_col([])
extrema_extractor = _make_extrema_extractor()
output_df = extrema_extractor.compute(df, TIME_COL, DATA_COL, [GROUP_COL])
assert output_df.shape == (0, 2)
def test_single_row_df(self):
df = _make_df_with_one_col([33])
extrema_extractor = _make_extrema_extractor()
output_df = extrema_extractor.compute(df, TIME_COL, DATA_COL, [GROUP_COL])
assert output_df.shape == (1, 2)
assert output_df[DATA_COL][0] == df[DATA_COL][0]
def test_incremental_df(self):
length = 100
data = [x for x in range(length)]
df = _make_df_with_one_col(data)
print(df.shape)
extrema_extractor = _make_extrema_extractor()
output_df = extrema_extractor.compute(df, TIME_COL, DATA_COL)
assert (output_df[DATA_COL][0]) == 99
assert (output_df[DATA_COL + '_min'][0]) == 96 # window width = 3
def test_extrema_without_neighbors(self):
length = 100
data = [x for x in range(length)]
df = _make_df_with_one_col(data)
window_aggregator = dku_timeseries.WindowAggregator(dku_timeseries.WindowAggregatorParams(window_unit='milliseconds'))
params = dku_timeseries.ExtremaExtractorParams(window_aggregator=window_aggregator)
extrema_extractor = dku_timeseries.ExtremaExtractor(params)
output_df = extrema_extractor.compute(df, TIME_COL, DATA_COL)
# only have DATE_TIME col and DATA_COL of the extrema, no stats because no neighbors
assert output_df.shape == (1, 2)
assert output_df[DATA_COL][0] == 99
def test_group_extrema_without_neighbors(self):
start_time_1 = pd.Timestamp('20190131 01:59:00').tz_localize('CET')
start_time_2 = pd.Timestamp('20190131 02:00:00').tz_localize('CET')
start_time_list = [start_time_1, start_time_2]
len1 = 100
len2 = 10
data1 = range(len1)
data2 = range(len2)
data_list = [data1, data2]
period1 = pd.DateOffset(seconds=1)
period2 = pd.DateOffset(seconds=1)
period_list = [period1, period2]
df_list = []
for group_id, data, period, start_time in zip(range(len(data_list)), data_list, period_list, start_time_list):
group_name = 'group_{}'.format(group_id)
temp_df = _make_df_with_one_col(data, period=period, start_time=start_time)
temp_df[GROUP_COL] = group_name
df_list.append(temp_df)
df = pd.concat(df_list, axis=0)
window_aggregator = dku_timeseries.WindowAggregator(dku_timeseries.WindowAggregatorParams(window_unit='milliseconds'))
params = dku_timeseries.ExtremaExtractorParams(window_aggregator=window_aggregator)
extrema_extractor = dku_timeseries.ExtremaExtractor(params)
output_df = extrema_extractor.compute(df, TIME_COL, DATA_COL, groupby_columns=[GROUP_COL])
assert output_df.shape == (2, 3)
assert np.array_equal(output_df[DATA_COL], [99, 9])
def test_incremental_group_df(self):
start_time_1 = pd.Timestamp('20190131 01:59:00').tz_localize('CET')
start_time_2 = pd.Timestamp('20190131 02:00:00').tz_localize('CET')
start_time_list = [start_time_1, start_time_2]
len1 = 100
len2 = 10
data1 = range(len1)
data2 = range(len2)
data_list = [data1, data2]
period1 = | pd.DateOffset(seconds=1) | pandas.DateOffset |
"""
==============================================================
Cho2017 - Parameters optimization: Ensemble - FUCONE
===============================================================
This module is design to select the best ensemble configuration that enhances the accuracy
"""
# Authors: <NAME> <<EMAIL>>,
# <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
import os
import numpy as np
import pandas as pd
from tqdm import tqdm
from sklearn.base import clone
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from sklearn.ensemble import StackingClassifier
from sklearn.linear_model import (
LogisticRegression,
)
from sklearn.metrics import balanced_accuracy_score, roc_auc_score, cohen_kappa_score
from sklearn.model_selection import GridSearchCV, StratifiedKFold
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import LabelEncoder
from sklearn.svm import SVC
from pyriemann.spatialfilters import CSP
from pyriemann.tangentspace import TangentSpace
from pyriemann.classification import MDM, FgMDM
from moabb.datasets import (
Cho2017,
BNCI2014001,
PhysionetMI,
)
from moabb.paradigms import LeftRightImagery
from moabb.pipelines.csp import TRCSP
from fc_pipeline import (
FunctionalTransformer,
EnsureSPD,
FC_DimRed,
GetData,
)
##
if os.path.basename(os.getcwd()) == "FUCONE":
os.chdir("Database")
basedir = os.getcwd()
datasets = [Cho2017(), BNCI2014001(), PhysionetMI()]
spectral_met = ["cov", "imcoh", "plv", "wpli2_debiased", "instantaneous"]
print(
"#################" + "\n"
"List of pre-selected FC metrics: " + "\n" + str(spectral_met) + "\n"
"#################"
)
freqbands = {"defaultBand": [8, 35]}
print(
"#################" + "\n"
"List of pre-selected Frequency bands: " + "\n" + str(freqbands) + "\n"
"#################"
)
threshold = [0.05]
percent_nodes = [10, 20, 30]
print(
"#################" + "\n"
"List of pre-selected thresholds: " + "\n" + str(threshold) + "\n"
"List of pre-selected number of nodes: " + "\n" + str(percent_nodes) + "\n"
"#################"
)
#%% Baseline evaluations
bs_fmin, bs_fmax = 8, 35
ft = FunctionalTransformer(delta=1, ratio=0.5, method="cov", fmin=bs_fmin, fmax=bs_fmax)
step_trcsp = [("ft", ft), ("trcsp", TRCSP(nfilter=6)), ("lda", LDA())]
step_regcsp = [
("ft", ft),
("csp", CSP(nfilter=6)),
("lda", LDA(solver="lsqr", shrinkage="auto")),
]
step_csp = [
("ft", ft),
("csp", CSP(nfilter=6)),
(
"optsvm",
GridSearchCV(SVC(), {"kernel": ("linear", "rbf"), "C": [0.1, 1, 10]}, cv=3),
),
]
step_mdm = [("ft", ft), ("fgmdm", FgMDM(metric="riemann", tsupdate=False))]
step_cov = [
("spd", EnsureSPD()),
("tg", TangentSpace(metric="riemann")),
(
"LogistReg",
LogisticRegression(
penalty="elasticnet", l1_ratio=0.15, intercept_scaling=1000.0, solver="saga"
),
),
]
step_fc = [
("tg", TangentSpace(metric="riemann")),
(
"LogistReg",
LogisticRegression(
penalty="elasticnet", l1_ratio=0.15, intercept_scaling=1000.0, solver="saga"
),
),
]
#%% Specific evaluation for ensemble learning
for d in datasets:
subj = d.subject_list
path_csv_root = basedir + "/1_Dataset-csv/" + d.code.replace(" ", "-")
path_data_root = basedir + "/2_Dataset-npz/" + d.code.replace(" ", "-")
path_data_root_chan = path_data_root + "/Chan_select/"
path_figures_root = basedir + "/0_Figures/" + d.code.replace(" ", "-")
os.chdir(path_data_root)
dataset_res = list()
for f in freqbands:
fmin = freqbands[f][0]
fmax = freqbands[f][1]
subjects = subj
for subject in tqdm(subjects, desc="subject"):
fmin = freqbands["defaultBand"][0]
fmax = freqbands["defaultBand"][1]
paradigm = LeftRightImagery(fmin=fmin, fmax=fmax)
ep_, _, _ = paradigm.get_data(
dataset=d, subjects=[subj[1]], return_epochs=True
)
nchan = ep_.info["nchan"]
nb_nodes = [int(p / 100.0 * nchan) for p in percent_nodes]
ppl_DR, ppl_noDR, ppl_ens, baseline_ppl = {}, {}, {}, {}
gd = GetData(paradigm, d, subject)
baseline_ppl["TRCSP+LDA"] = Pipeline(steps=[("gd", gd)] + step_trcsp)
baseline_ppl["RegCSP+shLDA"] = Pipeline(steps=[("gd", gd)] + step_regcsp)
baseline_ppl["CSP+optSVM"] = Pipeline(steps=[("gd", gd)] + step_csp)
baseline_ppl["FgMDM"] = Pipeline(steps=[("gd", gd)] + step_mdm)
for sm in spectral_met:
ft = FunctionalTransformer(
delta=1, ratio=0.5, method=sm, fmin=fmin, fmax=fmax
)
if sm == "cov":
ppl_DR["cov+elasticnet"] = Pipeline(
steps=[("gd", gd), ("sm", ft)] + step_cov
)
ppl_noDR["cov+elasticnet"] = Pipeline(
steps=[("gd", gd), ("sm", ft)] + step_cov
)
else:
ft_DR = FC_DimRed(
threshold=threshold,
nb_nodes=nb_nodes,
classifier=FgMDM(metric="riemann", tsupdate=False),
)
pname_postDR = sm + "+DR+elasticnet"
ppl_DR[pname_postDR] = Pipeline(
steps=[
("gd", gd),
("sm", ft),
("spd", EnsureSPD()),
("DR", ft_DR),
]
+ step_fc
)
pname_noDR = sm + "+elasticnet"
ppl_noDR[pname_noDR] = Pipeline(
steps=[("gd", gd), ("sm", ft), ("spd", EnsureSPD()),] + step_fc
)
################ Ensemble from single features classif with elasticnet ################
DR_estimators = [(n, ppl_DR[n]) for n in ppl_DR]
noDR_estimators = [(n, ppl_noDR[n]) for n in ppl_noDR]
cvkf = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
# ensemble with elasticnet
elastic_estimator = LogisticRegression(
penalty="elasticnet",
l1_ratio=0.15,
intercept_scaling=1000.0,
solver="saga",
)
scl_elastic_DR = StackingClassifier(
estimators=DR_estimators,
cv=cvkf,
n_jobs=1,
final_estimator=elastic_estimator,
stack_method="predict_proba",
)
ppl_ens["ensemble-DR"] = scl_elastic_DR
scl_elastic_noDR = StackingClassifier(
estimators=noDR_estimators,
cv=cvkf,
n_jobs=1,
final_estimator=elastic_estimator,
stack_method="predict_proba",
)
ppl_ens["ensemble-noDR"] = scl_elastic_noDR
all_ppl = {**baseline_ppl, **ppl_ens}
###########################################################################
# Train and evaluate
ep_, y, metadata = paradigm.get_data(d, [subject], return_epochs=True)
X = np.arange(len(y))
for session in np.unique(metadata.session):
ix = metadata.session == session
cv = StratifiedKFold(5, shuffle=True, random_state=42)
le = LabelEncoder()
y_cv = le.fit_transform(y[ix])
X_ = X[ix]
y_ = y_cv
for idx, (train, test) in enumerate(cv.split(X_, y_)):
for ppn, ppl in tqdm(
all_ppl.items(), total=len(all_ppl), desc="pipelines"
):
cvclf = clone(ppl)
cvclf.fit(X_[train], y_[train])
yp = cvclf.predict(X_[test])
acc = balanced_accuracy_score(y_[test], yp)
auc = roc_auc_score(y_[test], yp)
kapp = cohen_kappa_score(y_[test], yp)
res_info = {
"subject": subject,
"session": "session_0",
"channels": nchan,
"n_sessions": 1,
"FreqBand": "defaultBand",
"dataset": d.code.replace(" ", "-"), # DONE change d.code
"fmin": fmin,
"fmax": fmax,
"samples": len(y_),
"time": 0.0,
"split": idx,
}
res = {
"score": auc,
"kappa": kapp,
"accuracy": acc,
"pipeline": ppn,
"n_dr": nchan,
"thres": 0,
**res_info,
}
dataset_res.append(res)
if isinstance(ppl, StackingClassifier):
for est_n, est_p in cvclf.named_estimators_.items():
p = est_p.get_params()
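                                # for/else: fall back to the defaults below when no
                                # FC_DimRed step exists in this estimator's pipeline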
for step_est in p["steps"]:
if isinstance(step_est[1], FC_DimRed):
thres, n_dr = p[step_est[0]].best_param_
break
else:
thres, n_dr = 0, nchan
ype = est_p.predict(X_[test])
acc = balanced_accuracy_score(y_[test], ype)
auc = roc_auc_score(y_[test], ype)
kapp = cohen_kappa_score(y_[test], ype)
res = {
"score": auc,
"kappa": kapp,
"accuracy": acc,
"pipeline": est_n,
"thres": thres,
"n_dr": n_dr,
**res_info,
}
dataset_res.append(res)
dataset_res = | pd.DataFrame(dataset_res) | pandas.DataFrame |
import pytest
import gpmap
from epistasis import models
import numpy as np
import pandas as pd
import os
def test__genotypes_to_X(test_data):
# Make sure function catches bad genotype passes
d = test_data[0]
gpm = gpmap.GenotypePhenotypeMap(genotype=d["genotype"],
phenotype=d["phenotype"])
# Duplicated
g = list(gpm.genotype)
g.extend(g)
# not in gpmap
b = list(gpm.genotype)
b.append("stupid")
bad_genotypes = [g,b]
for bad in bad_genotypes:
with pytest.raises(ValueError):
models.base._genotypes_to_X(bad,gpm,order=1,model_type="local")
    # Sample through various model combos
allowed = {"local":set([0,1]),
"global":set([-1,1])}
for d in test_data:
gpm = gpmap.GenotypePhenotypeMap(genotype=d["genotype"],
phenotype=d["phenotype"])
for i in range(1,gpm.length+1,1):
for model_type in ["local","global"]:
X = models.base._genotypes_to_X(gpm.genotype,
gpm,
order=i,
model_type=model_type)
assert X.shape[0] == len(gpm.genotype)
assert set(np.unique(X)).issubset(allowed[model_type])
def test_arghandler_decorator():
class Yo:
def _a(self,data=5,method=None):
return data
def _b(self,data=None,method=None):
return 6
@models.base.arghandler
def test_method(self,a=None,b=None,**kwargs):
return a, b
@models.base.arghandler
def bad_method(self,c=None,d=None,**kwargs):
return c, d
yo = Yo()
assert yo.test_method() == (None,6)
assert yo.test_method(a=5) == (5,6)
assert yo.test_method(a=10) == (10,6)
assert yo.test_method(b=10) == (None,6)
with pytest.raises(AttributeError):
yo.bad_method()
### Tests for AbstractModel:
# AbstractModel cannot be instantiated on its own, as it is designed to be a
# mixin with sklearn classes. Many methods have to be defined in subclass
# (.fit, .predict, etc.) These will not be tested here, but instead in the
# subclass tests. For methods defined here that are never redefined in subclass
# (._X, .add_gpm, etc.) we test using the simplest mixed/subclass
# (EpistasisLinearRegression).
def test_abstractmodel_predict_to_df(test_data):
"""
Test basic functionality. Real test of values will be done on .predict
for subclasses.
"""
m = models.linear.EpistasisLinearRegression()
d = test_data[0]
gpm = gpmap.GenotypePhenotypeMap(genotype=d["genotype"],
phenotype=d["phenotype"])
m.add_gpm(gpm)
# This should fail -- no fit run
with pytest.raises(Exception):
df = m.predict_to_df()
m.fit()
# This should work
df = m.predict_to_df()
assert type(df) is type(pd.DataFrame())
assert len(df) == len(d["genotype"])
# Create and fit a new model.
m = models.linear.EpistasisLinearRegression()
gpm = gpmap.GenotypePhenotypeMap(genotype=d["genotype"],
phenotype=d["phenotype"])
# No gpm added -- should fail
with pytest.raises(RuntimeError):
m.predict_to_df()
m.add_gpm(gpm)
m.fit()
df = m.predict_to_df(genotypes=d["genotype"][0])
assert len(df) == 1
bad_stuff = [1,{},[1,2],"STUPID",["STUPID","IS","REAL"]]
for b in bad_stuff:
with pytest.raises(ValueError):
print(f"Trying bad genotypes {b}")
m.predict_to_df(genotypes=b)
df = m.predict_to_df(genotypes=d["genotype"][:3])
assert len(df) == 3
def test_abstractmodel_predict_to_csv(test_data,tmp_path):
m = models.linear.EpistasisLinearRegression()
d = test_data[0]
gpm = gpmap.GenotypePhenotypeMap(genotype=d["genotype"],
phenotype=d["phenotype"])
m.add_gpm(gpm)
m.fit()
csv_file = os.path.join(tmp_path,"tmp.csv")
m.predict_to_csv(filename=csv_file)
assert os.path.exists(csv_file)
df = pd.read_csv(csv_file)
assert len(df) == len(d["genotype"])
# Make sure genotypes pass works
m.predict_to_csv(filename=csv_file,genotypes=d["genotype"][0])
assert os.path.exists(csv_file)
df = | pd.read_csv(csv_file) | pandas.read_csv |
import time
import sqlite3
import os
import hashlib
import traceback
import pandas as pd
from flask import Flask, request, json, render_template, send_from_directory, abort, g
from passlib.apps import custom_app_context as pwd_context
from flask_httpauth import HTTPBasicAuth, HTTPTokenAuth
auth = HTTPBasicAuth()
tokenauth = HTTPTokenAuth()
working_directory = os.path.dirname(__file__)
app = Flask(__name__)
tokens = dict()
master_database = 'master.db'
def create_heatmap(schedules):
"""Represents upcoming tasks as a calendar heatmap."""
total = []
for item, schedule in schedules:
for day in schedule:
total.append((day, 1, item))
schedule_df = pd.DataFrame(total, columns=['date', 'check', 'item'])
schedule_df.index = schedule_df['date']
schedule_df = schedule_df.drop(columns=['date'])
resampled = schedule_df.resample('D').agg({'check': 'sum', 'item': list})
resampled = resampled[resampled['check'] > 0].reset_index()
return resampled
def generate_upcoming_tasks(merged, exclude_past=True):
"""Generates upcoming tasks given information about last checked dates and master data."""
today = pd.Timestamp.today()
schedules = []
for _, row in merged.iterrows():
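        # one prospective check every `frequency` weeks, anchored at the last
        # check date and extending to 13 weeks past today; the first entry is
        # the last check itself and is dropped on the next line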
schedule = pd.date_range(row['date_checked'], today+pd.Timedelta(13, 'W'), freq=f'{row["frequency"]*7}D')
schedule = schedule[1:]
if len(schedule) == 0:
continue
if exclude_past:
schedule = schedule[schedule >= today]
schedules.append((row['item'], schedule))
return schedules
def get_user_database_name(username):
connection = sqlite3.connect(os.path.join(working_directory, master_database))
df = pd.read_sql('select database from user_to_database where username = :username',
con = connection, params = {"username": username})
return df['database'].iloc[0]
def inspect_inventory_log(username):
"""Gathers observations and master data."""
today = pd.Timestamp.today()
user_database = get_user_database_name(username)
connection = sqlite3.connect(os.path.join(working_directory, user_database))
checks = pd.read_sql('SELECT * from inventory_log', con = connection)
checks['date'] = pd.to_datetime(checks['date'])
checks = checks.sort_values('date')
last_checked = checks.groupby(['item']).last().reset_index()
master_data = pd.read_sql('SELECT * from master_data', con = connection)
recent_master_data = master_data.sort_values('date_added').groupby('item').last().reset_index()
merged = recent_master_data.merge(last_checked, on='item', suffixes=('_initial','_checked'))
merged['week_difference'] = (today - merged['date_checked']).dt.days/7
merged['need_to_check'] = merged['week_difference'] > merged['frequency']
return merged
@auth.verify_password
def verify_password(username, password):
connection = sqlite3.connect(os.path.join(working_directory, master_database))
users = pd.read_sql('select * from users where username=:username',
con = connection,
params={"username": username})
if len(users) == 0:
return False
encrypted_password = users['password'].iloc[0]
g.user = username
return pwd_context.verify(password, encrypted_password)
@tokenauth.verify_token
def verify_token(token):
today = pd.Timestamp.today()
if token in tokens:
if tokens[token]['expiry'] > today:
g.user = tokens[token]['username']
return True
else:
tokens.pop(token, None)
return False
def create_user(username, password):
"""Creates a user in the database including its own set of tables."""
connection = sqlite3.connect(os.path.join(working_directory, master_database))
try:
existing_users = pd.read_sql('select * from users', con = connection)
except:
existing_users = []
current_id = len(existing_users) + 1 # we don't depend on id input anywhere so it's fine to not use better UUIDs
if len(existing_users) > 0:
if username in set(existing_users['username']):
return False
user = pd.DataFrame()
user['username'] = [username]
user['password'] = [pwd_context.hash(password)] # encryption
user['active'] = [True]
user['id'] = [current_id]
user.to_sql('users', con = connection, if_exists='append')
new_db = f'user{current_id}.db'
user_db_mapping = pd.DataFrame()
user_db_mapping['username'] = [username]
user_db_mapping['database'] = [new_db]
user_db_mapping.to_sql('user_to_database', con = connection, if_exists='append')
return True
@app.route('/')
def hello_world():
return render_template("index.html")
@app.route('/users/login', methods=['GET'])
@auth.login_required
def login_user():
today = pd.Timestamp.today()
current_tokens = list(tokens.keys())
for token in current_tokens:
if tokens[token]['expiry'] < today:
tokens.pop(token, None)
expiry = today + pd.Timedelta(11, 'H')
frontend_expiry = int((time.time() + (60*60*11)) * 1000)
token_string = hashlib.sha256((g.user+str(today)).encode()).hexdigest()
token = {'username': g.user, 'expiry': expiry}
print(token)
tokens[token_string] = token
return json.jsonify({'token_created': token_string, 'username': g.user, 'token_expiry': frontend_expiry})
@app.route('/users/register', methods=['POST'])
def register_user():
print(request)
username = request.form.get('username')
password = request.form.get('password')
if username is None or password is None:
abort(400)
created = create_user(username, password)
return json.jsonify({ 'username_created': created })
@app.route('/suggestion')
@tokenauth.login_required
def rx_suggestion():
"""Queries the DB for all rx names and return them to be used as suggestions"""
user_database = get_user_database_name(g.user)
try:
connection = sqlite3.connect(os.path.join(working_directory, user_database))
inventory = pd.read_sql('SELECT DISTINCT item from inventory_log', con = connection)
suggestions_dict = inventory.to_dict(orient='list')
print(suggestions_dict)
except:
suggestions_dict = {'item': []}
return json.jsonify(suggestions_dict)
@app.route('/search/<name>')
@tokenauth.login_required
def search_rx(name):
"""Queries the DB for the relevant rows, based on search bar"""
user_database = get_user_database_name(g.user)
try:
connection = sqlite3.connect(os.path.join(working_directory, user_database))
inventory = pd.read_sql('SELECT * from inventory_log', con = connection)
low_name = name.lower()
sub_inventory = inventory[inventory['item'].str.lower() == low_name]
actual_name = sub_inventory['item'].iloc[0]
checks_count = len(sub_inventory.index)
print(checks_count)
search_return_dict = {"checks_count": checks_count}
# What else should we return when someone asks for information about an item?
# TODO: next_check
search_return_dict["item"] = [actual_name]
search_return_dict["last_checked"] = sub_inventory["date"].max()
merged = inspect_inventory_log(username = g.user)
need_to_check = merged[merged['item'] == actual_name].iloc[0]['need_to_check'].astype(str)
search_return_dict["need_to_check"] = need_to_check
# Maybe also add the median time between checks
except:
search_return_dict = {'item': []}
return json.jsonify(search_return_dict)
@app.route('/add_item', methods=['POST'])
@tokenauth.login_required
def add_item(inventory_checked=True):
today = pd.Timestamp.today()
username = g.user
df = pd.DataFrame()
df['item'] = [request.form.get('name')]
df['date'] = pd.to_datetime([request.form.get('date')])
df['frequency'] = [int(request.form.get('frequency'))]
df['date_added'] = [today]
print(df)
user_database = get_user_database_name(username)
connection = sqlite3.connect(os.path.join(working_directory, user_database))
df.to_sql('master_data', con=connection, if_exists='append', index=False)
if inventory_checked:
df[['date', 'item']].to_sql('inventory_log', con=connection, if_exists='append', index=False)
return df.to_json(orient='split', index=False)
@app.route('/upload_master_data', methods=['POST'])
@tokenauth.login_required
def upload_master_data(inventory_checked=True):
"""Updates a master table from an input file."""
today = pd.Timestamp.today()
username = g.user
#df = pd.read_csv(os.path.join(working_directory, 'horaire_data.csv'))
csv = request.files.get('file')
filename = hashlib.md5(username.encode()).hexdigest()+'.csv'
csv.save(filename) # TODO avoid writing to disk
df = pd.read_csv(filename)
df['date'] = pd.to_datetime(df['date'])
df['date_added'] = today
user_database = get_user_database_name(username)
connection = sqlite3.connect(os.path.join(working_directory, user_database))
df.to_sql('master_data', con=connection, if_exists='append', index=False)
if inventory_checked:
df[['date', 'item']].to_sql('inventory_log', con=connection, if_exists='append', index=False)
return df.to_json(orient='split', index=False)
def gimme_schedules():
try:
merged = inspect_inventory_log(username = g.user)
schedules = generate_upcoming_tasks(merged)
return_values = []
for schedule in schedules:
title = schedule[0]
events = schedule[1]
for event in events:
item = dict()
item['title'] = title
item['date'] = event.strftime(format='%Y-%m-%d')
return_values.append(item)
except:
return_values = []
return return_values
@app.route('/all_events')
@tokenauth.login_required
def get_all_events():
"""Creates a schedule ahead of time."""
try:
merged = inspect_inventory_log(username = g.user)
schedules = generate_upcoming_tasks(merged)
return_values = []
for schedule in schedules:
title = schedule[0]
events = schedule[1]
for event in events:
item = dict()
item['title'] = title
item['date'] = event.strftime(format='%Y-%m-%d')
return_values.append(item)
user_database = get_user_database_name(username = g.user)
connection = sqlite3.connect(os.path.join(working_directory, user_database))
log = pd.read_sql('select * from inventory_log', con=connection)
log['title'] = log['item']
log = log[['date', 'title']]
log['date'] = log['date'].apply(lambda x: x.split(' ')[0])
log['color'] = 'green'
past = log.to_dict('records')
return_values.extend(past)
except:
traceback.print_exc()
return_values = []
return json.jsonify(return_values)
@app.route('/get_tasks')
@tokenauth.login_required
def get_tasks():
try:
merged = inspect_inventory_log(username = g.user)
schedules = generate_upcoming_tasks(merged, exclude_past=False)
today = pd.Timestamp.today()
grouped_tasks = dict()
schedules = sorted(schedules, key=lambda x: x[1][0])
for schedule in schedules:
title = schedule[0]
check_date = schedule[1][0]
if check_date < today:
check_date = "Past due"
elif check_date == today:
check_date = "Today"
elif check_date.week == today.week:
check_date = 'This week'
elif check_date.week == (today+ | pd.Timedelta(1, 'W') | pandas.Timedelta |
import urllib
import pytest
import pandas as pd
from pandas import testing as pdt
from anonympy import __version__
from anonympy.pandas import dfAnonymizer
from anonympy.pandas.utils_pandas import load_dataset
@pytest.fixture(scope="module")
def anonym_small():
df = load_dataset('small')
anonym = dfAnonymizer(df)
return anonym
@pytest.fixture(scope="module")
def anonym_big():
try:
df = load_dataset('big')
anonym = dfAnonymizer(df)
except urllib.error.HTTPError:
anonym = None
return anonym
def test_anonym_obj(anonym_small, anonym_big):
assert isinstance(anonym_small, dfAnonymizer), "should have\
returned `dfAnonymizer` object"
if anonym_big is None:
assert False, "Failed to fetch the DataFrame"
assert isinstance(anonym_big, dfAnonymizer), "should have returned\
`dfAnonymizer` object"
def test_numeric_noise(anonym_small):
output = anonym_small.numeric_noise('age', seed=42, inplace=False)
expected = pd.Series([38, 47], dtype='int64')
pdt.assert_series_equal(expected, output, check_names=False)
output = anonym_small.numeric_noise(['age', 'salary'],
seed=42,
inplace=False)
expected = pd.DataFrame({'age': [38, 47],
'salary': [59239.79912097112, 49323.30756879504]})
pdt.assert_frame_equal(expected, output)
def test_numeric_binning(anonym_small):
output = anonym_small.numeric_binning('salary', bins=2, inplace=False)
dtype = pd.CategoricalDtype([
pd.Interval(49315.0, 54279.0, closed='right'),
pd.Interval(54279.0, 59234.0, closed='right')],
ordered=True)
expected = pd.Series([
pd.Interval(54279.0, 59234.0, closed='right'),
pd.Interval(49315.0, 54279.0, closed='right')],
dtype=dtype)
pdt.assert_series_equal(expected, output, check_names=False)
output = anonym_small.numeric_binning(['age', 'salary'],
bins=2,
inplace=False)
dtype2 = pd.CategoricalDtype([
pd.Interval(33.0, 40.0, closed='right'),
pd.Interval(40.0, 48.0, closed='right')],
ordered=True)
ser2 = pd.Series([
pd.Interval(33.0, 40.0, closed='right'),
pd.Interval(40.0, 48.0, closed='right')],
dtype=dtype2)
expected = pd.DataFrame({'age': ser2, 'salary': expected})
pdt.assert_frame_equal(expected, output)
def test_numeric_masking(anonym_small):
output = anonym_small.numeric_masking('age', inplace=False)
expected = pd.Series([7.5, -7.5], dtype='float64')
pdt.assert_series_equal(expected, output, check_names=False)
output = anonym_small.numeric_masking(['age', 'salary'], inplace=False)
expected = pd.DataFrame({'age': [-4954.900676201789, 4954.900676201798],
'salary': [5.840670901327418e-15,
5.840670901327409e-15]})
pdt.assert_frame_equal(expected, output)
def test_numeric_rounding(anonym_small):
output = anonym_small.numeric_rounding('salary', inplace=False)
expected = pd.Series([60000.0, 50000.0], dtype='float64')
pdt.assert_series_equal(expected, output, check_names=False)
output = anonym_small.numeric_rounding(['age', 'salary'], inplace=False)
expected = pd.DataFrame({'age': {0: 30, 1: 50}, 'salary': {0: 60000.0,
1: 50000.0}})
pdt.assert_frame_equal(expected, output)
@pytest.mark.skipif(__version__ == '0.2.4',
reason="Requires anonympy >= 0.2.5")
def test_categorical_fake(anonym_small):
output = anonym_small.categorical_fake('name',
locale=['en_US'],
seed=42,
inplace=False)
expected = pd.Series(['<NAME>', '<NAME>'])
pdt.assert_series_equal(expected, output, check_names=False)
output = anonym_small.categorical_fake(['name', 'email'],
locale=['en_GB'],
seed=42,
inplace=False)
expected = pd.DataFrame({'name': {0: '<NAME>', 1: '<NAME>'},
'email': {0: '<EMAIL>',
1: '<EMAIL>'}})
pdt.assert_frame_equal(expected, output)
output = anonym_small.categorical_fake({'name': 'name_female'},
seed=42,
inplace=False)
expected = pd.Series(['<NAME>', '<NAME>'])
pdt.assert_series_equal(expected, output, check_names=False)
output = anonym_small.categorical_fake({'ssn': 'ssn', 'web': 'url'},
seed=42,
inplace=False)
expected = pd.DataFrame({'ssn': {0: '655-15-0410', 1: '760-36-4013'},
'web': {0: 'http://www.hill.net/',
1: 'http://johnson.com/'}})
pdt.assert_frame_equal(expected, output)
def test_categorical_fake_auto(anonym_small):
output = anonym_small.categorical_fake_auto(seed=42, inplace=False)
expected = pd.DataFrame({'name': {0: '<NAME>', 1: '<NAME>'},
'email': {0: '<EMAIL>',
1: '<EMAIL>'},
'ssn': {0: '655-15-0410', 1: '760-36-4013'}})
pdt.assert_frame_equal(expected, output)
@pytest.mark.skipif(__version__ == '0.2.4',
reason="Requires anonympy >= 0.2.5")
def test_categorical_resampling(anonym_small):
output = anonym_small.categorical_resampling('name',
inplace=False,
seed=42)
expected = pd.Series(['Bruce', 'Tony'])
pdt.assert_series_equal(expected, output, check_names=False)
output = anonym_small.categorical_resampling(['web', 'ssn'],
seed=2,
inplace=False)
expected = pd.DataFrame({'web':
{0: 'http://www.alandrosenburgcpapc.co.uk',
1: 'http://www.alandrosenburgcpapc.co.uk'},
'ssn': {0: '656564664', 1: '343554334'}})
pdt.assert_frame_equal(expected, output)
@pytest.mark.skipif(__version__ == '0.2.4',
reason="Requires anonympy >= 0.2.5")
def test_categorical_tokenization(anonym_small):
output = anonym_small.categorical_tokenization('name',
key='test',
inplace=False)
expected = pd.Series(['45fe1a783c', 'bda8a41313'])
pdt.assert_series_equal(expected, output, check_names=False)
output = anonym_small.categorical_tokenization(['web', 'ssn'],
key='test',
inplace=False)
expected = pd.DataFrame({'web': {0: 'e667d84f37', 1: '986a819ea2'},
'ssn': {0: '0f7c17cc6f', 1: 'f42ad34907'}})
pdt.assert_frame_equal(expected, output)
def test_categorical_email_masking(anonym_small):
output = anonym_small.categorical_email_masking('email', inplace=False)
expected = pd.Series(['<EMAIL>', '<EMAIL>'])
pdt.assert_series_equal(expected, output, check_names=False)
output = anonym_small.categorical_email_masking(['email', 'email'],
inplace=False)
expected = pd.DataFrame(
{'email': {0: '<EMAIL>', 1: '<EMAIL>'}})
pdt.assert_frame_equal(expected, output)
def test_datetime_noise(anonym_small):
output = anonym_small.datetime_noise('birthdate', seed=42, inplace=False)
expected = pd.Series([pd.Timestamp('1914-07-22 00:00:00'),
| pd.Timestamp('1970-10-25 00:00:00') | pandas.Timestamp |
# -*- coding: utf-8 -*-
import csv
import os
import platform
import codecs
import re
import sys
from datetime import datetime
import pytest
import numpy as np
from pandas._libs.lib import Timestamp
import pandas as pd
import pandas.util.testing as tm
from pandas import DataFrame, Series, Index, MultiIndex
from pandas import compat
from pandas.compat import (StringIO, BytesIO, PY3,
range, lrange, u)
from pandas.errors import DtypeWarning, EmptyDataError, ParserError
from pandas.io.common import URLError
from pandas.io.parsers import TextFileReader, TextParser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
# Parsers support only length-1 decimals
msg = 'Only length-1 decimal markers supported'
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(data), decimal='')
def test_bad_stream_exception(self):
# Issue 13652:
# This test validates that both python engine
# and C engine will raise UnicodeDecodeError instead of
# c engine raising ParserError and swallowing exception
# that caused read to fail.
handle = open(self.csv_shiftjs, "rb")
codec = codecs.lookup("utf-8")
utf8 = codecs.lookup('utf-8')
# stream must be binary UTF8
stream = codecs.StreamRecoder(
handle, utf8.encode, utf8.decode, codec.streamreader,
codec.streamwriter)
if compat.PY3:
msg = "'utf-8' codec can't decode byte"
else:
msg = "'utf8' codec can't decode byte"
with tm.assert_raises_regex(UnicodeDecodeError, msg):
self.read_csv(stream)
stream.close()
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
self.read_csv(fname, index_col=0, parse_dates=True)
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
assert isinstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# see gh-8217
# Series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
assert not result._is_view
def test_malformed(self):
# see gh-6607
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 4, saw 5'
with tm.assert_raises_regex(Exception, msg):
self.read_table(StringIO(data), sep=',',
header=1, comment='#')
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
it.read(5)
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
it.read(3)
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
it.read()
# skipfooter is not supported with the C parser yet
if self.engine == 'python':
# skipfooter
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
msg = 'Expected 3 fields in line 4, saw 5'
with tm.assert_raises_regex(Exception, msg):
self.read_table(StringIO(data), sep=',',
header=1, comment='#',
skipfooter=1)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>""" # noqa
pytest.raises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
assert len(df) == 3
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = np.array([[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]], dtype=np.int64)
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
tm.assert_index_equal(df.columns,
Index(['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4']))
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
expected = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]})
out = self.read_csv(StringIO(data))
tm.assert_frame_equal(out, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
tm.assert_index_equal(df.columns, pd.Index(['A', 'B', 'C', 'D']))
assert df.index.name == 'index'
assert isinstance(
df.index[0], (datetime, np.datetime64, Timestamp))
assert df.values.dtype == np.float64
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
tm.assert_index_equal(df.columns,
pd.Index(['A', 'B', 'C', 'D', 'E']))
assert isinstance(df.index[0], (datetime, np.datetime64, Timestamp))
assert df.loc[:, ['A', 'B', 'C', 'D']].values.dtype == np.float64
tm.assert_frame_equal(df, df2)
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = self.read_table(fin, sep=";", encoding="utf-8", header=None)
assert isinstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few!
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
pytest.raises(ValueError, self.read_csv, StringIO(data))
def test_read_duplicate_index_explicit(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index(
'index', verify_integrity=False)
tm.assert_frame_equal(result, expected)
result = self.read_table(StringIO(data), sep=',', index_col=0)
expected = self.read_table(StringIO(data), sep=',', ).set_index(
'index', verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# make sure an error isn't thrown
self.read_csv(StringIO(data))
self.read_table(StringIO(data), sep=',')
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.bool_
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
assert data['A'].dtype == np.bool_
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.bool_
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.float64
assert data['B'].dtype == np.int64
def test_read_nrows(self):
expected = self.read_csv(StringIO(self.data1))[:3]
df = self.read_csv(StringIO(self.data1), nrows=3)
tm.assert_frame_equal(df, expected)
# see gh-10476
df = self.read_csv(StringIO(self.data1), nrows=3.0)
tm.assert_frame_equal(df, expected)
msg = r"'nrows' must be an integer >=0"
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows=1.2)
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows='foo')
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows=-1)
def test_read_chunksize(self):
reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# with invalid chunksize value:
msg = r"'chunksize' must be an integer >=1"
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), chunksize=1.3)
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), chunksize='foo')
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), chunksize=0)
def test_read_chunksize_and_nrows(self):
# gh-15755
# With nrows
reader = self.read_csv(StringIO(self.data1), index_col=0,
chunksize=2, nrows=5)
df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5)
tm.assert_frame_equal(pd.concat(reader), df)
# chunksize > nrows
reader = self.read_csv(StringIO(self.data1), index_col=0,
chunksize=8, nrows=5)
df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5)
tm.assert_frame_equal(pd.concat(reader), df)
# with changing "size":
reader = self.read_csv(StringIO(self.data1), index_col=0,
chunksize=8, nrows=5)
df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5)
tm.assert_frame_equal(reader.get_chunk(size=2), df.iloc[:2])
tm.assert_frame_equal(reader.get_chunk(size=4), df.iloc[2:5])
with pytest.raises(StopIteration):
reader.get_chunk(size=3)
def test_read_chunksize_named(self):
reader = self.read_csv(
StringIO(self.data1), index_col='index', chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col='index')
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_get_chunk_passed_chunksize(self):
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
result = self.read_csv(StringIO(data), chunksize=2)
piece = result.get_chunk()
assert len(piece) == 2
def test_read_chunksize_generated_index(self):
# GH 12185
reader = self.read_csv(StringIO(self.data1), chunksize=2)
df = self.read_csv(StringIO(self.data1))
tm.assert_frame_equal(pd.concat(reader), df)
reader = self.read_csv(StringIO(self.data1), chunksize=2, index_col=0)
df = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(pd.concat(reader), df)
def test_read_text_list(self):
data = """A,B,C\nfoo,1,2,3\nbar,4,5,6"""
as_list = [['A', 'B', 'C'], ['foo', '1', '2', '3'], ['bar',
'4', '5', '6']]
df = self.read_csv(StringIO(data), index_col=0)
parser = TextParser(as_list, index_col=0, chunksize=2)
chunk = parser.read(None)
tm.assert_frame_equal(chunk, df)
def test_iterator(self):
# See gh-6607
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
assert isinstance(treader, TextFileReader)
# gh-3967: stopping iteration when chunksize is specified
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
assert len(result) == 3
tm.assert_frame_equal(pd.concat(result), expected)
# skipfooter is not supported with the C parser yet
if self.engine == 'python':
# test bad parameter (skipfooter)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skipfooter=1)
pytest.raises(ValueError, reader.read, 3)
def test_pass_names_with_index(self):
lines = self.data1.split('\n')
no_header = '\n'.join(lines[1:])
# regular index
names = ['index', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=0, names=names)
expected = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(df, expected)
# multi index
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['index1', 'index2', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), index_col=['index1', 'index2'])
tm.assert_frame_equal(df, expected)
def test_multi_index_no_level_names(self):
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
data2 = """A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
header=None, names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected, check_names=False)
# 2 implicit first cols
df2 = self.read_csv(StringIO(data2))
tm.assert_frame_equal(df2, df)
# reverse order of index
df = self.read_csv(StringIO(no_header), index_col=[1, 0], names=names,
header=None)
expected = self.read_csv(StringIO(data), index_col=[1, 0])
tm.assert_frame_equal(df, expected, check_names=False)
def test_multi_index_blank_df(self):
# GH 14545
data = """a,b
"""
df = self.read_csv(StringIO(data), header=[0])
expected = DataFrame(columns=['a', 'b'])
tm.assert_frame_equal(df, expected)
round_trip = self.read_csv(StringIO(
expected.to_csv(index=False)), header=[0])
tm.assert_frame_equal(round_trip, expected)
data_multiline = """a,b
c,d
"""
df2 = self.read_csv(StringIO(data_multiline), header=[0, 1])
cols = MultiIndex.from_tuples([('a', 'c'), ('b', 'd')])
expected2 = DataFrame(columns=cols)
tm.assert_frame_equal(df2, expected2)
round_trip = self.read_csv(StringIO(
expected2.to_csv(index=False)), header=[0, 1])
tm.assert_frame_equal(round_trip, expected2)
def test_no_unnamed_index(self):
data = """ id c0 c1 c2
0 1 0 a b
1 2 0 c d
2 2 2 e f
"""
df = self.read_table(StringIO(data), sep=' ')
assert df.index.name is None
def test_read_csv_parse_simple_list(self):
text = """foo
bar baz
qux foo
foo
bar"""
df = self.read_csv(StringIO(text), header=None)
expected = DataFrame({0: ['foo', 'bar baz', 'qux foo',
'foo', 'bar']})
tm.assert_frame_equal(df, expected)
@tm.network
def test_url(self):
# HTTP(S)
url = ('https://raw.github.com/pandas-dev/pandas/master/'
'pandas/tests/io/parser/data/salaries.csv')
url_table = self.read_table(url)
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salaries.csv')
local_table = self.read_table(localtable)
tm.assert_frame_equal(url_table, local_table)
# TODO: ftp testing
@pytest.mark.slow
def test_file(self):
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salaries.csv')
local_table = self.read_table(localtable)
try:
url_table = self.read_table('file://localhost/' + localtable)
except URLError:
# fails on some systems
pytest.skip("failing on %s" %
' '.join(platform.uname()).strip())
tm.assert_frame_equal(url_table, local_table)
def test_path_pathlib(self):
df = tm.makeDataFrame()
result = tm.round_trip_pathlib(df.to_csv,
lambda p: self.read_csv(p, index_col=0))
tm.assert_frame_equal(df, result)
def test_path_localpath(self):
df = tm.makeDataFrame()
result = tm.round_trip_localpath(
df.to_csv,
lambda p: self.read_csv(p, index_col=0))
tm.assert_frame_equal(df, result)
def test_nonexistent_path(self):
# gh-2428: pls no segfault
# gh-14086: raise more helpful FileNotFoundError
path = '%s.csv' % tm.rands(10)
pytest.raises(compat.FileNotFoundError, self.read_csv, path)
def test_missing_trailing_delimiters(self):
data = """A,B,C,D
1,2,3,4
1,3,3,
1,4,5"""
result = self.read_csv(StringIO(data))
assert result['D'].isna()[1:].all()
def test_skipinitialspace(self):
s = ('"09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, '
'1.00361, 1.12551, 330.65659, 0355626618.16711, 73.48821, '
'314.11625, 1917.09447, 179.71425, 80.000, 240.000, -350, '
'70.06056, 344.98370, 1, 1, -0.689265, -0.692787, '
'0.212036, 14.7674, 41.605, -9999.0, -9999.0, '
'-9999.0, -9999.0, -9999.0, -9999.0, 000, 012, 128')
sfile = StringIO(s)
# it's 33 columns
result = self.read_csv(sfile, names=lrange(33), na_values=['-9999.0'],
header=None, skipinitialspace=True)
assert pd.isna(result.iloc[0, 29])
def test_utf16_bom_skiprows(self):
# #2298
data = u("""skip this
skip this too
A\tB\tC
1\t2\t3
4\t5\t6""")
data2 = u("""skip this
skip this too
A,B,C
1,2,3
4,5,6""")
path = '__%s__.csv' % tm.rands(10)
with tm.ensure_clean(path) as path:
for sep, dat in [('\t', data), (',', data2)]:
for enc in ['utf-16', 'utf-16le', 'utf-16be']:
bytes = dat.encode(enc)
with open(path, 'wb') as f:
f.write(bytes)
s = BytesIO(dat.encode('utf-8'))
if compat.PY3:
# somewhat False since the code never sees bytes
from io import TextIOWrapper
s = TextIOWrapper(s, encoding='utf-8')
result = self.read_csv(path, encoding=enc, skiprows=2,
sep=sep)
expected = self.read_csv(s, encoding='utf-8', skiprows=2,
sep=sep)
s.close()
tm.assert_frame_equal(result, expected)
def test_utf16_example(self):
path = tm.get_data_path('utf16_ex.txt')
# it works! and is the right length
result = self.read_table(path, encoding='utf-16')
assert len(result) == 50
if not compat.PY3:
buf = BytesIO(open(path, 'rb').read())
result = self.read_table(buf, encoding='utf-16')
assert len(result) == 50
def test_unicode_encoding(self):
pth = tm.get_data_path('unicode_series.csv')
result = self.read_csv(pth, header=None, encoding='latin-1')
result = result.set_index(0)
got = result[1][1632]
expected = u('\xc1 k\xf6ldum klaka (Cold Fever) (1994)')
assert got == expected
def test_trailing_delimiters(self):
# #2442. grumble grumble
data = """A,B,C
1,2,3,
4,5,6,
7,8,9,"""
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame({'A': [1, 4, 7], 'B': [2, 5, 8],
'C': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_escapechar(self):
# http://stackoverflow.com/questions/13824840/feature-request-for-
# pandas-read-csv
data = '''SEARCH_TERM,ACTUAL_URL
"bra tv bord","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals serie","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"''' # noqa
result = self.read_csv(StringIO(data), escapechar='\\',
quotechar='"', encoding='utf-8')
assert result['SEARCH_TERM'][2] == ('SLAGBORD, "Bergslagen", '
'IKEA:s 1700-tals serie')
tm.assert_index_equal(result.columns,
Index(['SEARCH_TERM', 'ACTUAL_URL']))
def test_int64_min_issues(self):
# #2599
data = 'A,B\n0,0\n0,'
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': [0, 0], 'B': [0, np.nan]})
tm.assert_frame_equal(result, expected)
def test_parse_integers_above_fp_precision(self):
data = """Numbers
17007000002000191
17007000002000191
17007000002000191
17007000002000191
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000194"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'Numbers': [17007000002000191,
17007000002000191,
17007000002000191,
17007000002000191,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000194]})
tm.assert_series_equal(result['Numbers'], expected['Numbers'])
def test_chunks_have_consistent_numerical_type(self):
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ["1.0", "2.0"] + integers)
with tm.assert_produces_warning(False):
df = self.read_csv(StringIO(data))
# Assert that types were coerced.
assert type(df.a[0]) is np.float64
assert df.a.dtype == np.float
def test_warn_if_chunks_have_mismatched_type(self):
warning_type = False
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ['a', 'b'] + integers)
# see gh-3866: if chunks are different types and can't
# be coerced using numerical types, then issue warning.
if self.engine == 'c' and self.low_memory:
warning_type = DtypeWarning
with tm.assert_produces_warning(warning_type):
df = self.read_csv(StringIO(data))
assert df.a.dtype == np.object
def test_integer_overflow_bug(self):
# see gh-2601
data = "65248E10 11\n55555E55 22\n"
result = self.read_csv(StringIO(data), header=None, sep=' ')
assert result[0].dtype == np.float64
result = self.read_csv(StringIO(data), header=None, sep=r'\s+')
assert result[0].dtype == np.float64
def test_catch_too_many_names(self):
# see gh-5156
data = """\
1,2,3
4,,6
7,8,9
10,11,12\n"""
pytest.raises(ValueError, self.read_csv, StringIO(data),
header=0, names=['a', 'b', 'c', 'd'])
def test_ignore_leading_whitespace(self):
# see gh-3374, gh-6607
data = ' a b c\n 1 2 3\n 4 5 6\n 7 8 9'
result = self.read_table(StringIO(data), sep=r'\s+')
expected = DataFrame({'a': [1, 4, 7], 'b': [2, 5, 8], 'c': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_chunk_begins_with_newline_whitespace(self):
# see gh-10022
data = '\n hello\nworld\n'
result = self.read_csv(StringIO(data), header=None)
assert len(result) == 2
# see gh-9735: this issue is C parser-specific (bug when
# parsing whitespace and characters at chunk boundary)
if self.engine == 'c':
chunk1 = 'a' * (1024 * 256 - 2) + '\na'
chunk2 = '\n a'
result = self.read_csv(StringIO(chunk1 + chunk2), header=None)
expected = DataFrame(['a' * (1024 * 256 - 2), 'a', ' a'])
tm.assert_frame_equal(result, expected)
def test_empty_with_index(self):
# see gh-10184
data = 'x,y'
result = self.read_csv(StringIO(data), index_col=0)
expected = DataFrame([], columns=['y'], index=Index([], name='x'))
tm.assert_frame_equal(result, expected)
def test_empty_with_multiindex(self):
# see gh-10467
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=['x', 'y'])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays(
[[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_reversed_multiindex(self):
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=[1, 0])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays(
[[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_float_parser(self):
# see gh-9565
data = '45e-1,4.5,45.,inf,-inf'
result = self.read_csv(StringIO(data), header=None)
expected = DataFrame([[float(s) for s in data.split(',')]])
tm.assert_frame_equal(result, expected)
def test_scientific_no_exponent(self):
# see gh-12215
df = DataFrame.from_items([('w', ['2e']), ('x', ['3E']),
('y', ['42e']), ('z', ['632E'])])
data = df.to_csv(index=False)
for prec in self.float_precision_choices:
df_roundtrip = self.read_csv(
StringIO(data), float_precision=prec)
tm.assert_frame_equal(df_roundtrip, df)
def test_int64_overflow(self):
data = """ID
00013007854817840016671868
00013007854817840016749251
00013007854817840016754630
00013007854817840016781876
00013007854817840017028824
00013007854817840017963235
00013007854817840018860166"""
# 13007854817840016671868 > UINT64_MAX, so this
# will overflow and return object as the dtype.
result = self.read_csv(StringIO(data))
assert result['ID'].dtype == object
# 13007854817840016671868 > UINT64_MAX, so attempts
# to cast to either int64 or uint64 will result in
# an OverflowError being raised.
for conv in (np.int64, np.uint64):
pytest.raises(OverflowError, self.read_csv,
StringIO(data), converters={'ID': conv})
# These numbers fall right inside the int64-uint64 range,
# so they should be parsed as string.
ui_max = np.iinfo(np.uint64).max
i_max = np.iinfo(np.int64).max
i_min = np.iinfo(np.int64).min
for x in [i_max, i_min, ui_max]:
result = self.read_csv(StringIO(str(x)), header=None)
expected = DataFrame([x])
tm.assert_frame_equal(result, expected)
# These numbers fall just outside the int64-uint64 range,
# so they should be parsed as string.
too_big = ui_max + 1
too_small = i_min - 1
for x in [too_big, too_small]:
result = self.read_csv(StringIO(str(x)), header=None)
expected = DataFrame([str(x)])
tm.assert_frame_equal(result, expected)
# No numerical dtype can hold both negative and uint64 values,
# so they should be cast as string.
data = '-1\n' + str(2**63)
expected = DataFrame([str(-1), str(2**63)])
result = self.read_csv(StringIO(data), header=None)
tm.assert_frame_equal(result, expected)
data = str(2**63) + '\n-1'
expected = DataFrame([str(2**63), str(-1)])
result = self.read_csv(StringIO(data), header=None)
tm.assert_frame_equal(result, expected)
def test_empty_with_nrows_chunksize(self):
# see gh-9535
expected = DataFrame([], columns=['foo', 'bar'])
result = self.read_csv(StringIO('foo,bar\n'), nrows=10)
tm.assert_frame_equal(result, expected)
result = next(iter(self.read_csv(
StringIO('foo,bar\n'), chunksize=10)))
tm.assert_frame_equal(result, expected)
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
result = self.read_csv(StringIO('foo,bar\n'),
nrows=10, as_recarray=True)
result = DataFrame(result[2], columns=result[1],
index=result[0])
tm.assert_frame_equal(DataFrame.from_records(
result), expected, check_index_type=False)
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
result = next(iter(self.read_csv(StringIO('foo,bar\n'),
chunksize=10, as_recarray=True)))
result = DataFrame(result[2], columns=result[1], index=result[0])
tm.assert_frame_equal(DataFrame.from_records(result), expected,
check_index_type=False)
def test_eof_states(self):
# see gh-10728, gh-10548
# With skip_blank_lines = True
expected = DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
# gh-10728: WHITESPACE_LINE
data = 'a,b,c\n4,5,6\n '
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
# gh-10548: EAT_LINE_COMMENT
data = 'a,b,c\n4,5,6\n#comment'
result = self.read_csv(StringIO(data), comment='#')
tm.assert_frame_equal(result, expected)
# EAT_CRNL_NOP
data = 'a,b,c\n4,5,6\n\r'
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
# EAT_COMMENT
data = 'a,b,c\n4,5,6#comment'
result = self.read_csv(StringIO(data), comment='#')
tm.assert_frame_equal(result, expected)
# SKIP_LINE
data = 'a,b,c\n4,5,6\nskipme'
result = self.read_csv(StringIO(data), skiprows=[2])
tm.assert_frame_equal(result, expected)
# With skip_blank_lines = False
# EAT_LINE_COMMENT
data = 'a,b,c\n4,5,6\n#comment'
result = self.read_csv(
StringIO(data), comment='#', skip_blank_lines=False)
expected = DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# IN_FIELD
data = 'a,b,c\n4,5,6\n '
result = self.read_csv(StringIO(data), skip_blank_lines=False)
expected = DataFrame(
[['4', 5, 6], [' ', None, None]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# EAT_CRNL
data = 'a,b,c\n4,5,6\n\r'
result = self.read_csv(StringIO(data), skip_blank_lines=False)
expected = DataFrame(
[[4, 5, 6], [None, None, None]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# Should produce exceptions
# ESCAPED_CHAR
data = "a,b,c\n4,5,6\n\\"
pytest.raises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
# ESCAPE_IN_QUOTED_FIELD
data = 'a,b,c\n4,5,6\n"\\'
pytest.raises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
# IN_QUOTED_FIELD
data = 'a,b,c\n4,5,6\n"'
pytest.raises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
def test_uneven_lines_with_usecols(self):
# See gh-12203
csv = r"""a,b,c
0,1,2
3,4,5,6,7
8,9,10
"""
# make sure that an error is still thrown
# when the 'usecols' parameter is not provided
msg = r"Expected \d+ fields in line \d+, saw \d+"
with tm.assert_raises_regex(ValueError, msg):
df = self.read_csv(StringIO(csv))
expected = DataFrame({
'a': [0, 3, 8],
'b': [1, 4, 9]
})
usecols = [0, 1]
df = self.read_csv(StringIO(csv), usecols=usecols)
tm.assert_frame_equal(df, expected)
usecols = ['a', 'b']
df = self.read_csv(StringIO(csv), usecols=usecols)
tm.assert_frame_equal(df, expected)
def test_read_empty_with_usecols(self):
# See gh-12493
names = ['Dummy', 'X', 'Dummy_2']
usecols = names[1:2] # ['X']
# first, check to see that the response of
# parser when faced with no provided columns
# throws the correct error, with or without usecols
errmsg = "No columns to parse from file"
with tm.assert_raises_regex(EmptyDataError, errmsg):
self.read_csv(StringIO(''))
with tm.assert_raises_regex(EmptyDataError, errmsg):
self.read_csv(StringIO(''), usecols=usecols)
expected = DataFrame(columns=usecols, index=[0], dtype=np.float64)
df = self.read_csv(StringIO(',,'), names=names, usecols=usecols)
tm.assert_frame_equal(df, expected)
expected = DataFrame(columns=usecols)
df = self.read_csv(StringIO(''), names=names, usecols=usecols)
tm.assert_frame_equal(df, expected)
def test_trailing_spaces(self):
data = "A B C \nrandom line with trailing spaces \nskip\n1,2,3\n1,2.,4.\nrandom line with trailing tabs\t\t\t\n \n5.1,NaN,10.0\n" # noqa
expected = DataFrame([[1., 2., 4.],
[5.1, np.nan, 10.]])
# gh-8661, gh-8679: this should ignore six lines including
# lines with trailing whitespace and blank lines
df = self.read_csv(StringIO(data.replace(',', ' ')),
header=None, delim_whitespace=True,
skiprows=[0, 1, 2, 3, 5, 6], skip_blank_lines=True)
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data.replace(',', ' ')),
header=None, delim_whitespace=True,
skiprows=[0, 1, 2, 3, 5, 6],
skip_blank_lines=True)
tm.assert_frame_equal(df, expected)
# gh-8983: test skipping set of rows after a row with trailing spaces
expected = DataFrame({"A": [1., 5.1], "B": [2., np.nan],
"C": [4., 10]})
df = self.read_table(StringIO(data.replace(',', ' ')),
delim_whitespace=True,
skiprows=[1, 2, 3, 5, 6], skip_blank_lines=True)
tm.assert_frame_equal(df, expected)
def test_raise_on_sep_with_delim_whitespace(self):
# see gh-6607
data = 'a b c\n1 2 3'
with tm.assert_raises_regex(ValueError,
'you can only specify one'):
self.read_table(StringIO(data), sep=r'\s', delim_whitespace=True)
def test_single_char_leading_whitespace(self):
# see gh-9710
data = """\
MyColumn
a
b
a
b\n"""
expected = DataFrame({'MyColumn': list('abab')})
result = self.read_csv( | StringIO(data) | pandas.compat.StringIO |
# COMMENT:
# Normalizing the data as in the paper:
# Single-cell data preprocessing: All single-cell analyses were implemented using a custom software
# package in Python for the analysis of STARmap experiments. The per-cell expression matrix was first
# normalized for the expression value E_ij across all genes j for each cell i with the formula:
# N_ij = ln(1 + median(E_i,:) * (E_ij / sum_j E_ij))
# (see methods: https://science.sciencemag.org/content/sci/suppl/2018/06/20/science.aat5691.DC1/aat5691-Wang-SM.pdf)
from scipy.io import loadmat
import numpy as np
import pandas as pd
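# ---------------------------------------------------------------------------
# Minimal illustrative sketch (not part of the original analysis code) of the
# per-cell normalization described in the comment above, applied literally to
# a cells x genes count matrix. The function name and the `expr` argument are
# hypothetical; the paper used its own custom STARmap package for this step.
def normalize_per_cell(expr):
    """N_ij = ln(1 + median(E_i,:) * (E_ij / sum_j E_ij))."""
    cell_totals = expr.sum(axis=1, keepdims=True)           # sum_j E_ij per cell i
    cell_medians = np.median(expr, axis=1, keepdims=True)   # median(E_i,:) per cell i
    return np.log1p(cell_medians * (expr / cell_totals))    # ln(1 + ...)
# ---------------------------------------------------------------------------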
x = loadmat('/Users/work/Documents/GitHub/ML_genomics_spatial_project_2020/sequentially_encoded/20180123_BS10_light.mat')
geneinfo = | pd.read_csv('/Users/work/Documents/GitHub/ML_genomics_spatial_project_2020/sequentially_encoded/gene_names.csv', header = 0) | pandas.read_csv |
import numpy as np
import pandas as pd
import pytest
from gmpy2 import bit_mask
from rulelist.datastructure.data import Data
from rulelist.rulelistmodel.categoricalmodel.categoricalstatistic import CategoricalFixedStatistic, \
CategoricalFreeStatistic
@pytest.fixture
def constant_parameters():
input_n_cutpoints = 5
input_discretization = "static"
input_target_data = "categorical"
input_minsupp = 0
dictinput = {"attribute1": np.arange(100),
"attribute2": np.array(["below50" if i < 50 else "above49" for i in range(100)])}
input_input_data = pd.DataFrame(data=dictinput)
yield input_input_data, input_n_cutpoints, input_discretization, input_target_data,input_minsupp
@pytest.fixture
def generate_inputvalues_one_target(constant_parameters):
input_input_data, input_n_cutpoints, input_discretization, input_target_data,input_minsupp = constant_parameters
# targets
dictoutput = {"target1": np.array(["below50" if i < 50 else "above49" for i in range(100)])}
input_output_data = pd.DataFrame(data=dictoutput)
data_class = Data(input_input_data, input_n_cutpoints, input_discretization,
input_output_data, input_target_data,input_minsupp)
input_bitarray_for_statistic = bit_mask(data_class.number_instances)
yield data_class, input_bitarray_for_statistic
@pytest.fixture
def generate_inputvalues_two_targets(constant_parameters):
input_input_data, input_n_cutpoints, input_discretization, input_target_data,input_minsupp = constant_parameters
# targets
dictoutput = {"target1": np.array(["below50" if i < 50 else "above49" for i in range(100)]),
"target2": np.array(["below25" if i < 25 else "above25" for i in range(100)])}
input_output_data = | pd.DataFrame(data=dictoutput) | pandas.DataFrame |
from time import sleep
import pathlib
import datetime
import pandas as pd
import trading_calendars
from vnpy.app.script_trader import ScriptEngine
from vnpy.trader.constant import Direction
from AccountTracker.database.database_influxdb import init
from AccountTracker.settings import database_set, acc_folder, sector_file
DAY_END = datetime.time(15, 15)
NIGHT_START = datetime.time(20, 50, 0)
NIGHT_END = datetime.time(3, 0, 0)
delta = datetime.timedelta(milliseconds=1)
dbmanager = init(1, database_set)
# Get public sessions data from Shanghai Stock Exchange
cn_calendar = trading_calendars.get_calendar('XSHG')
# sessions is datetime.date
sessions = [x.to_pydatetime().date() for x in cn_calendar.all_sessions]
try:
p = pathlib.Path(acc_folder['jinshan'])
outsource_df = pd.read_csv(p.name, parse_dates=['start', 'end'])
except:
outsource_df = None
# for sector market-value (mkv) calculation
sector = pd.read_csv(sector_file, header=None)
sector_map = dict(zip(sector.iloc[:, 1], sector.iloc[:, 2]))
def get_sym(s: str):
'''
rb2105 => rb
'''
a = 0
for c in s:
if c.isdigit():
break
a += 1
return s[:a]
def option_picker(s: str):
'''
    s: vt_symbol, e.g. rb2101.SHFE
    if s is an exchange-traded option, return the basic string, e.g. DCE.i2009-C-650
    if s is an SPC spread combination, skip it (tqsdk provides no kline for it), e.g. DCE.SPC a2101&m2101
'''
if '&' in s:
return None
elif len(s) > 12:
# options
        # print('exchange-traded option', s)
return s
return None
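# Illustrative behaviour of option_picker, inferred from the docstring above (the vt_symbols are
# assumed examples, not taken from live data):
#   option_picker('rb2101.SHFE')         -> None                (plain future, symbol too short)
#   option_picker('DCE.i2009-C-650')     -> 'DCE.i2009-C-650'   (exchange-traded option, kept)
#   option_picker('DCE.SPC a2101&m2101') -> None                (spread combination, skipped)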
def tradeTime_TRANS(dd: dict):
'''
    Modify trade/order dates (**CTP gateway only**):
    if a trade happened at night, shift its date back to the previous trading session;
    if a trade happened during the day, leave its date unchanged.
'''
if dd:
for k, v in dd.items():
if v.datetime.time() > NIGHT_START or v.datetime.time() < NIGHT_END:
# needs modify
tmp_index = sessions.index(v.datetime.date())
actual_date = sessions[tmp_index-1]
dd[k].datetime = v.datetime.replace(
year=actual_date.year, month=actual_date.month, day=actual_date.day)
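# Illustrative example (the dates are assumed to be consecutive trading sessions): a fill executed
# at 21:05 on Friday 2021-01-08 comes back from CTP stamped with the trading day 2021-01-11; since
# 21:05 falls in the night window, tradeTime_TRANS maps the date back to the previous session,
# giving 2021-01-08 21:05 as the actual trade time.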
def check_timestamp(d: dict):
'''
    Input: a trade dict and/or order dict.
    If entries in d share the same timestamp, nudge them apart (+1ms, +2ms, and so on)
    so that later writes do not overwrite earlier records in InfluxDB.
'''
if d:
unique_timestamp = []
i = 1
sorted_key = list(d.keys())
sorted_key.sort()
for k in sorted_key:
v = d[k]
if v.datetime in unique_timestamp:
v.datetime += delta * i
i += 1
unique_timestamp.append(v.datetime)
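# Illustrative effect (hypothetical objects with a .datetime attribute): three fills all stamped
# 09:30:00.000 leave check_timestamp as 09:30:00.000, 09:30:00.001 and 09:30:00.002, so none of
# them overwrites another when written to InfluxDB.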
def run(engine: ScriptEngine):
"""
    Notes on the main function of this script strategy:
    1. The only input argument is the ScriptEngine object, which is used for all query and request operations.
    2. This function is started in its own thread, unlike the event-driven strategy modules.
    3. Keep the while loop under the control of engine.strategy_active so the script can exit in a controlled way.
    Monitors the account and saves the data to InfluxDB.
    **** CTP gateway ****
    A. The order datetime is the actual submission time, but in trade callbacks a night-session fill is
       stamped with the trading-day date rather than the actual calendar date (the time itself is accurate),
       so a conversion is needed:
        1. Night-session fill => it must come from a night-session order => adjust the date via the order id (do not change the time)
        2. Daytime fill => no modification
    B. In trades, several fills may be returned under the same timestamp, so a trade written later would
       overwrite an earlier record:
        1. Before writing, check for identical timestamps and modify them directly, e.g. +1ms, +2ms within the same 500ms window
"""
# for comparing with latest records
__subscribe_list = []
all_contract = | pd.DataFrame() | pandas.DataFrame |
from googleapiclient.discovery import build
import pandas as pd
raw_data = {'title': [], 'channelTitle': [], 'tags': []}
df_marks = | pd.DataFrame(raw_data) | pandas.DataFrame |
import datetime
from time import sleep
import pandas as pd
from loguru import logger
import ofanalysis.const as const
import ofanalysis.utility as ut
import tushare as ts
class TSDataUpdate:
def __init__(self, ts_pro_token:str):
self.__pro = ts.pro_api(ts_pro_token)
self.__today = datetime.date.today()
def retrieve_all(self):
self.retrieve_stock_basic()
self.retrieve_stock_daily_basic()
self.retrieve_stock_daily()
self.retrieve_fund_basic()
self.retrieve_fund_nav()
self.retrieve_fund_share()
self.retrieve_fund_manager()
self.retrieve_fund_portfolio()
def retrieve_stock_basic(self):
        logger.info('Full refresh of stock basic info (stock_basic)')
        # read the data page by page
df_stock_basic = pd.DataFrame()
i = 0
        while True:  # read data in pages
df_batch_result = self.__pro.stock_basic(**{
"ts_code": "",
"name": "",
"exchange": "",
"market": "",
"is_hs": "",
"list_status": "",
"limit": const.EACH_TIME_ITEM,
"offset": i
}, fields=[
"ts_code",
"symbol",
"name",
"area",
"industry",
"market",
"list_date",
"is_hs",
"delist_date",
"list_status",
"curr_type",
"exchange",
"cnspell",
"enname",
"fullname"
])
if len(df_batch_result) == 0:
break
df_stock_basic = pd.concat([df_stock_basic, df_batch_result], ignore_index=True)
i += const.EACH_TIME_ITEM
        ut.db_del_dict_from_mongodb(  # full (non-incremental) refresh: clear existing data first
mongo_db_name=const.MONGODB_DB_TUSHARE,
col_name=const.MONGODB_COL_TS_STOCK_BASIC,
query_dict={}
)
ut.db_save_dict_to_mongodb(
mongo_db_name=const.MONGODB_DB_TUSHARE,
col_name=const.MONGODB_COL_TS_STOCK_BASIC,
target_dict=df_stock_basic.to_dict(orient='records')
)
def retrieve_stock_daily_basic(self):
        check_field = 'trade_date'  # field used as the incremental-update key
        logger.info('Updating daily stock indicators (stock_daily_basic)')
existed_records = ut.db_get_distinct_from_mongodb(
mongo_db_name=const.MONGODB_DB_TUSHARE,
col_name=const.MONGODB_COL_TS_STOCK_DAILY_BASIC,
field=check_field
)
        if len(existed_records) == 0:  # empty collection
            trade_cal_start_date = '20000101'
        else:
            existed_records.sort(reverse=True)  # sort in descending order
trade_cal_start_date = pd.to_datetime(existed_records[-1]) + datetime.timedelta(days=1)
trade_cal_start_date = trade_cal_start_date.strftime('%Y%m%d')
trade_cal_list = ut.get_trade_cal_from_ts(ts_pro_token=self.__pro, start_date=trade_cal_start_date)
for date in [x for x in trade_cal_list if x not in existed_records]:
            logger.info('Updating daily stock indicators (stock_daily_basic): data for %s' % date)
df_daily = pd.DataFrame()
i = 0
            while True:  # read data in pages
                for _ in range(const.RETRY_TIMES):  # retry on failure
try:
df_batch_daily = self.__pro.daily_basic(**{
"ts_code": "",
"trade_date": date,
"start_date": "",
"end_date": "",
"limit": const.EACH_TIME_ITEM,
"offset": i
}, fields=[
"ts_code",
"trade_date",
"close",
"turnover_rate",
"turnover_rate_f",
"volume_ratio",
"pe",
"pe_ttm",
"pb",
"ps",
"ps_ttm",
"dv_ratio",
"dv_ttm",
"total_share",
"float_share",
"free_share",
"total_mv",
"circ_mv"
])
except:
sleep(1)
else:
break
if len(df_batch_daily) == 0:
break
df_daily = pd.concat([df_daily, df_batch_daily], ignore_index=True)
i += const.EACH_TIME_ITEM
if len(df_daily) == 0:
                logger.info('Date %s: stock_daily_basic returned no data' % date)
continue
ut.db_save_dict_to_mongodb(
mongo_db_name=const.MONGODB_DB_TUSHARE,
col_name=const.MONGODB_COL_TS_STOCK_DAILY_BASIC,
target_dict=df_daily.to_dict(orient='records')
)
def retrieve_stock_daily(self):
        check_field = 'trade_date'  # field used as the incremental-update key
        logger.info('Updating daily stock bars (stock_daily)')
existed_records = ut.db_get_distinct_from_mongodb(
mongo_db_name=const.MONGODB_DB_TUSHARE,
col_name=const.MONGODB_COL_TS_STOCK_DAILY,
field=check_field
)
        if len(existed_records) == 0:  # empty collection
trade_cal_start_date = '20000101'
else:
            existed_records.sort(reverse=True)  # sort in descending order
trade_cal_start_date = | pd.to_datetime(existed_records[-1]) | pandas.to_datetime |
import math
import pandas as pd
import csv
import pathlib
import wx
import matplotlib
import matplotlib.pylab as pL
import matplotlib.pyplot as plt
import matplotlib.backends.backend_wxagg as wxagg
import re
import numpy as np
import scipy
import scipy.interpolate
import sys
#from mpl_toolkits.mplot3d import Axes3D
#import wx.lib.inspection as wxli
class ERTAPP(wx.Frame):
def __init__(self):
wx.Frame.__init__(self, None, wx.ID_ANY, title='ERT Editing',pos=(100,100),size=(500,500))
#Built from template here: https://wiki.wxpython.org/GridSizerTutorial
#Set up Panels
def setUpPanels(self):
self.topPanel = wx.Panel(self, wx.ID_ANY,size = (1000,10),name='Top Panel')
self.infoPanel = wx.Panel(self, wx.ID_ANY,size = (1000,50),name='Info Panel')
self.chartPanel = wx.Panel(self, wx.ID_ANY,size = (1000,500),name='Chart Panel')
self.bottomPanel= wx.Panel(self, wx.ID_ANY,size = (1000,130),name='Bottom Panel')
#need to create more panels, see here: https://stackoverflow.com/questions/31286082/matplotlib-in-wxpython-with-multiple-panels
def titleSetup(self):
bmp = wx.ArtProvider.GetBitmap(wx.ART_INFORMATION, wx.ART_OTHER, (4, 4))
self.titleIco = wx.StaticBitmap(self.topPanel, wx.ID_ANY, bmp)
self.title = wx.StaticText(self.topPanel, wx.ID_ANY, 'Advanced ERT Editing')
#Declare inputs for first row
def inputSetup(self):
bmp = wx.ArtProvider.GetBitmap(wx.ART_TIP, wx.ART_OTHER, (4, 4))
self.inputOneIco = wx.StaticBitmap(self.topPanel, wx.ID_ANY, bmp)
self.labelOne = wx.StaticText(self.topPanel, wx.ID_ANY, 'Input ERT Data')
self.inputTxtOne = wx.TextCtrl(self.topPanel, wx.ID_ANY, '')
self.inputTxtOne.SetHint('Enter data file path here')
self.inputBrowseBtn = wx.Button(self.topPanel, wx.ID_ANY, 'Browse')
self.Bind(wx.EVT_BUTTON, self.onBrowse, self.inputBrowseBtn)
self.readInFileBtn = wx.Button(self.topPanel, wx.ID_ANY, 'Read Data')
self.Bind(wx.EVT_BUTTON, self.onReadIn, self.readInFileBtn)
self.inputDataType = wx.Choice(self.topPanel, id=wx.ID_ANY,choices=['.DAT (LS)','.TXT (LS)','.DAT (SAS)', '.VTK', '.XYZ'],name='.TXT (LS)')
self.Bind(wx.EVT_CHOICE,self.onDataType,self.inputDataType)
self.autoShiftBx = wx.CheckBox(self.topPanel,wx.ID_ANY, 'Auto Shift?')
self.autoShiftBx.SetValue(True)
#Row 3 item(s)
self.TxtProfileName = wx.StaticText(self.infoPanel, wx.ID_ANY, 'Profile Name: ')
self.TxtProfileRange = wx.StaticText(self.infoPanel, wx.ID_ANY, 'Profile Length: ')
self.TxtDataPts = wx.StaticText(self.infoPanel, wx.ID_ANY, 'Data Points: ')
self.TxtBlank = wx.StaticText(self.infoPanel, wx.ID_ANY, '')
self.TxtBlank2 = wx.StaticText(self.infoPanel, wx.ID_ANY, '')
self.TxtMinElectSpcng = wx.StaticText(self.infoPanel, wx.ID_ANY, 'Min. Electrode Spacing: ')
self.TxtProjectName = wx.StaticText(self.infoPanel, wx.ID_ANY, 'Project Name: ')
self.TxtArray = wx.StaticText(self.infoPanel, wx.ID_ANY, 'Array: ')
self.msgProfileName = wx.StaticText(self.infoPanel, wx.ID_ANY, '')
self.msgProfileRange = wx.StaticText(self.infoPanel, wx.ID_ANY, '')
self.msgDataPts = wx.StaticText(self.infoPanel, wx.ID_ANY, '')
self.msgMinElectSpcng = wx.StaticText(self.infoPanel, wx.ID_ANY, '')
self.msgProjectName = wx.StaticText(self.infoPanel, wx.ID_ANY, '')
self.msgArray = wx.StaticText(self.infoPanel, wx.ID_ANY, '')
# DataViz Area item(s)
def dataVizSetup(self):
self.editSlider = wx.Slider(self.chartPanel, pos=(200,0), id=wx.ID_ANY, style=wx.SL_TOP | wx.SL_AUTOTICKS | wx.SL_LABELS, name='Edit Data')
self.Bind(wx.EVT_SCROLL, self.onSliderEditEVENT, self.editSlider)
self.dataVizMsg1 = wx.StaticText(self.chartPanel, wx.ID_ANY, '')
self.dataVizMsg2 = wx.StaticText(self.chartPanel, wx.ID_ANY, '')
self.dataVizInput = wx.TextCtrl(self.chartPanel, wx.ID_ANY, '')
self.dataVizInputBtn = wx.Button(self.chartPanel, -1, "Use Value")
self.dataVizInputBtn.Bind(wx.EVT_BUTTON, self.ONdataVizInput)
self.saveEditsBtn = wx.Button(self.chartPanel, -1, "Save Edits")
self.saveEditsBtn.Bind(wx.EVT_BUTTON, self.ONSaveEdits)
self.saveEditsBtn.SetBackgroundColour((100,175,100))
self.currentChart = 'Graph'
self.editDataChoiceList = ['AppResist','Resistance','Electrode x-Dists','Variance','PctErr','PseudoX','PseudoZ']
self.editDataChoiceBool = [False]*len(self.editDataChoiceList)
self.editDataValues = []
for i in self.editDataChoiceList:
self.editDataValues.append([0,0])
self.editDataType = wx.Choice(self.chartPanel, id=wx.ID_ANY,choices=self.editDataChoiceList,name='Edit Data')
self.editDataType.Bind(wx.EVT_CHOICE, self.onSelectEditDataType)
self.setEditToggleBtn = wx.ToggleButton(self.chartPanel,wx.ID_ANY,'Unused',size=(25,30))
self.setEditToggleBtn.Bind(wx.EVT_TOGGLEBUTTON, self.onSetEditToggle)
self.labelMinRem = wx.StaticText(self.chartPanel, wx.ID_ANY, 'Min.')
self.inputTxtMinRem = wx.TextCtrl(self.chartPanel, wx.ID_ANY,style=wx.TE_PROCESS_ENTER, name='')
self.inputTxtMinRem.Bind(wx.EVT_TEXT_ENTER, self.onEditDataValueChangeEvent)
self.labelMaxRem = wx.StaticText(self.chartPanel, wx.ID_ANY,'Max.')
self.inputTxtMaxRem = wx.TextCtrl(self.chartPanel, wx.ID_ANY,style=wx.TE_PROCESS_ENTER,name= '')
self.inputTxtMaxRem.Bind(wx.EVT_TEXT_ENTER, self.onEditDataValueChangeEvent)
self.editTypeToggleBtn = wx.ToggleButton(self.chartPanel,wx.ID_ANY,'Remove',size=(25,50))
self.editTypeToggleBtn.Bind(wx.EVT_TOGGLEBUTTON, self.onEditTypeToggle)
self.editLogicToggleBtn = wx.ToggleButton(self.chartPanel,wx.ID_ANY,'OR',size=(25,25))
self.editLogicToggleBtn.Bind(wx.EVT_TOGGLEBUTTON, self.onLogicToggle)
self.removePtsBtn = wx.Button(self.chartPanel, -1, "Edit Points")
self.removePtsBtn.Bind(wx.EVT_BUTTON, self.onRemovePts)
self.electrodeToggleBtn = wx.ToggleButton(self.chartPanel,wx.ID_ANY,'On',size=(25,25))
self.electrodeToggleBtn.Bind(wx.EVT_TOGGLEBUTTON, self.ONtoggle)
self.GraphEditBtn = wx.Button(self.chartPanel, -1, "Graphic Editor", size=(100, 30))
self.GraphEditBtn.Bind(wx.EVT_BUTTON, self.graphChartEvent)
self.StatEditBtn = wx.Button(self.chartPanel, -1, "Statistical Editor", size=(100, 30))
self.Bind(wx.EVT_BUTTON, self.statChartEvent, self.StatEditBtn)
self.addGPSBtn = wx.Button(self.chartPanel, -1, "GPS Data", size=(100, 30))
self.addGPSBtn.Bind(wx.EVT_BUTTON, self.GPSChartEvent)
self.addTopoBtn = wx.Button(self.chartPanel, -1, "Topography Data", size=(100, 30))
self.addTopoBtn.Bind(wx.EVT_BUTTON, self.topoChartEvent)
self.reviewBtn = wx.Button(self.chartPanel, -1, "Review Edits", size=(100, 15))
self.reviewBtn.Bind(wx.EVT_BUTTON, self.reviewEvent)
def bottomAreaSetup(self):
# Row 4 items
self.reverseBx = wx.CheckBox(self.bottomPanel,wx.ID_ANY, 'Reverse Profile')
self.labelGPSIN = wx.StaticText(self.bottomPanel, wx.ID_ANY, 'GPS Data')
self.inputTxtGPS = wx.TextCtrl(self.bottomPanel, wx.ID_ANY, 'Enter GPS Filepath Here')
self.inputGPSBtn = wx.Button(self.bottomPanel, wx.ID_ANY, 'Browse')
self.Bind(wx.EVT_BUTTON, self.onGPSBrowse, self.inputGPSBtn)
self.Bind(wx.EVT_CHECKBOX, self.onReverse, self.reverseBx)
self.dataEditMsg = wx.StaticText(self.bottomPanel, wx.ID_ANY, '')
self.labelTopoIN = wx.StaticText(self.bottomPanel, wx.ID_ANY, 'Topo Data')
self.inputTxtTopo = wx.TextCtrl(self.bottomPanel, wx.ID_ANY, 'Enter Topo Filepath Here')
self.inputTopoBtn = wx.Button(self.bottomPanel, wx.ID_ANY, 'Browse')
self.includeTopoBx = wx.CheckBox(self.bottomPanel,wx.ID_ANY, 'Include Topography')
self.Bind(wx.EVT_BUTTON, self.onTopoBrowse, self.inputTopoBtn)
self.Bind(wx.EVT_CHECKBOX, self.onIncludeTopo, self.includeTopoBx)
#Bottom Row items
self.saveBtn = wx.Button(self.bottomPanel, wx.ID_ANY, 'Export and Save Data')
self.cancelBtn = wx.Button(self.bottomPanel, wx.ID_ANY, 'Cancel')
self.Bind(wx.EVT_BUTTON, self.onExport, self.saveBtn)
self.Bind(wx.EVT_BUTTON, self.onCancel, self.cancelBtn)
self.labelExport = wx.StaticText(self.bottomPanel, wx.ID_ANY, 'Export Data')
self.exportTXT = wx.TextCtrl(self.bottomPanel, wx.ID_ANY, 'Enter Export Filepath Here')
self.exportDataBtn = wx.Button(self.bottomPanel, wx.ID_ANY, 'Browse')
self.Bind(wx.EVT_BUTTON, self.onExportBrowse, self.exportDataBtn)
#Set up chart
def chartSetup(self):
self.chartSizer = wx.BoxSizer(wx.VERTICAL)
self.figure = matplotlib.figure.Figure()
self.canvas = wxagg.FigureCanvasWxAgg(self.chartPanel, -1, self.figure)
self.axes = self.figure.add_subplot(111)
self.axes.set_xlabel('X-Distance (m)')
self.axes.set_ylabel('Depth (m)')
self.toolbar = wxagg.NavigationToolbar2WxAgg(self.canvas)
def sizersSetup(self):
#Set up sizers
self.baseSizer = wx.BoxSizer(wx.VERTICAL)
self.topSizer = wx.BoxSizer(wx.VERTICAL)
self.titleSizer = wx.BoxSizer(wx.HORIZONTAL)
self.inputSizer = wx.BoxSizer(wx.HORIZONTAL)
#self.readMsgSizer = wx.BoxSizer(wx.HORIZONTAL)
self.profileInfoSizer = wx.BoxSizer(wx.HORIZONTAL)
self.profileTxtSizer1 = wx.BoxSizer(wx.VERTICAL)
self.profileTxtSizer2 = wx.BoxSizer(wx.VERTICAL)
self.profileMsgSizer1 = wx.BoxSizer(wx.VERTICAL)
self.profileMsgSizer2 = wx.BoxSizer(wx.VERTICAL)
self.profileInfoSizer = wx.BoxSizer(wx.HORIZONTAL)
self.ctrlSizer = wx.BoxSizer(wx.VERTICAL)
self.chartSizer = wx.BoxSizer(wx.VERTICAL)
self.dataVizSizer = wx.BoxSizer(wx.HORIZONTAL)
self.vizInfoSizer = wx.BoxSizer(wx.HORIZONTAL)
self.dataEditSizer = wx.BoxSizer(wx.HORIZONTAL)
self.bottomSizer = wx.BoxSizer(wx.VERTICAL)
self.GPSSizer = wx.BoxSizer(wx.HORIZONTAL)
self.TopoSizer = wx.BoxSizer(wx.HORIZONTAL)
self.botSizer = wx.BoxSizer(wx.HORIZONTAL)
def addtoSizers(self):
#Add items to sizers
self.titleSizer.Add(self.title, 0, wx.ALIGN_CENTER)
self.inputSizer.Add(self.labelOne, 1,wx.ALIGN_CENTER,5)
self.inputSizer.Add(self.inputTxtOne, 8,wx.EXPAND,5)
self.inputSizer.Add(self.inputBrowseBtn,1,wx.ALIGN_CENTER,5)
self.inputSizer.Add(self.inputDataType,1,wx.ALIGN_CENTER,5)
self.inputSizer.Add(self.readInFileBtn,1,wx.ALIGN_CENTER,5)
self.inputSizer.Add(self.autoShiftBx, 1, wx.ALIGN_CENTER, 5)
#self.readMsgSizer.Add(self.msgLabelOne, 0, wx.ALL,5)
self.profileTxtSizer1.Add(self.TxtProfileName, 0, wx.ALIGN_LEFT,5)
self.profileTxtSizer1.Add(self.TxtProfileRange, 0, wx.ALIGN_LEFT,5)
self.profileTxtSizer1.Add(self.TxtDataPts, 0, wx.ALIGN_LEFT,5)
self.profileTxtSizer2.Add(self.TxtMinElectSpcng, 0, wx.ALIGN_LEFT,5)
self.profileTxtSizer2.Add(self.TxtArray, 0, wx.ALIGN_LEFT,5)
self.profileTxtSizer2.Add(self.TxtProjectName, 0, wx.ALIGN_LEFT,5)
self.profileMsgSizer1.Add(self.msgProfileName, 0, wx.ALIGN_LEFT,5)
self.profileMsgSizer1.Add(self.msgProfileRange, 0, wx.ALIGN_LEFT,5)
self.profileMsgSizer1.Add(self.msgDataPts, 0, wx.ALIGN_LEFT,5)
self.profileMsgSizer2.Add(self.msgMinElectSpcng, 0, wx.ALIGN_LEFT,5)
self.profileMsgSizer2.Add(self.msgArray, 0, wx.ALIGN_LEFT,5)
self.profileMsgSizer2.Add(self.msgProjectName, 0, wx.ALIGN_LEFT,5)
self.profileInfoSizer.Add(self.profileTxtSizer1, 1,wx.ALL,5)
self.profileInfoSizer.Add(self.profileMsgSizer1,3,wx.ALL,5)
self.profileInfoSizer.Add(self.profileTxtSizer2, 1, wx.ALL, 5)
self.profileInfoSizer.Add(self.profileMsgSizer2, 3, wx.ALL, 5)
self.topSizer.Add(self.titleSizer,1,wx.ALL,5)
self.topSizer.Add(self.inputSizer, 2, wx.ALL, 5)
#self.topSizer.Add(self.readMsgSizer, 1, wx.ALL, 5)
self.vizInfoSizer.Add(self.dataVizMsg1,16,wx.ALL,5)
self.vizInfoSizer.Add(self.dataVizMsg2, 24, wx.ALL, 5)
self.vizInfoSizer.Add(self.electrodeToggleBtn,1,wx.ALL,5)
self.vizInfoSizer.Add(self.dataVizInput, 1, wx.ALL, 5)
self.vizInfoSizer.Add(self.dataVizInputBtn,3,wx.ALL,5)
self.vizInfoSizer.Add(self.saveEditsBtn,3,wx.ALL,5)
self.ctrlSizer.Add(self.GraphEditBtn, 2, wx.ALL, 5)
self.ctrlSizer.Add(self.StatEditBtn, 2, wx.ALL, 5)
self.ctrlSizer.Add(self.addGPSBtn, 2, wx.ALL, 5)
self.ctrlSizer.Add(self.addTopoBtn, 2, wx.ALL, 5)
self.ctrlSizer.Add(self.reviewBtn,1,wx.ALL,5)
self.dataEditSizer.Add(self.editDataType,5, wx.ALL, 5)
self.dataEditSizer.Add(self.setEditToggleBtn,2,wx.ALL,5)
self.dataEditSizer.Add(self.labelMinRem, 2, wx.ALL, 5)
self.dataEditSizer.Add(self.inputTxtMinRem, 3, wx.ALL, 5)
self.dataEditSizer.Add(self.inputTxtMaxRem, 3, wx.ALL, 5)
self.dataEditSizer.Add(self.labelMaxRem, 2, wx.ALL, 5)
self.dataEditSizer.Add(self.editTypeToggleBtn,3,wx.ALL,5)
self.dataEditSizer.Add(self.editLogicToggleBtn,2,wx.ALL,5)
self.dataEditSizer.Add(self.removePtsBtn, 3, wx.ALL, 5)
self.chartSizer.Add(self.vizInfoSizer, 1, wx.ALL, 5)
self.chartSizer.Add(self.editSlider,1, wx.LEFT | wx.RIGHT | wx.EXPAND,94)
self.chartSizer.Add(self.canvas, 12, wx.EXPAND)
self.chartSizer.Add(self.toolbar, 1, wx.EXPAND)
self.chartSizer.Add(self.dataEditSizer,1,wx.EXPAND)
self.dataVizSizer.Add(self.ctrlSizer,1,wx.EXPAND)
self.dataVizSizer.Add(self.chartSizer,6,wx.EXPAND)
self.GPSSizer.Add(self.dataEditMsg, 2, wx.ALL, 5)
self.GPSSizer.Add(self.reverseBx, 1, wx.ALL, 5)
self.GPSSizer.Add(self.labelGPSIN, 1, wx.ALIGN_CENTER_VERTICAL | wx.ALL, 5)
self.GPSSizer.Add(self.inputTxtGPS, 8, wx.ALIGN_CENTER_VERTICAL | wx.ALL, 5)
self.GPSSizer.Add(self.inputGPSBtn, 1, wx.ALIGN_CENTER_VERTICAL | wx.ALL, 5)
self.TopoSizer.Add(self.includeTopoBx, 2, wx.ALIGN_CENTER_VERTICAL | wx.ALL, 5)
self.TopoSizer.Add(self.labelTopoIN, 1, wx.ALIGN_CENTER_VERTICAL| wx.ALL, 5)
self.TopoSizer.Add(self.inputTxtTopo, 8, wx.ALIGN_CENTER_VERTICAL| wx.ALL, 5)
self.TopoSizer.Add(self.inputTopoBtn, 1, wx.ALIGN_CENTER_VERTICAL| wx.ALL, 5)
self.botSizer.Add(self.labelExport, 1, wx.ALL, 5)
self.botSizer.Add(self.exportTXT,6, wx.ALL, 5)
self.botSizer.Add(self.exportDataBtn,1, wx.ALL, 5)
self.botSizer.Add(self.cancelBtn, 1, wx.ALL, 5)
self.botSizer.Add(self.saveBtn, 1, wx.ALL, 5)
#btnSizer.Add(saveEditsBtn,0,wx.ALL,5)
self.bottomSizer.Add(self.GPSSizer,0, wx.ALIGN_RIGHT | wx.ALL, 5)
self.bottomSizer.Add(self.TopoSizer,0, wx.ALIGN_RIGHT | wx.ALL, 5)
self.bottomSizer.Add(self.botSizer,0, wx.ALIGN_RIGHT | wx.ALL, 5)
def addtoPanels(self):
self.topPanel.SetSizer(self.topSizer)
self.infoPanel.SetSizer(self.profileInfoSizer)
self.chartPanel.SetSizer(self.dataVizSizer)
self.bottomPanel.SetSizer(self.bottomSizer)
self.topPanel.Layout()
self.baseSizer.Add(self.topPanel,1, wx.EXPAND,1)
self.baseSizer.Add(self.infoPanel,1,wx.EXPAND,1)
self.baseSizer.Add(self.chartPanel, 10, wx.EXPAND | wx.ALL, 5)
self.baseSizer.Add(self.bottomPanel, 1, wx.EXPAND | wx.ALL, 1)
self.SetSizer(self.baseSizer)
self.SetSize(1100,950)
def variableInfo(): #To see what the 'global' variables are
pass
#self.electxDataIN: list of all electrode xdistances
#self.xCols: list with numbers of columns with x-values, from initial read-in table. varies with datatype
#self.xData: list with all x-values of data points
#self.zData: list with all z-values of data points (depth)
#self.values: list with all resist. values of data points
#self.inputDataExt: extension of file read in, selected from initial drop-down (default = .dat (LS))
#self.xDF : dataframe with only x-dist of electrodes, and all of them
#self.dataHeaders: headers from original file read in, used for column names for dataframeIN
#self.dataListIN: nested list that will be used to create dataframe, with all read-in data
#self.dataframeIN: initial dataframe from data that is read in
#self.df: dataframe formatted for editing, but remaining static as initial input data
#self.dataframeEDIT: dataframe that is manipulated during editing
#self.electrodes: sorted list of all electrode xdistances
#self.electrodesShifted: shifted, sorted list of all electrode xdistances
#self.electState:list of booleans giving status of electrode (True = in use, False = edited out)
#self.electrodeElevs: surface elevation values at each electrode
#self.dataLengthIN: number of measurements in file/length of dataframes
#self.dataframeEDITColHeaders
#self.dataShifted: indicates whether data has been shifted
setUpPanels(self)
titleSetup(self)
inputSetup(self)
dataVizSetup(self)
bottomAreaSetup(self)
chartSetup(self)
sizersSetup(self)
addtoSizers(self)
addtoPanels(self)
#wxli.InspectionTool().Show(self)
#Initial Plot
def nullFunction(self,event):
pass
def onBrowse(self,event):
with wx.FileDialog(self,"Open Data File", style= wx.FD_OPEN | wx.FD_FILE_MUST_EXIST) as fileDialog:
if fileDialog.ShowModal() == wx.ID_CANCEL:
return
self.dataPath = pathlib.Path(fileDialog.GetPath())
fName = str(self.dataPath.parent) + '\\' + self.dataPath.name
self.inputDataExt = self.dataPath.suffix
try:
with open(self.dataPath,'r') as datafile:
self.inputTxtOne.SetValue(fName)
except IOError:
wx.LogError("Cannot Open File")
if self.inputDataExt.lower() == '.txt':
self.inputDataExt = '.TXT (LS)'
n = 1
elif self.inputDataExt.lower() == '.dat':
if self.dataPath.stem.startswith('lr'):
self.inputDataExt = '.DAT (SAS)'
n = 2
else:
self.inputDataExt = '.DAT (LS)'
n = 0
elif self.inputDataExt.lower() == '.vtk':
self.inputDataExt = '.VTK'
n=3
elif self.inputDataExt.lower() == '.xyz':
self.inputDataExt = '.XYZ'
n=4
else:
wx.LogError("Cannot Open File")
if self.inputDataExt == '.DAT (LS)' or self.inputDataExt == '.TXT (LS)':
outPath = self.dataPath.stem.split('-')[0]
else:
outPath = self.dataPath.stem.split('.')[0]
if outPath.startswith('lr'):
outPath = outPath[2:]
outPath = outPath +'_pyEdit.dat'
if self.includeTopoBx.GetValue():
outPath = outPath[:-4]
outPath = outPath + "_topo.dat"
self.exportTXT.SetValue(str(self.dataPath.with_name(outPath)))
self.inputDataType.SetSelection(n)
self.readInFileBtn.SetLabelText('Read Data')
def onGPSBrowse(self,event):
with wx.FileDialog(self,"Open GPS File", style= wx.FD_OPEN | wx.FD_FILE_MUST_EXIST) as fileDialog:
if fileDialog.ShowModal() == wx.ID_CANCEL:
return
self.GPSpath = pathlib.Path(fileDialog.GetPath())
gpsFName = str(self.GPSpath.parent) + '\\' + self.GPSpath.name
self.inputTxtGPS.SetValue(gpsFName)
self.getGPSVals()
def getGPSVals(self):
with open(self.GPSpath) as GPSFile:
data = csv.reader(GPSFile)
self.gpsXData = []
self.gpsYData = []
self.gpsLabels = []
for row in enumerate(data):
if row[0] == 0:
pass #headerline
else:
r = re.split('\t+', str(row[1][0]))
                    if r[0] == '':  # skip blank rows
pass
else:
self.gpsLabels.append(r[2])
self.gpsXData.append(float(r[3]))
self.gpsYData.append(float(r[4]))
def onTopoBrowse(self,event):
with wx.FileDialog(self,"Open Topo File", style= wx.FD_OPEN | wx.FD_FILE_MUST_EXIST) as fileDialog:
if fileDialog.ShowModal() == wx.ID_CANCEL:
return
self.topoPath = pathlib.Path(fileDialog.GetPath())
topoFName = str(self.topoPath.parent) + '\\' + self.topoPath.name
self.inputTxtTopo.SetValue(topoFName)
self.includeTopoBx.SetValue(True)
self.getTopoVals()
self.topoText()
def onIncludeTopo(self,event):
self.topoText()
def topoText(self):
if self.includeTopoBx.GetValue() == True:
#print('topo' not in self.exportTXT.GetValue())
if 'topo' not in self.exportTXT.GetValue():
#print("It's Not in")
if len(self.exportTXT.GetValue())>0:
outPath = self.exportTXT.GetValue()
outPath = outPath[:int(len(outPath)-4)]
outPath = outPath + "_topo.dat"
self.exportTXT.SetValue(outPath)
elif self.includeTopoBx.GetValue() == False:
if '_topo' in self.exportTXT.GetValue():
outPath = self.exportTXT.GetValue()
#print(outPath)
strInd = int(outPath.find("_topo"))
strInd2 = strInd + 5
outPath = outPath[:strInd]+outPath[strInd2:]
self.exportTXT.SetValue(outPath)
def onReverse(self,event):
self.reverseText()
def reverseText(self):
if self.reverseBx.GetValue() == True:
if '_rev' not in self.exportTXT.GetValue():
if len(self.exportTXT.GetValue())>0:
outPath = self.exportTXT.GetValue()
outPath = outPath[:int(len(outPath)-4)]
outPath = outPath + "_rev.dat"
self.exportTXT.SetValue(outPath)
elif self.reverseBx.GetValue() == False:
if '_rev' in self.exportTXT.GetValue():
outPath = self.exportTXT.GetValue()
#print(outPath)
strInd = int(outPath.find("_rev"))
strInd2 = strInd + 4
outPath = outPath[:strInd]+outPath[strInd2:]
self.exportTXT.SetValue(outPath)
def getTopoVals(self):
with open(self.topoPath) as topoFile:
data = csv.reader(topoFile)
topoXData = []
topoYData = []
topoLabels = []
for row in enumerate(data):
if row[0] == 0:
pass
else:
r = re.split('\t+', str(row[1][0]))
if r[0] == '':
pass
else:
topoLabels.append(r[0])
topoXData.append(float(r[1]))
topoYData.append(float(r[2]))
self.topoDF = pd.DataFrame([topoXData, topoYData]).transpose()
self.topoDF.columns = ["xDist", "Elev"]
def onDataType(self,event):
self.inputDataExt = self.inputDataType.GetString(self.inputDataType.GetSelection())
if self.inputDataExt == '.DAT (LS)':
self.headerlines = 8
elif self.inputDataExt == '.DAT (SAS)':
self.headerlines = 5
elif self.inputDataExt == '.VTK':
self.headerlines = 5 #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
elif self.inputDataExt == '.XYZ':
            self.headerlines = 5 #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
elif self.inputDataExt =='':
self.headerlines = 8
else:
if len(self.inputTxtOne.GetValue()) > 0:
try:
with open(self.dataPath, 'r') as datafile:
filereader = csv.reader(datafile)
start = 0
for row in enumerate(filereader):
if start == 0:
if 'N\\tTime' in str(row[1]):
start = 1
self.headerlines = row[0]
else:
continue
else:
continue
except:
self.headerlines = -1
wx.LogError('Data File not selected')
else:
self.headerlines = -1
def onReadIn(self, event):
self.onDataType(self) #initialize number of headerlines to use
self.dataHeader = []
filepath = pathlib.Path(self.inputTxtOne.GetValue())
self.ext = str(filepath.suffix)
filename = str(filepath.stem)
self.dataframeEDITColHeaders = ['MeasID','A(x)','A(z)','B(x)','B(z)','M(x)','M(z)','N(x)','N(z)', 'aVal', 'nFac','PseudoX','PseudoZ','Resistance','AppResist','Cycles','Variance','DataLevel','DtLvlMean','PctErr','Keep']
if self.ext.lower() == '.dat':
###############Need to update to fit .txt data format
dataLst = []
self.dataLead = []
self.dataTail = []
with open(filepath) as dataFile:
data = csv.reader(dataFile)
if self.inputDataExt == '.DAT (SAS)':
self.dataHeaders = ['M(x)','aVal','nFac','AppResist']
i = 0
dataList=[]
for row in enumerate(data):
if row[0]>self.headerlines: #Read in actual data
if row[0] > self.headerlines + datalength: #Read in data tail
self.dataTail.append(row[1])
else:
#It sometimes reads the lines differently. Sometimes as a list (as it should) other times as a long string
if len(row[1]) < 4:
#Entire row is read as string
dataList.append(re.split(' +', row[1][0]))
else:
#Row is read correctly as separate columns
dataList.append(row[1])
i+=1
else:
if row[0] == 3: #Read in data length
datalength = float(row[1][0])
self.dataLead.append(row[1])#Create data lead variable for later use
datalengthIN = i
self.fileHeaderDict = {}
self.dataListIN = dataList #Formatted global nested list is created of data read in
project = self.dataLead[0][0]
array = self.dataLead[2][0]
if float(array) == 3:
array = "Dipole-Dipole"
msrmtType = 'Apparent Resistivity'
self.fileHeaderDict['Filename'] = filename
self.fileHeaderDict['Project'] = project
self.fileHeaderDict['minElectSpcng'] = round(float(self.dataLead[1][0]),2)
self.fileHeaderDict['Array'] = array
self.fileHeaderDict["Type of Measurement"] = msrmtType
self.fileHeaderDict['DataPts'] = self.dataLead[3][0]
self.dataframeIN = pd.DataFrame(self.dataListIN)
#Sometimes the data is read in with an extra column at the beginning. This fixes that.
if len(self.dataframeIN.columns) > 4:
del self.dataframeIN[0]
self.dataframeIN.reindex([0, 1, 2, 3], axis='columns')
self.dataframeIN = self.dataframeIN.astype(float)
self.dataframeIN.columns = self.dataHeaders
                self.dataframeCols = [-2, -3, -4, -5, -6, 0, -7, -8, -9, 1, 2, -10, -11, -12, 3, -1, -1, -13, -14, -15,-16] # negative values mark columns that must be calculated rather than read directly
self.dataframeEDIT = pd.DataFrame()
dataframelength = len(self.dataframeIN.index)
nullList = []
keepList = []
zeroList = []
for i in range(0, dataframelength):
nullList.append(-1)
zeroList.append(0.0)
keepList.append(True)
# Create dataframe that will be used in editing process (self.dataframeEDIT) one column at a time
for item in enumerate(self.dataframeEDITColHeaders):
if self.dataframeCols[
item[0]] > -1: # Columns from dataframeIN that are directly read to dataframeEDIT
self.dataframeEDIT[item[1]] = self.dataframeIN.iloc[:, self.dataframeCols[item[0]]]
self.dataframeEDIT[item[1]] = self.dataframeEDIT[item[1]].astype(float)
elif self.dataframeCols[item[0]] == -1: # Null list (can't calculate)
self.dataframeEDIT[item[1]] = nullList
elif self.dataframeCols[item[0]] == -2: # Measure ID
for i in range(0, dataframelength):
self.dataframeEDIT.loc[i, item[1]] = i
elif self.dataframeCols[item[0]] == -3: # A(x)
self.dataframeIN['A(x)'] = self.dataframeIN['M(x)'] + self.dataframeIN['aVal'] + (self.dataframeIN['aVal']*self.dataframeIN['nFac']) + self.dataframeIN['aVal']
self.dataframeEDIT['A(x)'] = self.dataframeIN['A(x)']
elif self.dataframeCols[item[0]] == -4: # A(z)
self.dataframeEDIT[item[1]] = zeroList
elif self.dataframeCols[item[0]] == -5: # B(x)
self.dataframeIN['B(x)'] = self.dataframeIN['M(x)'] + self.dataframeIN['aVal'] + (self.dataframeIN['aVal']*self.dataframeIN['nFac'])
self.dataframeEDIT['B(x)'] = self.dataframeIN['B(x)']
elif self.dataframeCols[item[0]] == -6: # B(z)
self.dataframeEDIT[item[1]] = zeroList
#elif self.dataframeCols[item[0]] == -6: # M(x)
#Reads in directly
elif self.dataframeCols[item[0]] == -7: # M(z)
self.dataframeEDIT[item[1]] = zeroList
elif self.dataframeCols[item[0]] == -8: #N(x)
self.dataframeIN['N(x)'] = self.dataframeIN['M(x)'] + self.dataframeIN['aVal']
self.dataframeEDIT['N(x)'] = self.dataframeIN['N(x)']
elif self.dataframeCols[item[0]] == -9: # N(z)
self.dataframeEDIT[item[1]] = zeroList
elif self.dataframeCols[item[0]] == -10: # PseudoX
self.dataframeEDIT['PseudoX'] = (((self.dataframeEDIT['A(x)'] + self.dataframeEDIT[
'B(x)']) / 2) + ((self.dataframeEDIT['M(x)'] + self.dataframeEDIT['N(x)']) / 2)) / 2
elif self.dataframeCols[item[0]] == -11: # PseudoZ
n = self.dataframeEDIT['nFac']
a = self.dataframeEDIT['aVal']
self.dataframeEDIT['PseudoZ'] = round((((n ** 2) * -0.0018) + 0.2752 * n + 0.1483) * a, 1)
elif self.dataframeCols[item[0]] == -12: #Resistance
PI = math.pi
n = self.dataframeEDIT['nFac']
a = self.dataframeEDIT['aVal']
appR = self.dataframeIN['AppResist']
if self.fileHeaderDict['Array'] == 'Dipole-Dipole':
self.dataframeEDIT['Resistance'] = appR/(PI * n * (n + 1) * (n + 2) * a)
else:
print(
'Array is not Dipole-Dipole, but Dipole-Dipole k-factor used to calculate App. Resistivity')
elif self.dataframeCols[item[0]] == -13: #DataLevel
self.dataframeEDIT['DataLevel'] = nullList
uniqueDepths = self.dataframeEDIT['PseudoZ'].unique()
uniqueDepths = list(set(uniqueDepths.flatten()))
self.dataLevels = len(uniqueDepths)
dataLength = len(self.dataframeEDIT['PseudoZ'])
for i in range(0, dataLength):
self.dataframeEDIT.loc[i, 'DataLevel'] = uniqueDepths.index(
self.dataframeEDIT.loc[i, 'PseudoZ'])
elif self.dataframeCols[item[0]] == -14: #DtLvlMean
for i in uniqueDepths:
df = self.dataframeEDIT[self.dataframeEDIT.iloc[:, 12] == i]
dtLvlMean = df['AppResist'].mean()
indexList = df.index.values.tolist()
for ind in indexList:
self.dataframeEDIT.loc[ind, 'DtLvlMean'] = dtLvlMean
elif self.dataframeCols[item[0]] == -15: #PctErr
self.dataframeEDIT['PctErr'] = (abs(
self.dataframeEDIT['DtLvlMean'] - self.dataframeEDIT['AppResist'])) / \
self.dataframeEDIT['DtLvlMean']
elif self.dataframeCols[item[0]] == -16: #Keep
self.dataframeEDIT[item[1]] = keepList
else:
self.dataframeEDIT[item[1]] = nullList
elif self.inputDataExt == '.DAT (LS)': # If it's .DAT (LS)
self.dataHeaders = ["NoElectrodes",'A(x)', 'A(z)', 'B(x)', 'B(z)', 'M(x)', 'M(z)', 'N(x)', 'N(z)', 'Resistance']
datalength=12
dataList = []
for row in enumerate(data):
if row[0]>int(self.headerlines) and row[0] <= float(self.headerlines + datalength):
strrow = str(row[1])
strrow = strrow[2:-2]
splitrow = strrow.split('\\t')
if len(splitrow) != 10:
newrow = []
for i in splitrow:
val = i.strip()
newrow.append(val)
if len(newrow) < 9:
newrow = re.split(' +',newrow[0])
row = [float(i) for i in newrow]
dataList.append(row)
else:
dataList.append(splitrow)
elif row[0] <= int(self.headerlines):
if isinstance(row[1], list):
val = str(row[1])[2:-2]
else:
val = row[1]
self.dataLead.append(val)
if row[0] == 6:
datalength = float(row[1][0])
else:
self.dataTail.append(row[1])
self.dataListIN = dataList
self.fileHeaderDict = {}
project = self.dataLead[0]
dataFrmt = self.dataLead[2]
array = int(self.dataLead[3])
if array == 3:
array = "Dipole-Dipole"
msrmtType = str(self.dataLead[5])
if msrmtType.strip() == '0':
msrmtType = "Apparent Resistivity"
else:
msrmtType = 'Resistance'
self.fileHeaderDict['Filename'] = filename
self.fileHeaderDict['Project'] = project
self.fileHeaderDict['minElectSpcng'] = str(round(float(self.dataLead[1]),2))
self.fileHeaderDict['Array'] = array
self.fileHeaderDict["Type of Measurement"] = msrmtType
self.fileHeaderDict['DataPts'] = str(self.dataLead[6])
self.fileHeaderDict['DistType'] = str(self.dataLead[7])
self.dataframeIN = pd.DataFrame(self.dataListIN)
self.dataframeIN.columns = self.dataHeaders
                self.dataframeCols = [-2, 1, 2, 3, 4, 5, 6, 7, 8, -3, -4, -5, -6, 9, -7, -1, -1, -8, -9, -10, -11] # negative values mark columns that must be calculated rather than read directly
self.dataframeEDIT = pd.DataFrame()
dataframelength = len(self.dataframeIN.index)
nullList = []
keepList = []
for i in range(0, dataframelength):
nullList.append(-1)
keepList.append(True)
# Create dataframe that will be used in editing process (self.dataframeEDIT) one column at a time
for item in enumerate(self.dataframeEDITColHeaders):
if self.dataframeCols[item[0]] > -1: #Columns from dataframeIN that are directly read to dataframeEDIT
self.dataframeEDIT[item[1]] = self.dataframeIN.iloc[:, self.dataframeCols[item[0]]]
self.dataframeEDIT[item[1]] = self.dataframeEDIT[item[1]].astype(float)
elif self.dataframeCols[item[0]] == -1: #Null list (can't calculate)
self.dataframeEDIT[item[1]] = nullList
elif self.dataframeCols[item[0]] == -2:#Measure ID
for i in range(0,dataframelength):
self.dataframeEDIT.loc[i,item[1]] = i
elif self.dataframeCols[item[0]] == -3: #A spacing
self.dataframeEDIT[item[1]] = abs(self.dataframeEDIT['A(x)'] - self.dataframeEDIT['B(x)'])
elif self.dataframeCols[item[0]] == -4: #N-factor
self.dataframeEDIT[item[1]] = abs(self.dataframeEDIT['B(x)'] - self.dataframeEDIT['N(x)']) / self.dataframeEDIT['aVal']
elif self.dataframeCols[item[0]] == -5:#PseduoX
self.dataframeEDIT['PseudoX'] = (((self.dataframeEDIT['A(x)']+self.dataframeEDIT['B(x)'])/2)+((self.dataframeEDIT['M(x)']+self.dataframeEDIT['N(x)'])/2))/2
elif self.dataframeCols[item[0]] == -6: #PseduoZ
n = self.dataframeEDIT['nFac']
a = self.dataframeEDIT['aVal']
self.dataframeEDIT['PseudoZ'] = round((((n**2)*-0.0018)+0.2752*n+0.1483)*a,1)
elif self.dataframeCols[item[0]] == -7:#AppResistivity
PI = math.pi
n = self.dataframeEDIT['nFac']
a = self.dataframeEDIT['aVal']
R = self.dataframeEDIT['Resistance']
if self.fileHeaderDict['Array'] == 'Dipole-Dipole':
self.dataframeEDIT['AppResist'] = PI*n*(n+1)*(n+2)*a*R
else:
print('Array is not Dipole-Dipole, but Dipole-Dipole k-factor used to calculate App. Resistivity')
elif self.dataframeCols[item[0]] == -8: #DataLevel
self.dataframeEDIT['DataLevel'] = nullList
uniqueDepths = self.dataframeEDIT['PseudoZ'].unique()
uniqueDepths = list(set(uniqueDepths.flatten()))
self.dataLevels = len(uniqueDepths)
dataLength = len(self.dataframeEDIT['PseudoZ'])
for i in range(0, dataLength):
self.dataframeEDIT.loc[i, 'DataLevel'] = uniqueDepths.index(self.dataframeEDIT.loc[i, 'PseudoZ'])
elif self.dataframeCols[item[0]] == -9: # DtLvlMean
for i in uniqueDepths:
df = self.dataframeEDIT[self.dataframeEDIT.iloc[:, 12] == i]
dtLvlMean = df['AppResist'].mean()
indexList = df.index.values.tolist()
for ind in indexList:
self.dataframeEDIT.loc[ind, 'DtLvlMean'] = dtLvlMean
elif self.dataframeCols[item[0]] == -10: #PctErr
self.dataframeEDIT['PctErr'] = (abs(
self.dataframeEDIT['DtLvlMean'] - self.dataframeEDIT['AppResist'])) / \
self.dataframeEDIT['DtLvlMean']
elif self.dataframeCols[item[0]] == -11: #Keep
self.dataframeEDIT[item[1]] = keepList
else:
self.dataframeEDIT[item[1]] = nullList
self.readInFileBtn.SetLabelText("Reset Data")
elif self.ext.lower() == '.txt':
with open(filepath, 'r') as datafile:
filereader = csv.reader(datafile)
start = 0
end = 0
fileHeader = []
data = []
for row in enumerate(filereader):
if start == 0:
if row[0] <= 13:
fileHeader.append(row[1])
fileHeader[row[0]] = fileHeader[row[0]][:]
if 'N\\tTime' in str(row[1]):
start = 1
self.headerlines = row[0]
dataHdrTemp = str(row[1])
self.dataHeaders = dataHdrTemp[2:-2].split('\\t')
self.dataHeaders[1] = dataHdrTemp[1].strip()
self.fileHeaderDict = {}
for item in fileHeader:
if len(item) > 0:
self.fileHeaderDict[str(item[0]).split(":", 1)[0]] = str(item[0]).split(":", 1)[1].strip()
elif start == 1 and end == 0:
if len(row[1]) > 0:
data.append(str(row[1])[2:-1].split('\\t'))
else:
end = 1
else:
continue
self.dataListIN = data
self.dataframeIN = pd.DataFrame(self.dataListIN)
self.dataframeCols = [0, 6, 8, 9, 11, 12, 14, 15, 17, -2, -3, 18, 20, 26, 28, 29, 27, -4, -5, -6, -7] #neg val ind. colums that need to be calculated
self.dataframeEDIT = pd.DataFrame()
dataframelength = len(self.dataframeIN.index)
nullList = []
keepList = []
for i in range(0, dataframelength):
nullList.append(-1)
keepList.append(True)
# Create dataframe that will be used in editing process (self.dataframeEDIT) one column at a time
for item in enumerate(self.dataframeEDITColHeaders):
if self.dataframeCols[item[0]] > -1:
#print(item[1])
self.dataframeEDIT[item[1]] = self.dataframeIN.iloc[:, self.dataframeCols[item[0]]]
self.dataframeEDIT[item[1]] = self.dataframeEDIT[item[1]].astype(float)
elif self.dataframeCols[item[0]] == -2:
self.dataframeEDIT[item[1]] = abs(self.dataframeEDIT['A(x)'] - self.dataframeEDIT['B(x)'])
elif self.dataframeCols[item[0]] == -3:
self.dataframeEDIT[item[1]] = abs(self.dataframeEDIT['N(x)'] - self.dataframeEDIT['M(x)']) / self.dataframeEDIT['aVal']
elif self.dataframeCols[item[0]] == -4:
self.dataframeEDIT['DataLevel'] = nullList
uniqueDepths = self.dataframeEDIT['PseudoZ'].unique()
uniqueDepths = list(set(uniqueDepths.flatten()))
self.dataLevels = len(uniqueDepths)
dataLength = len(self.dataframeEDIT['PseudoZ'])
for i in range(0, dataLength):
self.dataframeEDIT.loc[i, 'DataLevel'] = uniqueDepths.index(self.dataframeEDIT.loc[i, 'PseudoZ'])
elif self.dataframeCols[item[0]] == -5:
for i in uniqueDepths:
df = self.dataframeEDIT[self.dataframeEDIT.iloc[:, 12] == i]
dtLvlMean = df['AppResist'].mean()
indexList = df.index.values.tolist()
for ind in indexList:
self.dataframeEDIT.loc[ind, 'DtLvlMean'] = dtLvlMean
elif self.dataframeCols[item[0]] == -6:
self.dataframeEDIT['PctErr'] = (abs(self.dataframeEDIT['DtLvlMean'] - self.dataframeEDIT['AppResist'])) / self.dataframeEDIT['DtLvlMean']
elif self.dataframeCols[item[0]] == -7:
self.dataframeEDIT[item[1]] = keepList
else:
self.dataframeEDIT[item[1]] = nullList
self.dataHeaders[1] = 'MeasTime'
if len(self.dataHeaders) > 37:
self.dataHeaders[37] = 'Extra'
self.dataTail = [0,0,0,0,0,0,0]
self.dataframeIN.columns = self.dataHeaders
self.readInFileBtn.SetLabelText("Reset Data")
self.fileHeaderDict['Filename'] = filename
self.fileHeaderDict['Project'] = self.fileHeaderDict['Project name']
self.fileHeaderDict['Array'] = self.fileHeaderDict['Protocol file'][21:-4]
self.fileHeaderDict['minElectSpcng'] = self.fileHeaderDict['Smallest electrode spacing']
self.fileHeaderDict['DataPts'] = len(self.dataframeIN)
self.dataLead = []
self.dataLead.append(self.fileHeaderDict['Project name'] + " " + self.fileHeaderDict['Filename'])
self.dataLead.append(self.fileHeaderDict['minElectSpcng'])
self.dataLead.append('11') #General Array format
self.dataLead.append(self.fileHeaderDict['Sub array code']) #tells what kind of array is used
self.dataLead.append('Type of measurement (0=app.resistivity,1=resistance)')
self.dataLead.append('0') #Col 26 in .txt (col 28 is app. resistivity)
self.dataLead.append(self.fileHeaderDict['DataPts'])
self.dataLead.append('2')
self.dataLead.append('0')
elif self.ext.lower() == '.vtk':
with open(filepath, 'r') as datafile:
filereader = csv.reader(datafile)
startLocs = 0
startData = 0
startLocInd = 'POINTS'
startDataInd = 'LOOKUP_TABLE'
endLocs = 0
endData = 0
endLocInd = []
endDataInd = []
fileLead = []
fileMid = []
fileTail = []
vtkdata = []
vtklocs = []
newrow = []
xLocPts = []
yLocPts = []
zLocPts = []
vPts = []
for row in enumerate(filereader):
if startLocs == 0:
fileLead.append(row[1])
fileLead[row[0]] = fileLead[row[0]][:]
if startLocInd in str(row[1]):
startLocs = 1
elif startLocs == 1 and endLocs == 0:
if endLocInd == row[1]:
endLocs = 1
else:
newrow = re.split(' +', str(row[1][0]))
newrow = newrow[1:]
vtklocs.append(newrow)
elif startData == 0:
fileMid.append(row[1])
if startDataInd in str(row[1]):
startData = 1
elif startData == 1 and endData == 0:
if row[1] == endDataInd:
                            endData = 1
else:
newrow = re.split(' +', str(row[1][0]))
newrow = newrow[1:]
vtkdata.append(newrow)
else:
fileTail.append(row[1])
fileTail[row[0]] = fileTail[row[0]][:]
xPtCols = [0,3,6,9]
yPtCols = [1,4,7,10]
zPtCols = [2,5,8,11]
for r in vtklocs:
Xs = 0.0
for x in xPtCols:
Xs = Xs + float(r[x])
xLocPts.append(Xs/4.0)
Ys = 0.0
for y in yPtCols:
Ys = Ys + float(r[y])
yLocPts.append(Ys/4.0)
Zs = 0.0
for z in zPtCols:
Zs = Zs + float(r[z])
zLocPts.append(Zs/4)
for d in vtkdata:
for i in d:
vPts.append(i)
self.dataframeIN = | pd.DataFrame([xLocPts, yLocPts, zLocPts, vPts]) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import dash
import pathlib
import copy
import dash_core_components as dcc
import dash_html_components as html
import dash_dangerously_set_inner_html
import plotly.graph_objs as go
import pandas as pd
import datetime as dt
from scipy.stats import sem, t
from scipy import mean
from dateutil.relativedelta import relativedelta
from dash.dependencies import Input, Output, State
import locale
import urllib.parse
import urllib.request
from zipfile import ZipFile
import os
import flask
from io import StringIO
from flask_babel import _
from flask import session, redirect, url_for, request
from header_footer import gc_header_en, gc_footer_en, gc_header_fr, gc_footer_fr
# Dropdown options
from controls import *
# get relative data folder
PATH = pathlib.Path(__file__).parent
DATA_PATH = PATH.joinpath("data").resolve() # path to "data" folder
IONOGRAM_PATH = 'U:/Downloads' # Directory to Ionogram images for testing
# IONOGRAM_PATH = '/storage_slow/ftp_root/users/OpenData_DonneesOuvertes/pub/AlouetteData/Alouette Data' # Directory to Ionogram images on server
# load data and transform as needed
df = pd.read_csv('data/final_alouette_data.csv') # edit for compatibility with CKAN portal (e.g. API to dataframe)
df['timestamp'] = | pd.to_datetime(df['timestamp']) | pandas.to_datetime |
# plots.py
import matplotlib
matplotlib.rcParams = matplotlib.rc_params_from_file('../../matplotlibrc')
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
def randomWalk():
"""Creates plot of symmetric one-D random lattice walk"""
N = 1000 #length of random walk
s = np.zeros(N)
s[1:] = np.random.binomial(1, .5, size=(N-1,))*2-1 #coin flips
s = | pd.Series(s) | pandas.Series |
import os
import pandas as pd
import numpy as np
"""
Tested with the following code:
input_file = "data\\0.input\\enron\\ia-enron-email-dynamic.edges"
dataframe = pd.read_csv(input_file, sep=" ", names=['from_id', 'to_id', 'type', 'timestamp'])
dataframe['timestamp']=pd.to_datetime(dataframe['timestamp'],unit='ms').dt.strftime('%Y-%m-%d %H:%M:%s')
print(dataframe['timestamp'].unique())
Day-level granularity turned out to be the most suitable; all timestamps fall into the following bins:
['1970-01-12' '1970-01-11' '1970-01-04' '1970-01-06' '1970-01-20' '1970-01-13' '1970-01-10' '1970-01-19' '1970-01-14']
"""
def check_and_make_path(to_make):
if to_make == '':
return
if not os.path.exists(to_make):
os.makedirs(to_make)
class ENRONTransformer:
input_file: str
format_output_path: str
use_good_data: bool
good_data_format: str
good_data_list: list
def __init__(self, input_file, output_base_path):
self.input_file = input_file
self.format_output_path = os.path.join(output_base_path, "1.format")
self.node_list_output_path = os.path.join(output_base_path, "nodes_set")
        # create output directories
check_and_make_path(self.format_output_path)
def transform(self, trans_type=None, use_good_data=False):
print("transforming ENRON...")
self.use_good_data = use_good_data
if trans_type is None:
trans_type = ['year', 'month', 'day']
if 'year' in trans_type:
self.handle_by_year()
if 'month' in trans_type:
self.handle_by_month()
if 'day' in trans_type:
self.handle_by_day()
print("transforming ENRON complete\n")
def handle_by_month(self):
        # process the data month by month
dataframe = pd.read_csv(self.input_file, sep=" ", names=['from_id', 'to_id', 'type', 'timestamp'])
dataframe['timestamp'] = pd.to_datetime(dataframe['timestamp'], unit='s').dt.strftime('%Y-%m')
candidate = ['1980-01', '1986-04', '1986-05', '1997-01', '1998-01', '1998-05', '1998-09', '1998-10', '1998-11',
'1998-12', '1999-01', '1999-02', '1999-03', '1999-04', '1999-05', '1999-06', '1999-07', '1999-08',
'1999-09', '1999-10', '1999-11', '1999-12', '2000-01', '2000-02', '2000-03', '2000-04', '2000-05',
'2000-06', '2000-07', '2000-08', '2000-09', '2000-10', '2000-11', '2000-12', '2001-01', '2001-02',
'2001-03', '2001-04', '2001-05', '2001-06', '2001-07', '2001-08', '2001-09', '2001-10', '2001-11',
'2001-12', '2002-01', '2002-02', '2002-03', '2002-04', '2002-05', '2002-06', '2002-07', '2002-10',
'2002-12', '2004-02', '2005-12', '2007-02', '2020-12', '2024-05']
# 00-01 node:78344
good_data = ['2000-01', '2000-02', '2000-03', '2000-04', '2000-05', '2000-06', '2000-07', '2000-08', '2000-09',
'2000-10', '2000-11', '2000-12', '2001-01', '2001-02', '2001-03', '2001-04', '2001-05', '2001-06',
'2001-07', '2001-08', '2001-09', '2001-10', '2001-11', '2001-12']
# 00 node:28033
good_data = ['2000-01', '2000-02', '2000-03', '2000-04', '2000-05', '2000-06', '2000-07', '2000-08', '2000-09',
'2000-10', '2000-11', '2000-12']
# 01 node:60813
good_data = ['2001-02', '2001-03', '2001-04', '2001-05', '2001-06', '2001-07', '2001-08', '2001-09', '2001-10',
'2001-11', '2001-12']
# node:42379
good_data = ['2000-06', '2000-07', '2000-08', '2000-09', '2000-10', '2000-11', '2000-12', '2001-01', '2001-02',
'2001-03', '2001-04', '2001-05', ]
# node:16344
good_data = ['1999-05', '1999-06', '1999-07', '1999-08', '1999-09', '1999-10', '1999-11', '1999-12', '2000-01',
'2000-02', '2000-03', '2000-04', '2000-05', '2000-06', '2000-07', '2000-08', '2000-09', '2000-10',
'2000-11', '2000-12', '2001-01', '2001-02', '2001-03', '2001-04', '2001-05', '2001-06', '2001-07',
'2001-08', '2001-09', '2001-10', '2001-11', '2001-12', '2002-01', '2002-02', '2002-03', '2002-04',
'2002-05', '2002-06']
if self.use_good_data:
self.good_data_format = '%Y-%m'
self.good_data_list = good_data
for month in (good_data if self.use_good_data else candidate):
tem = dataframe[['from_id', 'to_id']][dataframe['timestamp'] == month]
            # NOTE: add a prefix here, otherwise when the files are later read back with pd.DataFrame (for LINE, deepwalk, etc.) the ids are not treated as strings and cause problems
tem['from_id'] = tem['from_id'].map(lambda x: "U" + str(x)) # user
tem['to_id'] = tem['to_id'].map(lambda x: "U" + str(x)) # user
            # aggregate duplicate edges into weights
tem = tem.groupby(['from_id', 'to_id']).size().reset_index().rename(columns={0: 'weight'})
tem.to_csv(os.path.join(self.format_output_path, str(month) + ".csv"), sep='\t', header=1, index=0)
def handle_by_day(self):
        # process the data day by day
dataframe = | pd.read_csv(self.input_file, sep=" ", names=['from_id', 'to_id', 'type', 'timestamp']) | pandas.read_csv |
"""
Tests for statistical pipeline terms.
"""
from numpy import (
arange,
full,
full_like,
nan,
where,
)
from pandas import (
DataFrame,
date_range,
Int64Index,
Timestamp,
)
from pandas.util.testing import assert_frame_equal
from scipy.stats import linregress, pearsonr, spearmanr
from catalyst.assets import Equity
from catalyst.errors import IncompatibleTerms, NonExistentAssetInTimeFrame
from catalyst.pipeline import CustomFactor, Pipeline
from catalyst.pipeline.data import USEquityPricing
from catalyst.pipeline.data.testing import TestingDataSet
from catalyst.pipeline.engine import SimplePipelineEngine
from catalyst.pipeline.factors.equity import (
Returns,
RollingLinearRegressionOfReturns,
RollingPearsonOfReturns,
RollingSpearmanOfReturns,
)
from catalyst.pipeline.loaders.frame import DataFrameLoader
from catalyst.pipeline.sentinels import NotSpecified
from catalyst.testing import (
AssetID,
AssetIDPlusDay,
check_arrays,
make_alternating_boolean_array,
make_cascading_boolean_array,
parameter_space,
)
from catalyst.testing.fixtures import (
WithSeededRandomPipelineEngine,
WithTradingEnvironment,
CatalystTestCase,
)
from catalyst.utils.numpy_utils import (
bool_dtype,
datetime64ns_dtype,
float64_dtype,
)
class StatisticalBuiltInsTestCase(WithTradingEnvironment, CatalystTestCase):
sids = ASSET_FINDER_EQUITY_SIDS = Int64Index([1, 2, 3])
START_DATE = Timestamp('2015-01-31', tz='UTC')
END_DATE = Timestamp('2015-03-01', tz='UTC')
@classmethod
def init_class_fixtures(cls):
super(StatisticalBuiltInsTestCase, cls).init_class_fixtures()
day = cls.trading_calendar.day
cls.dates = dates = date_range(
'2015-02-01', '2015-02-28', freq=day, tz='UTC',
)
        # Using these start and end dates because they are a contiguous span of
# 5 days (Monday - Friday) and they allow for plenty of days to look
# back on when computing correlations and regressions.
cls.start_date_index = start_date_index = 14
cls.end_date_index = end_date_index = 18
cls.pipeline_start_date = dates[start_date_index]
cls.pipeline_end_date = dates[end_date_index]
cls.num_days = num_days = end_date_index - start_date_index + 1
sids = cls.sids
cls.assets = assets = cls.asset_finder.retrieve_all(sids)
cls.my_asset_column = my_asset_column = 0
cls.my_asset = assets[my_asset_column]
cls.num_assets = num_assets = len(assets)
cls.raw_data = raw_data = DataFrame(
data=arange(len(dates) * len(sids), dtype=float64_dtype).reshape(
len(dates), len(sids),
),
index=dates,
columns=assets,
)
# Using mock 'close' data here because the correlation and regression
# built-ins use USEquityPricing.close as the input to their `Returns`
# factors. Since there is no way to change that when constructing an
# instance of these built-ins, we need to test with mock 'close' data
# to most accurately reflect their true behavior and results.
close_loader = DataFrameLoader(USEquityPricing.close, raw_data)
cls.run_pipeline = SimplePipelineEngine(
{USEquityPricing.close: close_loader}.__getitem__,
dates,
cls.asset_finder,
).run_pipeline
cls.cascading_mask = \
AssetIDPlusDay() < (sids[-1] + dates[start_date_index].day)
cls.expected_cascading_mask_result = make_cascading_boolean_array(
shape=(num_days, num_assets),
)
cls.alternating_mask = (AssetIDPlusDay() % 2).eq(0)
cls.expected_alternating_mask_result = make_alternating_boolean_array(
shape=(num_days, num_assets),
)
cls.expected_no_mask_result = full(
shape=(num_days, num_assets), fill_value=True, dtype=bool_dtype,
)
@parameter_space(returns_length=[2, 3], correlation_length=[3, 4])
def _test_correlation_factors(self, returns_length, correlation_length):
"""
Tests for the built-in factors `RollingPearsonOfReturns` and
`RollingSpearmanOfReturns`.
"""
assets = self.assets
my_asset = self.my_asset
my_asset_column = self.my_asset_column
dates = self.dates
start_date = self.pipeline_start_date
end_date = self.pipeline_end_date
start_date_index = self.start_date_index
end_date_index = self.end_date_index
num_days = self.num_days
run_pipeline = self.run_pipeline
returns = Returns(window_length=returns_length)
masks = (self.cascading_mask, self.alternating_mask, NotSpecified)
expected_mask_results = (
self.expected_cascading_mask_result,
self.expected_alternating_mask_result,
self.expected_no_mask_result,
)
for mask, expected_mask in zip(masks, expected_mask_results):
pearson_factor = RollingPearsonOfReturns(
target=my_asset,
returns_length=returns_length,
correlation_length=correlation_length,
mask=mask,
)
spearman_factor = RollingSpearmanOfReturns(
target=my_asset,
returns_length=returns_length,
correlation_length=correlation_length,
mask=mask,
)
columns = {
'pearson_factor': pearson_factor,
'spearman_factor': spearman_factor,
}
pipeline = Pipeline(columns=columns)
if mask is not NotSpecified:
pipeline.add(mask, 'mask')
results = run_pipeline(pipeline, start_date, end_date)
pearson_results = results['pearson_factor'].unstack()
spearman_results = results['spearman_factor'].unstack()
if mask is not NotSpecified:
mask_results = results['mask'].unstack()
check_arrays(mask_results.values, expected_mask)
# Run a separate pipeline that calculates returns starting
# (correlation_length - 1) days prior to our start date. This is
# because we need (correlation_length - 1) extra days of returns to
# compute our expected correlations.
results = run_pipeline(
Pipeline(columns={'returns': returns}),
dates[start_date_index - (correlation_length - 1)],
dates[end_date_index],
)
returns_results = results['returns'].unstack()
# On each day, calculate the expected correlation coefficients
# between the asset we are interested in and each other asset. Each
# correlation is calculated over `correlation_length` days.
expected_pearson_results = full_like(pearson_results, nan)
expected_spearman_results = full_like(spearman_results, nan)
for day in range(num_days):
todays_returns = returns_results.iloc[
day:day + correlation_length
]
my_asset_returns = todays_returns.iloc[:, my_asset_column]
for asset, other_asset_returns in todays_returns.iteritems():
asset_column = int(asset) - 1
expected_pearson_results[day, asset_column] = pearsonr(
my_asset_returns, other_asset_returns,
)[0]
expected_spearman_results[day, asset_column] = spearmanr(
my_asset_returns, other_asset_returns,
)[0]
expected_pearson_results = DataFrame(
data=where(expected_mask, expected_pearson_results, nan),
index=dates[start_date_index:end_date_index + 1],
columns=assets,
)
assert_frame_equal(pearson_results, expected_pearson_results)
expected_spearman_results = DataFrame(
data=where(expected_mask, expected_spearman_results, nan),
index=dates[start_date_index:end_date_index + 1],
columns=assets,
)
assert_frame_equal(spearman_results, expected_spearman_results)
@parameter_space(returns_length=[2, 3], regression_length=[3, 4])
def _test_regression_of_returns_factor(self,
returns_length,
regression_length):
"""
Tests for the built-in factor `RollingLinearRegressionOfReturns`.
"""
assets = self.assets
my_asset = self.my_asset
my_asset_column = self.my_asset_column
dates = self.dates
start_date = self.pipeline_start_date
end_date = self.pipeline_end_date
start_date_index = self.start_date_index
end_date_index = self.end_date_index
num_days = self.num_days
run_pipeline = self.run_pipeline
# The order of these is meant to align with the output of `linregress`.
outputs = ['beta', 'alpha', 'r_value', 'p_value', 'stderr']
returns = Returns(window_length=returns_length)
masks = self.cascading_mask, self.alternating_mask, NotSpecified
expected_mask_results = (
self.expected_cascading_mask_result,
self.expected_alternating_mask_result,
self.expected_no_mask_result,
)
for mask, expected_mask in zip(masks, expected_mask_results):
regression_factor = RollingLinearRegressionOfReturns(
target=my_asset,
returns_length=returns_length,
regression_length=regression_length,
mask=mask,
)
columns = {
output: getattr(regression_factor, output)
for output in outputs
}
pipeline = Pipeline(columns=columns)
if mask is not NotSpecified:
pipeline.add(mask, 'mask')
results = run_pipeline(pipeline, start_date, end_date)
if mask is not NotSpecified:
mask_results = results['mask'].unstack()
check_arrays(mask_results.values, expected_mask)
output_results = {}
expected_output_results = {}
for output in outputs:
output_results[output] = results[output].unstack()
expected_output_results[output] = full_like(
output_results[output], nan,
)
# Run a separate pipeline that calculates returns starting
# (regression_length - 1) days prior to our start date. This is
# because we need (regression_length - 1) extra days of returns to
# compute our expected regressions.
results = run_pipeline(
Pipeline(columns={'returns': returns}),
dates[start_date_index - (regression_length - 1)],
dates[end_date_index],
)
returns_results = results['returns'].unstack()
# On each day, calculate the expected regression results for Y ~ X
# where Y is the asset we are interested in and X is each other
# asset. Each regression is calculated over `regression_length`
# days of data.
for day in range(num_days):
todays_returns = returns_results.iloc[
day:day + regression_length
]
my_asset_returns = todays_returns.iloc[:, my_asset_column]
for asset, other_asset_returns in todays_returns.iteritems():
asset_column = int(asset) - 1
expected_regression_results = linregress(
y=other_asset_returns, x=my_asset_returns,
)
for i, output in enumerate(outputs):
expected_output_results[output][day, asset_column] = \
expected_regression_results[i]
for output in outputs:
output_result = output_results[output]
expected_output_result = DataFrame(
where(expected_mask, expected_output_results[output], nan),
index=dates[start_date_index:end_date_index + 1],
columns=assets,
)
assert_frame_equal(output_result, expected_output_result)
def _test_correlation_and_regression_with_bad_asset(self):
"""
Test that `RollingPearsonOfReturns`, `RollingSpearmanOfReturns` and
`RollingLinearRegressionOfReturns` raise the proper exception when
given a nonexistent target asset.
"""
my_asset = Equity(0, exchange="TEST")
start_date = self.pipeline_start_date
end_date = self.pipeline_end_date
run_pipeline = self.run_pipeline
# This filter is arbitrary; the important thing is that we test each
# factor both with and without a specified mask.
my_asset_filter = AssetID().eq(1)
for mask in (NotSpecified, my_asset_filter):
pearson_factor = RollingPearsonOfReturns(
target=my_asset,
returns_length=3,
correlation_length=3,
mask=mask,
)
spearman_factor = RollingSpearmanOfReturns(
target=my_asset,
returns_length=3,
correlation_length=3,
mask=mask,
)
regression_factor = RollingLinearRegressionOfReturns(
target=my_asset,
returns_length=3,
regression_length=3,
mask=mask,
)
with self.assertRaises(NonExistentAssetInTimeFrame):
run_pipeline(
Pipeline(columns={'pearson_factor': pearson_factor}),
start_date,
end_date,
)
with self.assertRaises(NonExistentAssetInTimeFrame):
run_pipeline(
Pipeline(columns={'spearman_factor': spearman_factor}),
start_date,
end_date,
)
with self.assertRaises(NonExistentAssetInTimeFrame):
run_pipeline(
Pipeline(columns={'regression_factor': regression_factor}),
start_date,
end_date,
)
def test_require_length_greater_than_one(self):
my_asset = Equity(0, exchange="TEST")
with self.assertRaises(ValueError):
RollingPearsonOfReturns(
target=my_asset,
returns_length=3,
correlation_length=1,
)
with self.assertRaises(ValueError):
RollingSpearmanOfReturns(
target=my_asset,
returns_length=3,
correlation_length=1,
)
with self.assertRaises(ValueError):
RollingLinearRegressionOfReturns(
target=my_asset,
returns_length=3,
regression_length=1,
)
class StatisticalMethodsTestCase(WithSeededRandomPipelineEngine,
CatalystTestCase):
sids = ASSET_FINDER_EQUITY_SIDS = Int64Index([1, 2, 3])
START_DATE = Timestamp('2015-01-31', tz='UTC')
END_DATE = | Timestamp('2015-03-01', tz='UTC') | pandas.Timestamp |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import os
import sys
import random
import gc
import pickle
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('seaborn-white')
import seaborn as sns
sns.set_style("white")
get_ipython().run_line_magic('matplotlib', 'inline')
from sklearn.model_selection import train_test_split
from tqdm import tqdm_notebook #, tnrange
#from itertools import chain
from skimage.io import imread, imshow #, concatenate_images
from skimage.transform import resize
from skimage.morphology import label
from keras.models import Model, load_model, save_model
from keras.layers import Input,Dropout,BatchNormalization,Activation,Add
from keras.layers.core import Lambda
from keras.layers.convolutional import Conv2D, Conv2DTranspose
from keras.layers.pooling import MaxPooling2D
from keras.layers.merge import concatenate
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from keras import backend as K
from keras import optimizers
from keras.callbacks import Callback
import keras.backend as K
import numpy as np
import imgaug as ia
from imgaug import augmenters as iaa
import tensorflow as tf
from tta_wrapper import tta_segmentation
from keras.preprocessing.image import array_to_img, img_to_array, load_img#,save_img
import imgaug
import time
t_start = time.time()
# In[2]:
VERSION = 32
SEED = 42
FOLDS = 5
DEPTH = True
basic_name = f'Unet_resnet_v{VERSION}'
save_model_name = basic_name + '.model'
save_model_name_lov = basic_name + '_lov.model'
submission_file = basic_name + '.csv'
imgaug.seed(SEED)
print(save_model_name)
print(save_model_name_lov)
print(submission_file)
# In[3]:
img_size_ori = 101
img_size_target = 101
def upsample(img):
if img_size_ori == img_size_target:
return img
return resize(img, (img_size_target, img_size_target), mode='constant', preserve_range=True)
def downsample(img):
if img_size_ori == img_size_target:
return img
return resize(img, (img_size_ori, img_size_ori), mode='constant', preserve_range=True)
# In[4]:
# Loading of training/testing ids and depths
train_df = pd.read_csv("../data/raw/train.csv", index_col="id", usecols=[0])
depths_df = pd.read_csv("../data/raw/depths.csv", index_col="id")
train_df = train_df.join(depths_df)
test_df = depths_df[~depths_df.index.isin(train_df.index)]
len(train_df)
# In[5]:
train_df["images"] = [np.array(load_img("../data/raw/train/images/{}.png".format(idx),
color_mode = "grayscale",)) / 255 for idx in tqdm_notebook(train_df.index)]
# In[6]:
train_df["masks"] = [np.array(load_img("../data/raw/train/masks/{}.png".format(idx),
color_mode = "grayscale",)) / 255 for idx in tqdm_notebook(train_df.index)]
# In[7]:
train_df["coverage"] = train_df.masks.map(np.sum) / pow(img_size_ori, 2)
def cov_to_class(val):
for i in range(0, 11):
if val * 10 <= i :
return i
train_df["coverage_class"] = train_df.coverage.map(cov_to_class)
# In[8]:
SUBSET = len(train_df)
train_df = train_df.head(SUBSET)
len(train_df)
# In[9]:
def BatchActivate(x):
x = BatchNormalization()(x)
x = Activation('relu')(x)
return x
def convolution_block(x, filters, size, strides=(1,1), padding='same', activation=True):
x = Conv2D(filters, size, strides=strides, padding=padding)(x)
if activation == True:
x = BatchActivate(x)
return x
def residual_block(blockInput, num_filters=16, batch_activate = False):
x = BatchActivate(blockInput)
x = convolution_block(x, num_filters, (3,3) )
x = convolution_block(x, num_filters, (3,3), activation=False)
x = Add()([x, blockInput])
if batch_activate:
x = BatchActivate(x)
return x
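# Note (descriptive, not from the original kernel): residual_block is a pre-activation
# residual unit -- BN+ReLU on the input, two 3x3 convolutions, then the block input added
# back via an identity skip connection, optionally followed by another BN+ReLU when
# batch_activate=True.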
# In[10]:
# Build model
def build_model(input_layer, start_neurons, DropoutRatio = 0.5):
# 101 -> 50
conv1 = Conv2D(start_neurons * 1, (3, 3), activation=None, padding="same")(input_layer)
conv1 = residual_block(conv1,start_neurons * 1)
conv1 = residual_block(conv1,start_neurons * 1, True)
pool1 = MaxPooling2D((2, 2))(conv1)
pool1 = Dropout(DropoutRatio/2)(pool1)
# 50 -> 25
conv2 = Conv2D(start_neurons * 2, (3, 3), activation=None, padding="same")(pool1)
conv2 = residual_block(conv2,start_neurons * 2)
conv2 = residual_block(conv2,start_neurons * 2, True)
pool2 = MaxPooling2D((2, 2))(conv2)
pool2 = Dropout(DropoutRatio)(pool2)
# 25 -> 12
conv3 = Conv2D(start_neurons * 4, (3, 3), activation=None, padding="same")(pool2)
conv3 = residual_block(conv3,start_neurons * 4)
conv3 = residual_block(conv3,start_neurons * 4, True)
pool3 = MaxPooling2D((2, 2))(conv3)
pool3 = Dropout(DropoutRatio)(pool3)
# 12 -> 6
conv4 = Conv2D(start_neurons * 8, (3, 3), activation=None, padding="same")(pool3)
conv4 = residual_block(conv4,start_neurons * 8)
conv4 = residual_block(conv4,start_neurons * 8, True)
pool4 = MaxPooling2D((2, 2))(conv4)
pool4 = Dropout(DropoutRatio)(pool4)
# Middle
convm = Conv2D(start_neurons * 16, (3, 3), activation=None, padding="same")(pool4)
convm = residual_block(convm,start_neurons * 16)
convm = residual_block(convm,start_neurons * 16, True)
# 6 -> 12
deconv4 = Conv2DTranspose(start_neurons * 8, (3, 3), strides=(2, 2), padding="same")(convm)
uconv4 = concatenate([deconv4, conv4])
uconv4 = Dropout(DropoutRatio)(uconv4)
uconv4 = Conv2D(start_neurons * 8, (3, 3), activation=None, padding="same")(uconv4)
uconv4 = residual_block(uconv4,start_neurons * 8)
uconv4 = residual_block(uconv4,start_neurons * 8, True)
# 12 -> 25
#deconv3 = Conv2DTranspose(start_neurons * 4, (3, 3), strides=(2, 2), padding="same")(uconv4)
deconv3 = Conv2DTranspose(start_neurons * 4, (3, 3), strides=(2, 2), padding="valid")(uconv4)
uconv3 = concatenate([deconv3, conv3])
uconv3 = Dropout(DropoutRatio)(uconv3)
uconv3 = Conv2D(start_neurons * 4, (3, 3), activation=None, padding="same")(uconv3)
uconv3 = residual_block(uconv3,start_neurons * 4)
uconv3 = residual_block(uconv3,start_neurons * 4, True)
# 25 -> 50
deconv2 = Conv2DTranspose(start_neurons * 2, (3, 3), strides=(2, 2), padding="same")(uconv3)
uconv2 = concatenate([deconv2, conv2])
uconv2 = Dropout(DropoutRatio)(uconv2)
uconv2 = Conv2D(start_neurons * 2, (3, 3), activation=None, padding="same")(uconv2)
uconv2 = residual_block(uconv2,start_neurons * 2)
uconv2 = residual_block(uconv2,start_neurons * 2, True)
# 50 -> 101
#deconv1 = Conv2DTranspose(start_neurons * 1, (3, 3), strides=(2, 2), padding="same")(uconv2)
deconv1 = Conv2DTranspose(start_neurons * 1, (3, 3), strides=(2, 2), padding="valid")(uconv2)
uconv1 = concatenate([deconv1, conv1])
uconv1 = Dropout(DropoutRatio)(uconv1)
uconv1 = Conv2D(start_neurons * 1, (3, 3), activation=None, padding="same")(uconv1)
uconv1 = residual_block(uconv1,start_neurons * 1)
uconv1 = residual_block(uconv1,start_neurons * 1, True)
#uconv1 = Dropout(DropoutRatio/2)(uconv1)
#output_layer = Conv2D(1, (1,1), padding="same", activation="sigmoid")(uconv1)
output_layer_noActi = Conv2D(1, (1,1), padding="same", activation=None)(uconv1)
output_layer = Activation('sigmoid')(output_layer_noActi)
return output_layer
# In[11]:
def get_iou_vector(A, B):
batch_size = A.shape[0]
metric = []
for batch in range(batch_size):
t, p = A[batch]>0, B[batch]>0
intersection = np.logical_and(t, p)
union = np.logical_or(t, p)
iou = (np.sum(intersection > 0) + 1e-10 )/ (np.sum(union > 0) + 1e-10)
thresholds = np.arange(0.5, 1, 0.05)
s = []
for thresh in thresholds:
s.append(iou > thresh)
metric.append(np.mean(s))
return np.mean(metric)
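# Illustrative sanity check (not part of the original notebook): a near-perfect prediction
# gives iou ~= 1.0, so all ten thresholds from 0.5 to 0.95 pass and the metric is 1.0; a mask
# with no overlap gives iou ~= 0 and a metric of 0.0. An empty ground truth paired with an
# empty prediction also scores 1.0 because of the 1e-10 smoothing terms.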
def my_iou_metric(label, pred):
return tf.py_func(get_iou_vector, [label, pred>0.5], tf.float64)
def my_iou_metric_2(label, pred):
return tf.py_func(get_iou_vector, [label, pred >0], tf.float64)
# In[12]:
# code download from: https://github.com/bermanmaxim/LovaszSoftmax
def lovasz_grad(gt_sorted):
"""
Computes gradient of the Lovasz extension w.r.t sorted errors
See Alg. 1 in paper
"""
gts = tf.reduce_sum(gt_sorted)
intersection = gts - tf.cumsum(gt_sorted)
union = gts + tf.cumsum(1. - gt_sorted)
jaccard = 1. - intersection / union
jaccard = tf.concat((jaccard[0:1], jaccard[1:] - jaccard[:-1]), 0)
return jaccard
# --------------------------- BINARY LOSSES ---------------------------
def lovasz_hinge(logits, labels, per_image=True, ignore=None):
"""
Binary Lovasz hinge loss
logits: [B, H, W] Variable, logits at each pixel (between -\infty and +\infty)
labels: [B, H, W] Tensor, binary ground truth masks (0 or 1)
per_image: compute the loss per image instead of per batch
ignore: void class id
"""
if per_image:
def treat_image(log_lab):
log, lab = log_lab
log, lab = tf.expand_dims(log, 0), tf.expand_dims(lab, 0)
log, lab = flatten_binary_scores(log, lab, ignore)
return lovasz_hinge_flat(log, lab)
losses = tf.map_fn(treat_image, (logits, labels), dtype=tf.float32)
loss = tf.reduce_mean(losses)
else:
loss = lovasz_hinge_flat(*flatten_binary_scores(logits, labels, ignore))
return loss
def lovasz_hinge_flat(logits, labels):
"""
Binary Lovasz hinge loss
logits: [P] Variable, logits at each prediction (between -\infty and +\infty)
labels: [P] Tensor, binary ground truth labels (0 or 1)
ignore: label to ignore
"""
def compute_loss():
labelsf = tf.cast(labels, logits.dtype)
signs = 2. * labelsf - 1.
errors = 1. - logits * tf.stop_gradient(signs)
errors_sorted, perm = tf.nn.top_k(errors, k=tf.shape(errors)[0], name="descending_sort")
gt_sorted = tf.gather(labelsf, perm)
grad = lovasz_grad(gt_sorted)
#loss = tf.tensordot(tf.nn.relu(errors_sorted), tf.stop_gradient(grad), 1, name="loss_non_void")
loss = tf.tensordot(tf.nn.elu(errors_sorted), tf.stop_gradient(grad), 1, name="loss_non_void")
return loss
# deal with the void prediction case (only void pixels)
loss = tf.cond(tf.equal(tf.shape(logits)[0], 0),
lambda: tf.reduce_sum(logits) * 0.,
compute_loss,
strict=True,
name="loss"
)
return loss
def flatten_binary_scores(scores, labels, ignore=None):
"""
Flattens predictions in the batch (binary case)
Remove labels equal to 'ignore'
"""
scores = tf.reshape(scores, (-1,))
labels = tf.reshape(labels, (-1,))
if ignore is None:
return scores, labels
valid = tf.not_equal(labels, ignore)
vscores = tf.boolean_mask(scores, valid, name='valid_scores')
vlabels = tf.boolean_mask(labels, valid, name='valid_labels')
return vscores, vlabels
def lovasz_loss(y_true, y_pred):
y_true, y_pred = K.cast(K.squeeze(y_true, -1), 'int32'), K.cast(K.squeeze(y_pred, -1), 'float32')
#logits = K.log(y_pred / (1. - y_pred))
logits = y_pred #Jiaxin
loss = lovasz_hinge(logits, y_true, per_image = True, ignore = None)
return loss
# In[13]:
def predict_result(model,x_test,img_size_target): # predict both original and horizontally reflected x
x_test_reflect = np.array([np.fliplr(x) for x in x_test])
preds_test = model.predict(x_test).reshape(-1, img_size_target, img_size_target)
preds_test2_refect = model.predict(x_test_reflect).reshape(-1, img_size_target, img_size_target)
preds_test += np.array([ np.fliplr(x) for x in preds_test2_refect] )
return preds_test/2
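# Note: predict_result performs simple test-time augmentation -- it averages the model's
# prediction on the original image with the (flipped-back) prediction on its horizontal mirror.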
# In[14]:
def add_depth_coord(images):
""" Takes dataset (N, W, H, 1) returns (N, W, H, 3). """
if not DEPTH:
return images
assert(len(images.shape) == 4)
channel1 = np.zeros_like(images)
h = images.shape[1]
for row, const in enumerate(np.linspace(0, 1, h)):
channel1[:, row, ...] = const
channel2 = images * channel1
images = np.concatenate([images, channel1, channel2], axis=-1)
return images
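# Shape example (assuming DEPTH=True): an input batch of shape (N, 101, 101, 1) becomes
# (N, 101, 101, 3), where the second channel is a 0..1 row-wise "depth" gradient and the
# third channel is the image multiplied by that gradient.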
class SGDRScheduler(Callback):
'''Cosine annealing learning rate scheduler with periodic restarts.
# Usage
```python
schedule = SGDRScheduler(min_lr=1e-5,
max_lr=1e-2,
steps_per_epoch=np.ceil(epoch_size/batch_size),
lr_decay=0.9,
cycle_length=5,
mult_factor=1.5)
model.fit(X_train, Y_train, epochs=100, callbacks=[schedule])
```
# Arguments
min_lr: The lower bound of the learning rate range for the experiment.
max_lr: The upper bound of the learning rate range for the experiment.
steps_per_epoch: Number of mini-batches in the dataset. Calculated as `np.ceil(epoch_size/batch_size)`.
lr_decay: Reduce the max_lr after the completion of each cycle.
Ex. To reduce the max_lr by 20% after each cycle, set this value to 0.8.
cycle_length: Initial number of epochs in a cycle.
mult_factor: Scale epochs_to_restart after each full cycle completion.
# References
Blog post: jeremyjordan.me/nn-learning-rate
Original paper: http://arxiv.org/abs/1608.03983
'''
def __init__(self,
min_lr,
max_lr,
steps_per_epoch,
lr_decay=1,
cycle_length=10,
mult_factor=2):
self.min_lr = min_lr
self.max_lr = max_lr
self.lr_decay = lr_decay
self.batch_since_restart = 0
self.next_restart = cycle_length
self.steps_per_epoch = steps_per_epoch
self.cycle_length = cycle_length
self.mult_factor = mult_factor
self.history = {}
def clr(self):
'''Calculate the learning rate.'''
fraction_to_restart = self.batch_since_restart / (self.steps_per_epoch * self.cycle_length)
lr = self.min_lr + 0.5 * (self.max_lr - self.min_lr) * (1 + np.cos(fraction_to_restart * np.pi))
return lr
def on_train_begin(self, logs={}):
'''Initialize the learning rate to the minimum value at the start of training.'''
logs = logs or {}
K.set_value(self.model.optimizer.lr, self.max_lr)
def on_batch_end(self, batch, logs={}):
'''Record previous batch statistics and update the learning rate.'''
logs = logs or {}
self.history.setdefault('lr', []).append(K.get_value(self.model.optimizer.lr))
for k, v in logs.items():
self.history.setdefault(k, []).append(v)
self.batch_since_restart += 1
K.set_value(self.model.optimizer.lr, self.clr())
def on_epoch_end(self, epoch, logs={}):
'''Check for end of current cycle, apply restarts when necessary.'''
if epoch + 1 == self.next_restart:
self.batch_since_restart = 0
self.cycle_length = np.ceil(self.cycle_length * self.mult_factor)
self.next_restart += self.cycle_length
self.max_lr *= self.lr_decay
self.best_weights = self.model.get_weights()
    def on_train_end(self, logs={}):
        '''Set weights to the values from the end of the most recent cycle for best performance.'''
        # best_weights is only recorded at the end of a completed cycle, so guard against
        # training that stops before the first restart.
        if hasattr(self, 'best_weights'):
            self.model.set_weights(self.best_weights)
# In[15]:
#Data augmentation
import cv2
affine_seq = iaa.Sequential([
# General
iaa.SomeOf((1, 2),
[iaa.Fliplr(0.5),
iaa.Affine(rotate=(-10, 10),
translate_percent={"x": (-0.05, 0.05)},
mode='edge'),
# iaa.CropAndPad(percent=((0.0, 0.0), (0.05, 0.0), (0.0, 0.0), (0.05, 0.0)))
]),
# Deformations
iaa.Sometimes(0.3, iaa.PiecewiseAffine(scale=(0.04, 0.08))),
iaa.Sometimes(0.3, iaa.PerspectiveTransform(scale=(0.05, 0.1))),
], random_order=True)
intensity_seq = iaa.Sequential([
iaa.Invert(0.3),
iaa.Sometimes(0.3, iaa.ContrastNormalization((0.5, 1.5))),
iaa.OneOf([
iaa.Noop(),
iaa.Sequential([
iaa.OneOf([
iaa.Add((-10, 10)),
iaa.AddElementwise((-10, 10)),
iaa.Multiply((0.95, 1.05)),
iaa.MultiplyElementwise((0.95, 1.05)),
]),
]),
iaa.OneOf([
iaa.GaussianBlur(sigma=(0.0, 1.0)),
iaa.AverageBlur(k=(2, 5)),
iaa.MedianBlur(k=(3, 5))
])
])
], random_order=False)
def augment(x, y):
sometimes = lambda aug: iaa.Sometimes(0.3, aug)
seq = iaa.Sequential([
iaa.Fliplr(0.5), # horizontally flip
sometimes(iaa.Add((-10, 10))),
# iaa.OneOf([
# iaa.Noop(),
# iaa.PerspectiveTransform(scale=(0.04, 0.08)),
# iaa.Add((-10, 10)),
# iaa.ContrastNormalization((0.75, 1.5)),
# iaa.Emboss(alpha=(0, 1.0), strength=(0, 2.0)),
# iaa.EdgeDetect(alpha=(0, 0.7)),
# iaa.Noop(),
# sometimes(iaa.OneOf([
# iaa.EdgeDetect(alpha=(0, 0.7)),
# iaa.DirectedEdgeDetect(
# alpha=(0, 0.7), direction=(0.0, 1.0)
# ),
# ])),
# ]),
#sometimes(iaa.CropAndPad(
# percent=(-0.2, 0.2),
# pad_mode=["reflect"]
# )),
# sometimes(iaa.Sequential([
# iaa.Crop(percent=(0.2), keep_size=False),
# iaa.Scale({"height": img_size_target, "width": img_size_target}),
# iaa.Pad(percent=(0.2), pad_mode=["reflect"])
# ])),
])._to_deterministic()
images_aug_x = seq.augment_images(x)
images_aug_y = seq.augment_images(y)
return np.array(images_aug_x), np.array(images_aug_y)
# Return augmented images/masks arrays of batch size
def generator(features, labels, batch_size, repeat=1):
# create empty arrays to contain batch of features and labels
batch_features = np.zeros((batch_size, img_size_target, img_size_target, features.shape[3]))
batch_labels = np.zeros((batch_size, img_size_target, img_size_target, labels.shape[3]))
print(batch_features.shape)
while True:
# Fill arrays of batch size with augmented data taken randomly from full passed arrays
indexes = random.sample(range(len(features)), batch_size)*repeat
        # Perform exactly the same augmentation for X and y
random_augmented_images, random_augmented_labels = augment(np.apply_along_axis(np.squeeze, 1, features[indexes]*255).astype(np.uint8),
np.apply_along_axis(np.squeeze, 1, labels[indexes]*255).astype(np.uint8))
yield add_depth_coord(random_augmented_images/255), random_augmented_labels/255
#x_train = np.array(train_df.images.map(upsample).tolist()).reshape(-1, img_size_target, img_size_target, 3)
#y_train = np.array(train_df.masks.map(upsample).tolist()).reshape(-1, img_size_target, img_size_target, 3)
#x_test= np.array(test_df.images.map(upsample).tolist()).reshape(-1, img_size_target, img_size_target, 3)
x_train = np.array(train_df.images.tolist()).reshape(-1, img_size_target, img_size_target, 1)
y_train = np.array(train_df.masks.tolist()).reshape(-1, img_size_target, img_size_target, 1)
train_cls = np.array(train_df.coverage_class)
gc.collect()
#x_train, y_train, train_cls = augment(train_df)
# In[16]:
#Score the model and do a threshold optimization by the best IoU.
# src: https://www.kaggle.com/aglotero/another-iou-metric
def iou_metric(y_true_in, y_pred_in, print_table=False):
labels = y_true_in
y_pred = y_pred_in
true_objects = 2
pred_objects = 2
    # if all zeros, the original code generates wrong bins [-0.5 0 0.5],
temp1 = np.histogram2d(labels.flatten(), y_pred.flatten(), bins=([0,0.5,1], [0,0.5, 1]))
intersection = temp1[0]
area_true = np.histogram(labels,bins=[0,0.5,1])[0]
area_pred = np.histogram(y_pred, bins=[0,0.5,1])[0]
area_true = np.expand_dims(area_true, -1)
area_pred = np.expand_dims(area_pred, 0)
# Compute union
union = area_true + area_pred - intersection
# Exclude background from the analysis
intersection = intersection[1:,1:]
intersection[intersection == 0] = 1e-9
union = union[1:,1:]
union[union == 0] = 1e-9
# Compute the intersection over union
iou = intersection / union
# Precision helper function
def precision_at(threshold, iou):
matches = iou > threshold
true_positives = np.sum(matches, axis=1) == 1 # Correct objects
        false_positives = np.sum(matches, axis=0) == 0  # Extra (spurious) predicted objects
        false_negatives = np.sum(matches, axis=1) == 0  # Missed true objects
tp, fp, fn = np.sum(true_positives), np.sum(false_positives), np.sum(false_negatives)
return tp, fp, fn
# Loop over IoU thresholds
prec = []
if print_table:
print("Thresh\tTP\tFP\tFN\tPrec.")
for t in np.arange(0.5, 1.0, 0.05):
tp, fp, fn = precision_at(t, iou)
if (tp + fp + fn) > 0:
p = tp / (tp + fp + fn)
else:
p = 0
if print_table:
print("{:1.3f}\t{}\t{}\t{}\t{:1.3f}".format(t, tp, fp, fn, p))
prec.append(p)
if print_table:
print("AP\t-\t-\t-\t{:1.3f}".format(np.mean(prec)))
return np.mean(prec)
def iou_metric_batch(y_true_in, y_pred_in):
batch_size = y_true_in.shape[0]
metric = []
for batch in range(batch_size):
value = iou_metric(y_true_in[batch], y_pred_in[batch])
metric.append(value)
return np.mean(metric)
# In[17]:
"""
used for converting the decoded image to rle mask
Fast compared to previous one
"""
def rle_encode(im):
'''
im: numpy array, 1 - mask, 0 - background
    Returns run length as a formatted string
'''
pixels = im.flatten(order = 'F')
pixels = np.concatenate([[0], pixels, [0]])
runs = np.where(pixels[1:] != pixels[:-1])[0] + 1
runs[1::2] -= runs[::2]
return ' '.join(str(x) for x in runs)
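# Worked example (illustrative, not part of the original kernel):
# rle_encode(np.array([[1, 0, 0],
#                      [1, 0, 0],
#                      [0, 0, 0]])) == '1 2'
# i.e. in column-major (Fortran) order the mask has a single run starting at pixel 1 with length 2.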
# In[18]:
x_test = np.array(
[(np.array(
load_img("../data/raw/test/images/{}.png".format(idx),
color_mode = "grayscale",))) / 255 for idx in tqdm_notebook(test_df.index)]).reshape(
-1, img_size_target, img_size_target, 1)
# In[19]:
from sklearn.model_selection import StratifiedKFold, KFold
def get_adv_cv(data, adv_class=None, folds=FOLDS):
if len(adv_class)>0:
print(len(data),len(adv_class))
assert len(data) == len(adv_class)
kfold_selector = StratifiedKFold(n_splits=folds, random_state=SEED, shuffle=True)
return [(train_idx, val_idx) for train_idx, val_idx in kfold_selector.split(data, adv_class)]
else:
folds = KFold(n_splits=folds, shuffle=True, random_state=SEED)
return folds.split(data)
def filter_xy(x, y, th=10): #32
y = np.array([img if np.sum(img) > 100 else np.zeros_like(img) for img in y])
y_s = np.array([i.sum() for i in y])
return x[(y_s==0) | (y_s>th)], y[(y_s==0) | (y_s>th)]
# In[20]:
metric = 'my_iou_metric'
val_metric = 'val_' + metric
restore_from_file = True
metric_lov = 'my_iou_metric_2'
val_metric_lov = 'val_' + metric_lov
early_stopping = EarlyStopping(monitor=val_metric, mode='max', patience=20, verbose=1)
reduce_lr = ReduceLROnPlateau(monitor=val_metric, mode='max', factor=0.25, patience=10,
min_lr=0.0001, verbose=1)
early_stopping_lov = EarlyStopping(monitor=val_metric_lov, mode='max', patience=20, verbose=1)
reduce_lr_lov = ReduceLROnPlateau(monitor=val_metric_lov, mode='max', factor=0.25, patience=10,
min_lr=0.00005, verbose=1)
epochs = 400
batch_size = 128
#optimizer = RMSprop(lr=0.0001)
train_cls = np.array(train_df.coverage_class)
def get_oof(x_train, y, x_test):
ntrain = x_train.shape[0]
ntest = x_test.shape[0]
history = {}
threshold_best = {}
oof_train = np.zeros((ntrain, img_size_ori, img_size_ori))
oof_test = np.zeros((ntest, img_size_ori, img_size_ori))
oof_test_skf = np.empty((FOLDS, ntest, img_size_ori, img_size_ori))
model = None
for i, (train_index, test_index) in enumerate(get_adv_cv(x_train, train_cls, FOLDS)):
gc.collect()
print('\nFold {}'.format(i))
file_name = "../models/keras_unet_resnet_{0}_f{1}_{2}_v{3}.model".format(SEED, FOLDS, i, VERSION)
print(file_name)
y_valid_ori = np.array([y[idx] for idx in test_index])
model_checkpoint = ModelCheckpoint(file_name, monitor=val_metric, mode='max',
save_best_only=True, verbose=1)
x_tr = x_train[train_index, :]
y_tr = y[train_index]
x_te = add_depth_coord(x_train[test_index, :])
y_te = y[test_index]
print(x_tr.shape, y_tr.shape, x_te.shape)
x_tr, y_tr = filter_xy(x_tr, y_tr)
print(x_tr.shape, y_tr.shape, x_te.shape)
x_te_ext = np.append(x_te, [np.fliplr(x) for x in x_te], axis=0)
y_te_ext = np.append(y_te, [np.fliplr(x) for x in y_te], axis=0)
#g = generator(x_te, y_te, x_te.shape[0], 4)
#x_te, y_te = next(g)
print('new validation size:', x_te_ext.shape, y_te_ext.shape)
learning_rate = 0.01
depth = 1
if DEPTH:
depth = 3
input_layer = Input((img_size_target, img_size_target, depth))
output_layer = build_model(input_layer, 16, 0.5)
model1 = Model(input_layer, output_layer)
c = optimizers.adam(lr = learning_rate)
model1.compile(loss="binary_crossentropy", optimizer=c, metrics=[my_iou_metric])
if (not restore_from_file) or (not os.path.isfile(file_name)):
history[(i, 0)] = model1.fit_generator(generator(x_tr, y_tr, batch_size),
validation_data=[x_te_ext, y_te_ext],
epochs=epochs,
callbacks=[early_stopping, model_checkpoint, reduce_lr],
use_multiprocessing=True,
workers=1,
steps_per_epoch=len(x_tr)*2/batch_size,
)
model_lov = load_model(file_name, custom_objects={metric: my_iou_metric})
input_x = model_lov.layers[0].input
output_layer = model_lov.layers[-1].input
model = Model(input_x, output_layer)
learning_rate = 0.005
c = optimizers.adam(lr = learning_rate)
model.compile(loss=lovasz_loss, optimizer=c, metrics=[my_iou_metric_2])
model_checkpoint = ModelCheckpoint(file_name,
monitor=val_metric_lov,
mode='max',
save_best_only=True,
verbose=1)
history[(i, 1)] = model.fit_generator(generator(x_tr, y_tr, batch_size),
validation_data=[x_te_ext, y_te_ext],
epochs=epochs,
callbacks=[early_stopping_lov, model_checkpoint, reduce_lr_lov],
use_multiprocessing=True,
workers=1,
steps_per_epoch=len(x_tr)*2/batch_size,
)
schedule = SGDRScheduler(min_lr=1e-8, max_lr=3e-2, steps_per_epoch=np.ceil(len(x_tr)*2/batch_size),
lr_decay=0.8, cycle_length=5, mult_factor=1.5)
history[(i, 2)] = model.fit_generator(generator(x_tr, y_tr, batch_size),
validation_data=[x_te_ext, y_te_ext],
epochs=epochs,
callbacks=[early_stopping_lov, model_checkpoint, schedule],
use_multiprocessing=True,
workers=1,
steps_per_epoch=len(x_tr)*2/batch_size,
)
else:
model = load_model(file_name, custom_objects={'my_iou_metric_2': my_iou_metric_2,
'lovasz_loss':lovasz_loss})
#tta_model = model#tta_segmentation(model, h_flip=True, merge='mean')
#tta_model = TTA_ModelWrapper(model)
oof_train[test_index] = np.array([x for x in predict_result(model, x_te, img_size_target).reshape(-1, img_size_target, img_size_target)])
oof_test_skf[i, :] = np.array([x for x in predict_result(model, add_depth_coord(x_test), img_size_target).reshape(-1, img_size_target, img_size_target)])
thresholds = np.linspace(1e-5, .9999, 50)
thresholds = np.log(thresholds/(1-thresholds))
print(thresholds)
ious = np.array([
iou_metric_batch(
y_valid_ori, np.int32(
oof_train[test_index] > threshold)) for threshold in tqdm_notebook(thresholds)])
threshold_best_index = np.argmax(ious)
print('ious: ', ious)
iou_best = ious[threshold_best_index]
threshold_best[i] = thresholds[threshold_best_index]
print('threshold_best[{0}]: {1}'.format(i, threshold_best[i]))
print('iou_best: ', iou_best)
oof_train[test_index] = oof_train[test_index] > threshold_best[i]
oof_test_skf[i, :] = oof_test_skf[i, :] > threshold_best[i]
oof_test[:] += oof_test_skf[i, :] / FOLDS
del model
#del tta_model
#oof_test[:] = oof_test_skf.mean(axis=0)
return oof_train, oof_test, oof_test_skf, history, threshold_best
# In[21]:
oof_train, oof_test, oof_test_skf, k_history, threshold_best = get_oof(x_train, y_train, x_test)
#0.802860696517413 0.8163545568039949 0.8210000000000001 0.814142678347935 0.8138190954773868
# In[ ]:
pred_dict = {idx: rle_encode(np.round(oof_test[i])) for i, idx in enumerate(tqdm_notebook(test_df.index.values))}
# In[ ]:
sub = pd.DataFrame.from_dict(pred_dict, orient='index')
sub.index.names = ['id']
sub.columns = ['rle_mask']
#sub.to_csv('../submissions/submission_oof_{0}_unet_resnet_v{1}.csv'.format(FOLDS, VERSION))
# In[ ]:
val_iou = iou_metric_batch(y_train, oof_train)
val_iou
# In[ ]:
str(np.round(val_iou, 3))[2:]
# In[ ]:
#0.8114000000000001 0.813625 0.8077750000000001
# In[ ]:
gc.collect()
pickle.dump(oof_train, open('../pickle/train_oof_{0}_unet_v{1}'.format(FOLDS, VERSION), 'wb+'), protocol=4)
pickle.dump(oof_test, open('../pickle/test_oof_{0}_unet_v{1}'.format(FOLDS, VERSION), 'wb+'), protocol=4)
#pickle.dump(oof_test_skf, open('../pickle/test_skf_{0}_oof_unet_v{1}'.format(FOLDS, VERSION), 'wb+'), protocol=4)
pickle.dump(threshold_best, open('../pickle/threshold_best_{0}_unet_v{1}'.format(FOLDS, VERSION), 'wb+'), protocol=4)
#for i in oof_test_skf:
#pickle.dump(oof_test_skf, open('../pickle/test_skf_{0}_oof_unet_v{1}'.format(FOLDS, VERSION), 'wb+'), protocol=4)
# In[ ]:
valid_dict = {idx: rle_encode(np.round(oof_train[i])) for i, idx in enumerate(tqdm_notebook(train_df.index.values))}
val = | pd.DataFrame.from_dict(valid_dict, orient='index') | pandas.DataFrame.from_dict |
import os
import keras
import keras.backend as backend
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from keras.callbacks import CSVLogger, History
from keras.layers import Input, Dense, Dropout, BatchNormalization
from keras.models import Model
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn import svm
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import KFold, StratifiedKFold
from sklearn.model_selection import train_test_split
from sklearn.multiclass import OneVsRestClassifier
from sklearn.preprocessing import normalize, LabelEncoder, label_binarize
"""
Created by <NAME> on 8/1/18.
Email : <EMAIL> or <EMAIL>
Website: http://ce.sharif.edu/~naghipourfar
Github: https://github.com/naghipourfar
Skype: mn7697np
"""
n_epochs = 300
batch_size = 32
def create_regressor(n_features, layers, n_outputs, optimizer=None):
input_layer = Input(shape=(n_features,))
dense = Dense(layers[0], activation='relu', name="dense_0")(input_layer)
dense = BatchNormalization()(dense)
dense = Dropout(0.5)(dense)
for i, layer in enumerate(layers[1:]):
dense = Dense(layer, activation='relu', name="dense_{0}".format(i + 1))(dense)
dense = BatchNormalization()(dense)
dense = Dropout(0.5)(dense)
dense = Dense(n_outputs, activation='sigmoid', name="output")(dense)
model = Model(inputs=input_layer, outputs=dense)
if optimizer is None:
optimizer = keras.optimizers.SGD(lr=0.01, momentum=0.9, decay=1e-6, nesterov=True)
model.compile(optimizer=optimizer, loss=["mse"], metrics=["mae"])
return model
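# Example usage (hypothetical layer sizes, for illustration only):
# regressor = create_regressor(n_features=50000, layers=[1024, 256, 64], n_outputs=1)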
def random_classifier(drug_name=None, prediction_class=None):
accuracies = {}
data_directory = '../Data/CCLE/Classification/FS/'
if drug_name:
compounds = [drug_name + ".csv"]
else:
compounds = os.listdir(data_directory)
print("All Compounds:")
print(compounds)
for compound in compounds:
if compound.endswith(".csv") and not (
compound.__contains__("PLX4720") or compound.__contains__("Panobinostat")):
name = compound.split(".")[0]
print("*" * 50)
print(compound)
print("Loading Data...")
x_data, y_data = load_data(data_path=data_directory + compound, feature_selection=True)
print("Data has been Loaded!")
x_data = normalize_data(x_data)
print("Data has been normalized!")
n_samples = x_data.shape[0]
if prediction_class is None:
y_pred = np.random.random_integers(low=0, high=1, size=(n_samples, 1))
else:
if prediction_class == 1:
y_pred = np.ones(shape=[n_samples, 1])
else:
y_pred = np.zeros(shape=[n_samples, 1])
accuracies[name] = accuracy_score(y_data, y_pred)
print("%s's Accuracy\t:\t%.4f%%" % (compound.split(".")[0], 100 * accuracy_score(y_data, y_pred)))
log_path = "../Results/Classification/ML/"
log_name = "Random" + "-" + str(prediction_class) + ".csv" if prediction_class is not None else "Random.csv"
accuracies = pd.DataFrame(accuracies, index=[0])
accuracies.to_csv(log_path + log_name)
def create_SAE(n_features=50000, n_code=12):
input_layer = Input(shape=(n_features,))
dense = Dense(2048, activation='relu', name="dense_0")(input_layer)
dense = BatchNormalization()(dense)
dense = Dropout(0.2)(dense)
dense = Dense(1024, activation='relu', name="dense_1")(dense)
dense = BatchNormalization()(dense)
dense = Dropout(0.5)(dense)
dense = Dense(256, activation='relu', name="dense_2")(dense)
dense = BatchNormalization()(dense)
dense = Dropout(0.5)(dense)
dense = Dense(64, activation='relu', name="dense_3")(dense)
dense = BatchNormalization()(dense)
dense = Dropout(0.5)(dense)
encoded = Dense(n_code, activation='relu', name="encoded")(dense)
dense = Dense(512, activation="relu", name="dense_4")(encoded)
dense = BatchNormalization()(dense)
dense = Dropout(0.5)(dense)
decoded = Dense(n_features, activation='sigmoid', name="decoded")(dense)
cl_output = Dense(2, activation="softmax", name="classifier")(encoded)
model = Model(inputs=input_layer, outputs=[decoded, cl_output])
model.summary()
lambda_value = 9.5581e-3
def contractive_loss(y_pred, y_true):
mse = backend.mean(backend.square(y_true - y_pred), axis=1)
        w = backend.variable(value=model.get_layer('encoded').get_weights()[0]) # N x N_hidden
        w = backend.transpose(w) # N_hidden x N
        h = model.get_layer('encoded').output
        dh = h * (1 - h) # N_batch x N_hidden
        # N_batch x N_hidden * N_hidden x 1 = N_batch x 1
contractive = lambda_value * backend.sum(dh ** 2 * backend.sum(w ** 2, axis=1), axis=1)
return mse + contractive
reconstructor_loss = contractive_loss
classifier_loss = "categorical_crossentropy"
optimizer = keras.optimizers.Nadam(lr=0.005, beta_1=0.95)
model.compile(optimizer=optimizer, loss=[reconstructor_loss, classifier_loss],
loss_weights=[0.005, 0.005],
metrics={"decoded": ["mae", "mse", "mape"], "classifier": "acc"})
return model
def create_classifier(n_features=51, layers=None, n_outputs=1):
if layers is None:
layers = [1024, 256, 64, 16, 4]
input_layer = Input(shape=(n_features,))
dense = Dense(layers[0], activation='relu', name="dense_0")(input_layer)
dense = BatchNormalization()(dense)
dense = Dropout(0.5)(dense)
for i, layer in enumerate(layers[1:]):
dense = Dense(layer, activation='relu', name="dense_{0}".format(i + 1))(dense)
dense = BatchNormalization()(dense)
dense = Dropout(0.5)(dense)
optimizer = keras.optimizers.adamax()
if n_outputs > 1:
dense = Dense(n_outputs, activation='softmax', name="output")(dense)
loss = keras.losses.categorical_crossentropy
else:
dense = Dense(n_outputs, activation='sigmoid', name="output")(dense)
loss = keras.losses.binary_crossentropy
model = Model(inputs=input_layer, outputs=dense)
model.compile(optimizer=optimizer, loss=loss, metrics=["accuracy"])
return model
def load_data(data_path="../Data/CCLE/drug_response.csv", feature_selection=False):
if data_path.__contains__("/FS/"):
data = pd.read_csv(data_path)
else:
data = pd.read_csv(data_path, index_col="Cell Line")
if data_path.__contains__("Regression"):
y_data = data['IC50 (uM)']
x_data = data.drop(['IC50 (uM)'], axis=1)
else:
y_data = data['class']
x_data = data.drop(['class'], axis=1)
label_encoder = LabelEncoder()
y_data = label_encoder.fit_transform(y_data)
y_data = np.reshape(y_data, (-1, 1))
y_data = keras.utils.to_categorical(y_data, 2)
if feature_selection and not data_path.__contains__("/FS/"):
feature_names = list(pd.read_csv("../Data/BestFeatures.csv", header=None).loc[0, :])
x_data = data[feature_names]
return np.array(x_data), np.array(y_data)
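# Example usage (hypothetical path): x_data, y_data = load_data("../Data/CCLE/Classification/FS/AEW541.csv")
# For classification files the labels come back one-hot encoded with two columns.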
def produce_classification_data(compounds):
for compound in compounds:
name = compound.split(".")[0]
print(compound, end="\t")
data = | pd.read_csv("../Data/CCLE/Regression/" + name + "_preprocessed.csv") | pandas.read_csv |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from statistics import mean
def InputsSolarMarket():
# ------------------------ Inputs ------------------------------#
# Day Ahead Energy Market 2018
dataset_E = pd.read_csv('in/all_ercot_profiles_hourly_2018.csv')
E_price = dataset_E.iloc[0:8762, 4].values # Creating the price Vector $/MWh, start at 1/1/2018 00:00 CST
# Day Ahead Market - AS Down Regulation ERCOT hourly prices 2018
dataset_AS = pd.read_csv('in/AS_price.csv')
ASM_price = dataset_AS.iloc[70080:78840, 9].values # Creating the price Vector $/MWh, start at 1/1/2018 00:00 CST
# Solar CF
dataset_solar = pd.read_csv('in/all_ercot_profiles_hourly_2018.csv') # Reading the dataset of solar gen CF
solar_cf = dataset_solar.iloc[0:8762, 1].values # Creating the solar generation Vector, start 1/1/2018 00:00 (CST)
return E_price, solar_cf, ASM_price
def InputsSolarUncertainMul(eta):
# ---------Imports ------#
data = pd.DataFrame(pd.read_csv('in/all_ercot_profiles_hourly.csv', sep=';')) # Reading the dataset of solar gen CF
dataset_solar = data.loc[:, ['year', 'CF_model_solar']]
data_raw2015 = pd.DataFrame(pd.read_csv('in/solar_TX.csv', sep=';')) # Reading the dataset of solar gen CF
data2015 = data_raw2015.iloc[0:8760, 5].values.tolist()
df_years = pd.DataFrame({'CF_2015': data2015, 'CF_2016': dataset_solar.loc[
dataset_solar['year'] == 2016, 'CF_model_solar'].values.tolist(), 'CF_2017': dataset_solar.loc[
dataset_solar['year'] == 2017, 'CF_model_solar'].values.tolist(), 'CF_2018': dataset_solar.loc[
dataset_solar['year'] == 2018, 'CF_model_solar'].values.tolist()})
df = df_years.stack()
# --------Summary statistics - annual average day repeated ---#
df_years['Av_CF'] = df_years.mean(axis=1)
df_years['Std_CF'] = df_years.std(axis=1)
mean_cf = np.array(df_years['Av_CF'])
std_cf = np.array(df_years['Std_CF'])
# Inverse cdf for average year
#inv_cdf = stat.mean([np.percentile(df_years['CF_2015'], eta), np.percentile(df_years['CF_2016'], eta), np.percentile(df_years['CF_2017'], eta), np.percentile(df_years['CF_2018'], eta)])
#Above is for the stacked version - no!
#inv_cdf = np.percentile(df_years['Av_CF'], eta)
inv_cdf_raw = np.percentile(df_years['Av_CF'], eta)
inv_cdf = np.array([inv_cdf_raw for i in range(8760)])
# --------Create plots of cdf --------------#
num_bins = int(np.ceil(np.sqrt(8760)))
data = df_years['CF_2015']
counts, bin_edges = np.histogram(data, bins=num_bins)
cdf = np.cumsum(counts)
plt.plot(bin_edges[1:], cdf / cdf[-1], color='darkcyan', label='2015')
data = df_years['CF_2016']
counts, bin_edges = np.histogram(data, bins=num_bins)
cdf = np.cumsum(counts)
plt.plot(bin_edges[1:], cdf / cdf[-1], color='powderblue', label='2016')
data = df_years['CF_2017']
counts, bin_edges = np.histogram(data, bins=num_bins)
cdf = np.cumsum(counts)
plt.plot(bin_edges[1:], cdf / cdf[-1], color='darkturquoise', label='2017')
data = df_years['CF_2018']
counts, bin_edges = np.histogram(data, bins=num_bins)
cdf = np.cumsum(counts)
plt.plot(bin_edges[1:], cdf / cdf[-1], color='yellowgreen', label='2018')
data = df_years['Av_CF']
counts, bin_edges = np.histogram(data, bins=num_bins)
cdf = np.cumsum(counts)
plt.plot(bin_edges[1:], cdf / cdf[-1], color='black', label='Av')
data = df
counts, bin_edges = np.histogram(data, bins=num_bins)
cdf = np.cumsum(counts)
plt.plot(bin_edges[1:], cdf / cdf[-1], color='red', label='Stack')
plt.xlabel('Solar Capacity Factor', fontsize=10)
plt.ylabel('CDF', fontsize=10)
plt.title('Multi-year and average solar capacity factor', fontsize=12)
plt.legend()
plt.show()
return mean_cf, std_cf, inv_cdf
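# Example (illustrative): mean_cf, std_cf, inv_cdf = InputsSolarUncertainMul(eta=10)
# returns the per-hour mean and std of the solar CF across the four years, plus the
# eta-th percentile of the average-year CF repeated for all 8760 hours.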
def InputsSolarUncertainHourly(eta, seasonal): #not used right now
# ---------Imports ------#
data = pd.DataFrame(pd.read_csv('in/all_ercot_profiles_hourly.csv', sep=';'))
dataset_solar_raw = data.loc[:, ['local_time_hb', 'CF_model_solar']]
data_raw2015 = pd.DataFrame(pd.read_csv('in/solar_TX.csv', sep=';')) # Reading the dataset of solar gen CF
data2015 = pd.DataFrame(data_raw2015.iloc[0:8760, 5])
    data2015['local_time_hb'] = dataset_solar_raw.loc[0:8759, ['local_time_hb']] # adjust for the six-hour time difference
#consolidate data for 4 years
dataset_solar = data2015.append(dataset_solar_raw, ignore_index=True)
# --------Parse data----#
dataset_solar.loc[:, 'dates-to-parse'] = | pd.to_datetime(dataset_solar['local_time_hb']) | pandas.to_datetime |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 7 12:04:39 2018
@author: saintlyvi
"""
import time
import pandas as pd
import numpy as np
from sklearn.cluster import MiniBatchKMeans, KMeans
import somoclu
from experiment.algorithms.cluster_prep import xBins, preprocessX, clusterStats, bestClusters, saveLabels, saveResults
def kmeans(X, range_n_clusters, top_lbls=10, preprocessing = None, bin_X=False, experiment_name=None):
"""
This function applies the MiniBatchKmeans algorithm from sklearn on inputs X for range_n_clusters.
If preprossing = True, X is normalised with sklearn.preprocessing.normalize()
Returns cluster stats, cluster centroids and cluster labels.
"""
if experiment_name is None:
save = False
else:
if preprocessing is None:
pass
else:
experiment_name = experiment_name+'_'+ preprocessing
save = True
#apply pre-binning
if bin_X != False:
Xbin = xBins(X, bin_X)
else:
Xbin = {'all':X}
for b, ids in Xbin.items():
try:
A = X.loc[ids,:]
except:
A = ids
#apply preprocessing
A = preprocessX(A, norm=preprocessing)
centroids = pd.DataFrame()
stats = pd.DataFrame()
cluster_lbls = pd.DataFrame()
        dim = 0 # set dim to 0 to match SOM formatting
cluster_lbls_dim = {}
stats_dim = pd.DataFrame()
for n_clust in range_n_clusters:
clusterer = MiniBatchKMeans(n_clusters=n_clust, random_state=10)
#train clustering algorithm
tic = time.time()
clusterer.fit(A)
cluster_labels = clusterer.predict(A)
toc = time.time()
## Calculate scores
cluster_stats = clusterStats({}, n_clust, A, cluster_labels,
preprocessing = preprocessing, transform = None,
tic = tic, toc = toc)
cluster_centroids = clusterer.cluster_centers_
eval_results, centroid_results = saveResults(experiment_name, cluster_stats,
cluster_centroids, dim, b, save)
stats_dim = stats_dim.append(eval_results)
centroids = centroids.append(centroid_results)
cluster_lbls_dim[n_clust] = cluster_labels
#outside n_clust loop
best_clusters, best_stats = bestClusters(cluster_lbls_dim, stats_dim, top_lbls)
cluster_lbls = pd.concat([cluster_lbls, best_clusters], axis=1)
stats = pd.concat([stats, best_stats], axis=0)
stats.reset_index(drop=True, inplace=True)
if save is True:
saveLabels(cluster_lbls, stats)
return stats, centroids, cluster_lbls
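# Example usage (hypothetical inputs): X is a DataFrame of daily load profiles;
# stats, centroids, labels = kmeans(X, range_n_clusters=range(3, 8), top_lbls=5)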
def som(X, range_n_dim, top_lbls=10, preprocessing = None, bin_X=False, transform=None, experiment_name=None, **kwargs):
"""
This function applies the self organising maps algorithm from somoclu on inputs X over square maps of range_n_dim.
If preprossing = True, X is normalised with sklearn.preprocessing.normalize()
If kmeans = True, the KMeans algorithm from sklearn is applied to the SOM and returns clusters
kwargs can be n_clusters = range(start, end, interval) OR list()
Returns cluster stats, cluster centroids and cluster labels.
"""
for dim in range_n_dim:
limit = int(np.sqrt(len(X)/20))
        if dim > limit: # verify that the number of nodes is sensible for the size of the input data
return print('Input size too small for map. Largest n should be ' + str(limit))
else:
pass
if experiment_name is None:
save = False
else:
if preprocessing is None:
pass
else:
experiment_name = experiment_name+'_'+ preprocessing
save = True
#apply pre-binning
if bin_X != False:
Xbin = xBins(X, bin_X)
else:
Xbin = {'0-4000':X}
for b, ids in Xbin.items():
try:
A = X.loc[ids,:]
except:
A = ids
#apply preprocessing
A = preprocessX(A, norm=preprocessing)
centroids = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
from tqdm import tqdm
from src.io.img_path_parser import get_paths
class DetectorBase():
def detect_image(self, img):
raise NotImplementedError()
def detect_images(self, imgs):
result = []
for img in tqdm(imgs):
pred = self.detect_image(img[1])
row = [img[0]]
for p in pred:
bbox = p[2]
bbox_str = "{x} {y} {w} {h}".format(x=bbox[0], y=bbox[1], w=bbox[2], h=bbox[3])
concept = p[0]
try:
concept = concept.decode('utf-8')
except AttributeError:
pass
row.extend([concept, p[1], bbox_str])
result.append(row)
return result
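    # Illustrative contract for subclasses (hypothetical values): detect_image should return
    # an iterable of (concept, score, (x, y, w, h)) tuples, e.g.
    #   [("person", 0.93, (12, 30, 64, 128))]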
def detect_images_df(self, imgs):
print("Predicting...")
rows = self.detect_images(imgs)
cols = ["image_id"]
if len(rows) > 0:
col_count = max(len(r) for r in rows)
col_count = int((col_count - 1) / 3)
for i in range(col_count):
current_nr = str(i + 1).zfill(2)
cols.append("concept_class_top{nr}".format(nr=current_nr))
cols.append("concept_score_top{nr}".format(nr=current_nr))
cols.append("concept_bbox_top{nr}".format(nr=current_nr))
res = | pd.DataFrame(columns=cols, data=rows) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# In[2]:
import pandas as pd
from datetime import datetime
import requests
import numpy as np
import re
from alphacast import Alphacast
from dotenv import dotenv_values
API_KEY = dotenv_values(".env").get("API_KEY")
alphacast = Alphacast(API_KEY)
df = | pd.read_excel("https://ine.cl/docs/default-source/%C3%ADndice-de-precios-al-consumidor/cuadros-estadisticos/series-empalmadas-y-antecedentes-historicos/series-empalmadas-diciembre-2009-a-la-fecha/anal%C3%ADticos-empalmados-base-2018-xls.xlsx?sfvrsn=b0dd286_48", skiprows=4, sheet_name='Analiticos Base 2018=100') | pandas.read_excel |
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from scipy.sparse import issparse
from pandas.api.types import is_numeric_dtype, is_categorical_dtype, is_list_like
from scipy.stats import zscore
from sklearn.metrics import adjusted_mutual_info_score
from natsort import natsorted
import anndata
from pegasusio import UnimodalData, MultimodalData
from typing import List, Tuple, Union, Optional, Callable
import logging
logger = logging.getLogger(__name__)
from pegasus.tools import X_from_rep, slicing
from .plot_utils import _transform_basis, _get_nrows_and_ncols, _get_marker_size, _get_dot_size, _get_subplot_layouts, _get_legend_ncol, _get_palette, RestrictionParser, DictWithDefault, _generate_categories, _plot_corners
def scatter(
data: Union[MultimodalData, UnimodalData, anndata.AnnData],
attrs: Union[str, List[str]],
basis: Optional[str] = "umap",
matkey: Optional[str] = None,
restrictions: Optional[Union[str, List[str]]] = None,
show_background: Optional[bool] = False,
alpha: Optional[Union[float, List[float]]] = 1.0,
legend_loc: Optional[Union[str, List[str]]] = "right margin",
legend_ncol: Optional[str] = None,
palettes: Optional[Union[str, List[str]]] = None,
cmaps: Optional[Union[str, List[str]]] = "YlOrRd",
vmin: Optional[float] = None,
vmax: Optional[float] = None,
nrows: Optional[int] = None,
ncols: Optional[int] = None,
panel_size: Optional[Tuple[float, float]] = (4, 4),
left: Optional[float] = 0.2,
bottom: Optional[float] = 0.15,
wspace: Optional[float] = 0.4,
hspace: Optional[float] = 0.15,
return_fig: Optional[bool] = False,
dpi: Optional[float] = 300.0,
**kwargs,
) -> Union[plt.Figure, None]:
"""Generate scatter plots for different attributes
Parameters
----------
data: ``pegasusio.MultimodalData``
Use current selected modality in data.
attrs: ``str`` or ``List[str]``
Color scatter plots by attrs. Each attribute in attrs can be one key in data.obs, data.var_names (e.g. one gene) or data.obsm (attribute has the format of 'obsm_key@component', like 'X_pca@0'). If one attribute is categorical, a palette will be used to color each category separately. Otherwise, a color map will be used.
basis: ``str``, optional, default: ``umap``
Basis to be used to generate scatter plots. Can be either 'umap', 'tsne', 'fitsne', 'fle', 'net_tsne', 'net_fitsne', 'net_umap' or 'net_fle'.
matkey: ``str``, optional, default: None
If matkey is set, select matrix with matkey as keyword in the current modality. Only works for MultimodalData or UnimodalData objects.
restrictions: ``str`` or ``List[str]``, optional, default: None
A list of restrictions to subset data for plotting. There are two types of restrictions: global restriction and attribute-specific restriction. Global restriction appiles to all attributes in ``attrs`` and takes the format of 'key:value,value...', or 'key:~value,value...'. This restriction selects cells with the ``data.obs[key]`` values belong to 'value,value...' (or not belong to if '~' shows). Attribute-specific restriction takes the format of 'attr:key:value,value...', or 'attr:key:~value,value...'. It only applies to one attribute 'attr'. If 'attr' and 'key' are the same, one can use '.' to replace 'key' (e.g. ``cluster_labels:.:value1,value2``).
show_background: ``bool``, optional, default: False
Only applicable if `restrictions` is set. By default, only data points selected are shown. If show_background is True, data points that are not selected will also be shown.
alpha: ``float`` or ``List[float]``, optional, default: ``1.0``
Alpha value for blending, from 0.0 (transparent) to 1.0 (opaque). If this is a list, the length must match attrs, which means we set a separate alpha value for each attribute.
legend_loc: ``str`` or ``List[str]``, optional, default: ``right margin``
Legend location. Can be either "right margin" or "on data". If a list is provided, set 'legend_loc' for each attribute in 'attrs' separately.
legend_ncol: ``str``, optional, default: None
Only applicable if legend_loc == "right margin". Set number of columns used to show legends.
palettes: ``str`` or ``List[str]``, optional, default: None
        Used for setting colors for every category in categorical attributes. Each string in ``palettes`` takes the format of 'attr:color1,color2,...,colorn'. 'attr' is the categorical attribute and 'color1' - 'colorn' are the colors for each category in 'attr' (e.g. 'cluster_labels:black,blue,red,...,yellow'). If there is only one categorical attribute in 'attrs', ``palettes`` can be set as a single string and the 'attr' keyword can be omitted (e.g. "blue,yellow,red").
cmaps: ``str`` or ``List[str]``, optional, default: ``YlOrRd``
Used for setting colormap for numeric attributes. Each string in ``cmaps`` takes the format of 'colormap' or 'attr:colormap'. 'colormap' sets the default colormap for all numeric attributes. 'attr:colormap' overwrites attribute 'attr's colormap as 'colormap'.
vmin: ``float``, optional, default: None
Minimum value to show on a numeric scatter plot (feature plot).
vmax: ``float``, optional, default: None
Maximum value to show on a numeric scatter plot (feature plot).
nrows: ``int``, optional, default: None
Number of rows in the figure. If not set, pegasus will figure it out automatically.
ncols: ``int``, optional, default: None
Number of columns in the figure. If not set, pegasus will figure it out automatically.
    panel_size: `tuple`, optional (default: `(4, 4)`)
The panel size (width, height) in inches.
left: `float`, optional (default: `0.2`)
This parameter sets the figure's left margin as a fraction of panel's width (left * panel_size[0]).
bottom: `float`, optional (default: `0.15`)
This parameter sets the figure's bottom margin as a fraction of panel's height (bottom * panel_size[1]).
wspace: `float`, optional (default: `0.4`)
This parameter sets the width between panels and also the figure's right margin as a fraction of panel's width (wspace * panel_size[0]).
    hspace: `float`, optional (default: `0.15`)
This parameter sets the height between panels and also the figure's top margin as a fraction of panel's height (hspace * panel_size[1]).
return_fig: ``bool``, optional, default: ``False``
Return a ``Figure`` object if ``True``; return ``None`` otherwise.
dpi: ``float``, optional, default: 300.0
The resolution of the figure in dots-per-inch.
Returns
-------
`Figure` object
        A ``matplotlib.figure.Figure`` object containing the scatter plot if ``return_fig == True``
Examples
--------
>>> pg.scatter(data, attrs=['louvain_labels', 'Channel'], basis='fitsne')
>>> pg.scatter(data, attrs=['CD14', 'TRAC'], basis='umap')
"""
if not is_list_like(attrs):
attrs = [attrs]
nattrs = len(attrs)
if not isinstance(data, anndata.AnnData):
cur_matkey = data.current_matrix()
if matkey is not None:
assert not isinstance(data, anndata.AnnData)
data.select_matrix(matkey)
x = data.obsm[f"X_{basis}"][:, 0]
y = data.obsm[f"X_{basis}"][:, 1]
# four corners of the plot
corners = np.array(np.meshgrid([x.min(), x.max()], [y.min(), y.max()])).T.reshape(-1, 2)
basis = _transform_basis(basis)
marker_size = _get_marker_size(x.size)
nrows, ncols = _get_nrows_and_ncols(nattrs, nrows, ncols)
fig, axes = _get_subplot_layouts(nrows=nrows, ncols=ncols, panel_size=panel_size, dpi=dpi, left=left, bottom=bottom, wspace=wspace, hspace=hspace, squeeze=False)
if not is_list_like(alpha):
alpha = [alpha] * nattrs
if not is_list_like(legend_loc):
legend_loc = [legend_loc] * nattrs
legend_fontsize = [5 if x == "on data" else 10 for x in legend_loc]
palettes = DictWithDefault(palettes)
cmaps = DictWithDefault(cmaps)
restr_obj = RestrictionParser(restrictions)
restr_obj.calc_default(data)
for i in range(nrows):
for j in range(ncols):
ax = axes[i, j]
ax.grid(False)
ax.set_xticks([])
ax.set_yticks([])
if i * ncols + j < nattrs:
pos = i * ncols + j
attr = attrs[pos]
if attr in data.obs:
values = data.obs[attr].values
elif attr in data.var_names:
loc = data.var_names.get_loc(attr)
values = slicing(data.X, col = loc)
else:
obsm_key, sep, component = attr.partition("@")
if (sep != "@") or (obsm_key not in data.obsm) or (not component.isdigit()):
raise KeyError(f"{attr} is not in data.obs, data.var_names or data.obsm!")
values = data.obsm[obsm_key][:, int(component)]
selected = restr_obj.get_satisfied(data, attr)
if is_numeric_dtype(values):
cmap = cmaps.get(attr, squeeze = True)
if cmap is None:
raise KeyError(f"Please set colormap for attribute {attr} or set a default colormap!")
_plot_corners(ax, corners, marker_size)
img = ax.scatter(
x[selected],
y[selected],
c=values[selected],
s=marker_size,
marker=".",
alpha=alpha[pos],
edgecolors="none",
cmap=cmap,
vmin=vmin,
vmax=vmax,
rasterized=True,
)
left, bottom, width, height = ax.get_position().bounds
rect = [left + width * (1.0 + 0.05), bottom, width * 0.1, height]
ax_colorbar = fig.add_axes(rect)
fig.colorbar(img, cax=ax_colorbar)
else:
labels, with_background = _generate_categories(values, restr_obj.get_satisfied(data, attr))
label_size = labels.categories.size
palette = palettes.get(attr)
if palette is None:
palette = _get_palette(label_size, with_background=with_background, show_background=show_background)
elif with_background:
palette = ["gainsboro" if show_background else "white"] + list(palette)
text_list = []
for k, cat in enumerate(labels.categories):
idx = labels == cat
if idx.sum() > 0:
scatter_kwargs = {"marker": ".", "alpha": alpha[pos], "edgecolors": "none", "rasterized": True}
if cat != "":
if legend_loc[pos] != "on data":
scatter_kwargs["label"] = cat
else:
text_list.append((np.median(x[idx]), np.median(y[idx]), cat))
if cat != "" or (cat == "" and show_background):
ax.scatter(
x[idx],
y[idx],
c=palette[k],
s=marker_size,
**scatter_kwargs,
)
else:
_plot_corners(ax, corners, marker_size)
if legend_loc[pos] == "right margin":
legend = ax.legend(
loc="center left",
bbox_to_anchor=(1, 0.5),
frameon=False,
fontsize=legend_fontsize[pos],
ncol=_get_legend_ncol(label_size, legend_ncol),
)
for handle in legend.legendHandles:
handle.set_sizes([300.0])
elif legend_loc[pos] == "on data":
texts = []
for px, py, txt in text_list:
texts.append(ax.text(px, py, txt, fontsize=legend_fontsize[pos], fontweight = "bold", ha = "center", va = "center"))
# from adjustText import adjust_text
# adjust_text(texts, arrowprops=dict(arrowstyle='-', color='k', lw=0.5))
ax.set_title(attr)
else:
ax.set_frame_on(False)
if i == nrows - 1:
ax.set_xlabel(f"{basis}1")
if j == 0:
ax.set_ylabel(f"{basis}2")
# Reset current matrix if needed.
if not isinstance(data, anndata.AnnData):
if cur_matkey != data.current_matrix():
data.select_matrix(cur_matkey)
return fig if return_fig else None
def scatter_groups(
data: Union[MultimodalData, UnimodalData, anndata.AnnData],
attr: str,
groupby: str,
basis: Optional[str] = "umap",
matkey: Optional[str] = None,
restrictions: Optional[Union[str, List[str]]] = None,
show_full: Optional[bool] = True,
categories: Optional[List[str]] = None,
alpha: Optional[float] = 1.0,
legend_loc: Optional[str] = "right margin",
legend_ncol: Optional[str] = None,
palette: Optional[str] = None,
cmap: Optional[str] = "YlOrRd",
vmin: Optional[float] = None,
vmax: Optional[float] = None,
nrows: Optional[int] = None,
ncols: Optional[int] = None,
panel_size: Optional[Tuple[float, float]] = (4, 4),
left: Optional[float] = 0.2,
bottom: Optional[float] = 0.15,
wspace: Optional[float] = 0.4,
hspace: Optional[float] = 0.15,
return_fig: Optional[bool] = False,
dpi: Optional[float] = 300.0,
**kwargs,
) -> Union[plt.Figure, None]:
""" Generate scatter plots of attribute 'attr' for each category in attribute 'group'. Optionally show scatter plot containing data points from all categories in 'group'.
Parameters
----------
data: ``pegasusio.MultimodalData``
Use current selected modality in data.
attr: ``str``
Color scatter plots by attribute 'attr'. This attribute should be one key in data.obs, data.var_names (e.g. one gene) or data.obsm (attribute has the format of 'obsm_key@component', like 'X_pca@0'). If it is categorical, a palette will be used to color each category separately. Otherwise, a color map will be used.
groupby: ``str``
Generate separate scatter plots of 'attr' for data points in each category in 'groupby', which should be a key in data.obs representing one categorical variable.
basis: ``str``, optional, default: ``umap``
Basis to be used to generate scatter plots. Can be either 'umap', 'tsne', 'fitsne', 'fle', 'net_tsne', 'net_fitsne', 'net_umap' or 'net_fle'.
matkey: ``str``, optional, default: None
If matkey is set, select matrix with matkey as keyword in the current modality. Only works for MultimodalData or UnimodalData objects.
restrictions: ``str`` or ``List[str]``, optional, default: None
A list of restrictions to subset data for plotting. Each restriction takes the format of 'key:value,value...', or 'key:~value,value...'. This restriction selects cells with the ``data.obs[key]`` values belong to 'value,value...' (or not belong to if '~' shows).
show_full: ``bool``, optional, default: True
Show the scatter plot with all categories in 'groupby' as the first plot.
categories: ``List[str]``, optional, default: None
Redefine group structure based on attribute 'groupby'. If 'categories' is not None, each string in the list takes the format of 'category_name:value,value', or 'category_name:~value,value...", where 'category_name' refers to new category name, 'value' refers to one of the category in 'groupby' and '~' refers to exclude values.
alpha: ``float``, optional, default: ``1.0``
Alpha value for blending, from 0.0 (transparent) to 1.0 (opaque).
legend_loc: ``str``, optional, default: ``right margin``
Legend location. Can be either "right margin" or "on data".
legend_ncol: ``str``, optional, default: None
Only applicable if legend_loc == "right margin". Set number of columns used to show legends.
palette: ``str``, optional, default: None
Used for setting colors for one categorical attribute (e.g. "black,blue,red,...,yellow").
cmap: ``str``, optional, default: ``YlOrRd``
Used for setting colormap for one numeric attribute.
vmin: ``float``, optional, default: None
Minimum value to show on a numeric scatter plot (feature plot).
vmax: ``float``, optional, default: None
Maximum value to show on a numeric scatter plot (feature plot).
nrows: ``int``, optional, default: None
Number of rows in the figure. If not set, pegasus will figure it out automatically.
ncols: ``int``, optional, default: None
Number of columns in the figure. If not set, pegasus will figure it out automatically.
panel_size: `tuple`, optional (default: `(4, 4)`)
The panel size (width, height) in inches.
left: `float`, optional (default: `0.2`)
This parameter sets the figure's left margin as a fraction of panel's width (left * panel_size[0]).
bottom: `float`, optional (default: `0.15`)
This parameter sets the figure's bottom margin as a fraction of panel's height (bottom * panel_size[1]).
wspace: `float`, optional (default: `0.4`)
This parameter sets the width between panels and also the figure's right margin as a fraction of panel's width (wspace * panel_size[0]).
hspace: `float`, optional (default: `0.15`)
This parameter sets the height between panels and also the figure's top margin as a fraction of panel's height (hspace * panel_size[1]).
return_fig: ``bool``, optional, default: ``False``
Return a ``Figure`` object if ``True``; return ``None`` otherwise.
dpi: ``float``, optional, default: 300.0
The resolution of the figure in dots-per-inch.
Returns
-------
`Figure` object
A ``matplotlib.figure.Figure`` object containing the scatter plots if ``return_fig == True``
Examples
--------
>>> pg.scatter_groups(data, attr='louvain_labels', groupby='Individual', basis='tsne', nrows = 2, ncols = 4, alpha = 0.5)
>>> pg.scatter_groups(data, attr='anno', groupby='Channel', basis='umap', categories=['new_cat1:channel1,channel2', 'new_cat2:channel3'])
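A hypothetical call coloring by a PCA component (assuming 'X_pca' is in data.obsm and 'Channel' in data.obs; these names are placeholders):
>>> pg.scatter_groups(data, attr='X_pca@0', groupby='Channel', basis='umap', show_full=False)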
"""
if not isinstance(data, anndata.AnnData):
cur_matkey = data.current_matrix()
if matkey is not None:
assert not isinstance(data, anndata.AnnData)
data.select_matrix(matkey)
x = data.obsm[f"X_{basis}"][:, 0]
y = data.obsm[f"X_{basis}"][:, 1]
# four corners of the plot
corners = np.array(np.meshgrid([x.min(), x.max()], [y.min(), y.max()])).T.reshape(-1, 2)
basis = _transform_basis(basis)
marker_size = _get_marker_size(x.size)
if attr in data.obs:
values = data.obs[attr].values
elif attr in data.var_names:
loc = data.var_names.get_loc(attr)
values = slicing(data.X, col = loc)
else:
obsm_key, sep, component = attr.partition("@")
if (sep != "@") or (obsm_key not in data.obsm) or (not component.isdigit()):
raise KeyError(f"{attr} is not in data.obs, data.var_names or data.obsm!")
values = data.obsm[obsm_key][:, int(component)]
is_cat = is_categorical_dtype(values)
if (not is_cat) and (not is_numeric_dtype(values)):
values = pd.Categorical(values, categories=natsorted(np.unique(values)))
is_cat = True
assert groupby in data.obs
groups = data.obs[groupby].values
if not is_categorical_dtype(groups):
groups = pd.Categorical(groups, categories=natsorted(np.unique(groups)))
restr_obj = RestrictionParser(restrictions)
restr_obj.calc_default(data)
selected = restr_obj.get_satisfied(data)
nsel = selected.sum()
if nsel < data.shape[0]:
x = x[selected]
y = y[selected]
values = values[selected]
groups = groups[selected]
df_g = pd.DataFrame()
if show_full:
df_g["All"] = np.ones(nsel, dtype=bool)
if categories is None:
for cat in groups.categories:
df_g[cat] = groups == cat
else:
cat_obj = RestrictionParser(categories)
for cat, idx in cat_obj.next_category(groups):
df_g[cat] = idx
nrows, ncols = _get_nrows_and_ncols(df_g.shape[1], nrows, ncols)
fig, axes = _get_subplot_layouts(nrows=nrows, ncols=ncols, panel_size=panel_size, dpi=dpi, left=left, bottom=bottom, wspace=wspace, hspace=hspace, squeeze=False)
legend_fontsize = 5 if legend_loc == 'on data' else 10
if is_cat:
labels = values
label_size = labels.categories.size
palette = _get_palette(label_size) if palette is None else np.array(palette.split(","))
legend_ncol = _get_legend_ncol(label_size, legend_ncol)
for i in range(nrows):
for j in range(ncols):
ax = axes[i, j]
ax.grid(False)
ax.set_xticks([])
ax.set_yticks([])
gid = i * ncols + j
if gid < df_g.shape[1]:
if is_cat:
text_list = []
for k, cat in enumerate(labels.categories):
idx = np.logical_and(df_g.iloc[:, gid].values, labels == cat)
_plot_corners(ax, corners, marker_size)
if idx.sum() > 0:
scatter_kwargs = {"marker": ".", "alpha": alpha, "edgecolors": "none", "rasterized": True}
if legend_loc != "on data":
scatter_kwargs["label"] = str(cat)
else:
text_list.append((np.median(x[idx]), np.median(y[idx]), str(cat)))
ax.scatter(
x[idx],
y[idx],
c=palette[k],
s=marker_size,
**scatter_kwargs,
)
if legend_loc == "right margin":
legend = ax.legend(
loc="center left",
bbox_to_anchor=(1, 0.5),
frameon=False,
fontsize=legend_fontsize,
ncol=legend_ncol,
)
for handle in legend.legendHandles:
handle.set_sizes([300.0])
elif legend_loc == "on data":
texts = []
for px, py, txt in text_list:
texts.append(ax.text(px, py, txt, fontsize=legend_fontsize, fontweight = "bold", ha = "center", va = "center"))
else:
_plot_corners(ax, corners, marker_size)
idx_g = df_g.iloc[:, gid].values
img = ax.scatter(
x[idx_g],
y[idx_g],
s=marker_size,
c=values[idx_g],
marker=".",
alpha=alpha,
edgecolors="none",
cmap=cmap,
vmin=vmin,
vmax=vmax,
rasterized=True,
)
left, bottom, width, height = ax.get_position().bounds
rect = [left + width * (1.0 + 0.05), bottom, width * 0.1, height]
ax_colorbar = fig.add_axes(rect)
fig.colorbar(img, cax=ax_colorbar)
ax.set_title(str(df_g.columns[gid]))
else:
ax.set_frame_on(False)
if i == nrows - 1:
ax.set_xlabel(basis + "1")
if j == 0:
ax.set_ylabel(basis + "2")
if not isinstance(data, anndata.AnnData):
if cur_matkey != data.current_matrix():
data.select_matrix(cur_matkey)
return fig if return_fig else None
def compo_plot(
data: Union[MultimodalData, UnimodalData, anndata.AnnData],
groupby: str,
condition: str,
style: Optional[str] = "frequency",
restrictions: Optional[Union[str, List[str]]] = None,
switch_axes: Optional[bool] = False,
groupby_label: Optional[str] = None,
sort_function: Union[Callable[[List[str]], List[str]], str] = 'natsorted',
panel_size: Optional[Tuple[float, float]] = (6, 4),
palette: Optional[List[str]] = None,
color_unused: bool = False,
left: Optional[float] = 0.15,
bottom: Optional[float] = 0.15,
wspace: Optional[float] = 0.3,
hspace: Optional[float] = 0.15,
return_fig: Optional[bool] = False,
dpi: Optional[float] = 300.0,
**kwargs,
) -> Union[plt.Figure, None]:
"""Generate a composition plot, which shows the percentage of cells from each condition for every cluster.
This function is used to generate composition plots, which are bar plots showing the cell composition (from different conditions) for each cluster. This type of plot is useful for quickly assessing library quality and batch effects.
Parameters
----------
data : ``AnnData`` or ``UnimodalData`` or ``MultimodalData`` object
Single cell expression data.
groupby : ``str``
A categorical variable in data.obs that is used to categorize the cells, e.g. cell type.
condition: ``str``
A categorical variable in data.obs that is used to calculate frequency within each category defined by ``groupby``, e.g. donor.
style: ``str``, optional (default: ``frequency``)
Composition plot style. Can be either ``frequency`` or ``normalized``. The ``frequency`` style shows, within each ``groupby`` category, the percentage of cells coming from each ``condition`` (stacked); the ``normalized`` style shows, for each ``groupby`` category, the percentage of cells of each ``condition`` relative to all cells of that ``condition`` (not stacked).
restrictions: ``str`` or ``List[str]``, optional, default: None
A list of restrictions to subset data for plotting. Each restriction takes the format of 'key:value,value...', or 'key:~value,value...'. This restriction selects cells with the ``data.obs[key]`` values belong to 'value,value...' (or not belong to if '~' shows).
switch_axes: ``bool``, optional, default: ``False``
By default, X axis is for groupby, and Y axis for frequencies with respect to condition. If this parameter is ``True``, switch the axes.
groupby_label: ``str``, optional (default ``None``)
Label for the axis displaying ``groupby`` categories. If ``None``, use ``groupby``.
sort_function: ``Union[Callable[List[str], List[str]], str]``, optional, default: ``natsorted``
Function used for sorting both groupby and condition labels. If ``natsorted``, apply natsorted function to sort by natural order. If ``None``, don't sort. Otherwise, a callable function will be applied to the labels for sorting.
panel_size: ``tuple``, optional (default: ``(6, 4)``)
The plot size (width, height) in inches.
palette: ``List[str]``, optional (default: ``None``)
Used for setting colors for categories in ``condition``. Within the list, each string is the color for one category.
color_unused: ``bool``, optional (default: ``False``)
If ``True``, assign colors based on all categories of ``condition`` in ``data.obs``, so that categories absent from the plot (e.g. filtered out by ``restrictions``) keep their own colors; otherwise only the categories present in the plot are assigned colors.
left: ``float``, optional (default: ``0.15``)
This parameter sets the figure's left margin as a fraction of panel's width (left * panel_size[0]).
bottom: ``float``, optional (default: ``0.15``)
This parameter sets the figure's bottom margin as a fraction of panel's height (bottom * panel_size[1]).
wspace: ``float``, optional (default: ``0.3``)
This parameter sets the width between panels and also the figure's right margin as a fraction of panel's width (wspace * panel_size[0]).
hspace: ``float``, optional (default: ``0.15``)
This parameter sets the height between panels and also the figure's top margin as a fraction of panel's height (hspace * panel_size[1]).
return_fig: ``bool``, optional, default: ``False``
Return a ``Figure`` object if ``True``; return ``None`` otherwise.
dpi: ``float``, optional, default: ``300.0``
The resolution in dots per inch.
Returns
-------
``Figure`` object
A ``matplotlib.figure.Figure`` object containing the composition plot if ``return_fig == True``
Examples
--------
>>> fig = pg.compo_plot(data, 'louvain_labels', 'Donor', style = 'normalized')
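A hypothetical call (assuming 'Channel' is a key in data.obs; the channel names are placeholders) restricting the plot to two channels and switching the axes:
>>> fig = pg.compo_plot(data, 'louvain_labels', 'Channel', style = 'frequency', restrictions = 'Channel:channel1,channel2', switch_axes = True, return_fig = True)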
"""
if groupby_label is None:
groupby_label = groupby
fig, ax = _get_subplot_layouts(panel_size=panel_size, dpi=dpi, left=left, bottom=bottom, wspace=wspace, hspace=hspace) # default nrows = 1 & ncols = 1
restr_obj = RestrictionParser(restrictions)
restr_obj.calc_default(data)
selected = restr_obj.get_satisfied(data)
df = pd.crosstab(data.obs.loc[selected, groupby], data.obs.loc[selected, condition])
index_values = df.index.tolist()
column_values = df.columns.tolist()
if sort_function == "natsorted":
sort_function = natsorted
if callable(sort_function):
index_values = sort_function(index_values)
column_values = sort_function(column_values)
if switch_axes:
index_values.reverse()
df = df.reindex(index = index_values, columns = column_values)
if style == "frequency":
df = df.div(df.sum(axis=1), axis=0) * 100.0
else:
assert style == "normalized"
df = df.div(df.sum(axis=0), axis=1) * 100.0
if color_unused:
if palette is None:
color_list = _get_palette(data.obs[condition].cat.categories.size)
else:
assert len(palette) >= data.obs[condition].cat.categories.size, "The palette provided has fewer colors than needed!"
color_idx = df.columns.map(data.obs[condition].cat.categories.get_loc)
color_list = np.array(color_list if palette is None else palette)[color_idx]
else:
if palette is None:
color_list = _get_palette(df.shape[1])
else:
assert len(palette) >= df.shape[1], "The palette provided has fewer colors than needed!"
color_list = palette[0:df.shape[1]]
df.plot(
kind = "bar" if not switch_axes else "barh",
stacked = style == "frequency",
legend = False,
color = color_list,
ax = ax,
)
ax.grid(False)
if not switch_axes:
ax.set_xlabel(groupby_label)
ax.set_ylabel("Percentage")
else:
ax.set_xlabel("Percentage")
ax.set_ylabel(groupby_label)
ax.legend(loc="center left", bbox_to_anchor=(1.05, 0.5))
if len(max(df.index.astype(str), key=len)) >= 5:
ax.set_xticklabels(ax.get_xticklabels(), rotation=-45, ha='left')
return fig if return_fig else None
def violin(
data: Union[MultimodalData, UnimodalData, anndata.AnnData],
attrs: Union[str, List[str]],
groupby: str,
hue: Optional[str] = None,
matkey: Optional[str] = None,
stripplot: Optional[bool] = False,
inner: Optional[str] = None,
scale: Optional[str] = 'width',
panel_size: Optional[Tuple[float, float]] = (8, 0.5),
palette: Optional[List[str]] = None,
left: Optional[float] = 0.15,
bottom: Optional[float] = 0.15,
wspace: Optional[float] = 0.1,
ylabel: Optional[str] = None,
return_fig: Optional[bool] = False,
dpi: Optional[float] = 300.0,
**kwargs,
) -> Union[plt.Figure, None]:
"""
Generate a stacked violin plot.
Parameters
----------
data: ``AnnData`` or ``MultimodalData`` or ``UnimodalData`` object
Single-cell expression data.
attrs: ``str`` or ``List[str]``
Cell attributes or features to plot.
Cell attributes must exist in ``data.obs`` and must be numeric.
Features must exist in ``data.var``.
groupby: ``str``
A categorical variable in data.obs that is used to categorize the cells, e.g. Clusters.
hue: ``str``, optional, default: None
'hue' should be a categorical variable in data.obs that has only two levels. Set 'hue' will show us split violin plots.
matkey: ``str``, optional, default: ``None``
If matkey is set, select matrix with matkey as keyword in the current modality. Only works for MultimodalData or UnimodalData objects.
stripplot: ``bool``, optional, default: ``False``
Attach a stripplot to the violinplot or not. This option will be automatically turned off if 'hue' is set.
inner: ``str``, optional, default: ``None``
Representation of the datapoints in the violin interior:
- If ``box``, draw a miniature boxplot.
- If ``quartiles``, draw the quartiles of the distribution.
- If ``point`` or ``stick``, show each underlying datapoint.
- If ``None``, will draw unadorned violins.
scale: ``str``, optional, default: ``width``
The method used to scale the width of each violin:
- If ``width``, each violin will have the same width.
- If ``area``, each violin will have the same area.
- If ``count``, the width of the violins will be scaled by the number of observations in that bin.
panel_size: ``Tuple[float, float]``, optional, default: ``(8, 0.5)``
The size (width, height) in inches of each violin panel.
palette: ``List[str]``, optional (default: ``None``)
Used for setting colors for categories in ``groupby``. Within the list, each string is the color for one category.
left: ``float``, optional, default: ``0.15``
This parameter sets the figure's left margin as a fraction of panel's width (left * panel_size[0]).
bottom: ``float``, optional, default: ``0.15``
This parameter sets the figure's bottom margin as a fraction of panel's height (bottom * panel_size[1]).
wspace: ``float``, optional, default: ``0.1``
This parameter sets the width between panels and also the figure's right margin as a fraction of panel's width (wspace * panel_size[0]).
ylabel: ``str``, optional, default: ``None``
Y-axis label. No label to show if ``None``.
return_fig: ``bool``, optional, default: ``False``
Return a ``Figure`` object if ``True``; return ``None`` otherwise.
dpi: ``float``, optional, default: ``300.0``
The resolution in dots per inch.
kwargs
Are passed to ``seaborn.violinplot``.
Returns
-------
``Figure`` object
A ``matplotlib.figure.Figure`` object containing the violin plot if ``return_fig == True``
Examples
--------
>>> pg.violin(data, attrs=['CD14', 'TRAC', 'CD34'], groupby='louvain_labels')
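A hypothetical split-violin call (assuming 'n_genes' is a numeric column and 'condition' a two-level categorical in data.obs; both names are placeholders):
>>> pg.violin(data, attrs='n_genes', groupby='louvain_labels', hue='condition', inner='box')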
"""
if not is_list_like(attrs):
attrs = [attrs]
if not isinstance(data, anndata.AnnData):
cur_matkey = data.current_matrix()
if matkey is not None:
assert not isinstance(data, anndata.AnnData)
data.select_matrix(matkey)
nrows = len(attrs)
fig, axes = _get_subplot_layouts(nrows=nrows, ncols=1, panel_size=panel_size, dpi=dpi, left=left, bottom=bottom, wspace=wspace, hspace=0, squeeze=False, sharey=False)
obs_keys = []
genes = []
for key in attrs:
if key in data.obs:
assert is_numeric_dtype(data.obs[key])
obs_keys.append(key)
else:
if key not in data.var_names:
logger.warning(f"Cannot find gene {key}. Please make sure all genes are included in data.var_names before running this function!")
return None
genes.append(key)
df_list = [pd.DataFrame({"label": data.obs[groupby].values})]
if hue is not None:
df_list.append(pd.DataFrame({hue: data.obs[hue].values}))
stripplot = False
if len(obs_keys) > 0:
df_list.append(data.obs[obs_keys].reset_index(drop=True))
if len(genes) > 0:
expr_mat = slicing(data[:, genes].X)
df_list.append(pd.DataFrame(data=expr_mat, columns=genes))
df = pd.concat(df_list, axis = 1)
for i in range(nrows):
ax = axes[i, 0]
if stripplot:
sns.stripplot(x="label", y=attrs[i], hue = hue, data=df, ax=ax, size=1, color="k", jitter=True)
sns.violinplot(x="label", y=attrs[i], hue = hue, data=df, inner=inner, linewidth=1, ax=ax, cut=0, scale=scale, split=True, palette=palette, **kwargs)
ax.grid(False)
if hue is not None:
if i == 0:
ax.legend(loc="center left", bbox_to_anchor=(1.02, 0.5))
else:
ax.get_legend().set_visible(False)
if i < nrows - 1:
ax.set_xlabel("")
else:
ax.set_xlabel(groupby)
ax.set_xticklabels(ax.get_xticklabels(), rotation=90)
ax.set_ylabel(attrs[i], labelpad=8, rotation=0, horizontalalignment='right', fontsize='medium')
ax.tick_params(axis='y', right=True, left=False, labelright=True, labelleft=False, labelsize='small')
if ylabel is not None:
fig.text(0.02, 0.5, ylabel, rotation="vertical", fontsize="xx-large")
# Reset current matrix if needed.
if not isinstance(data, anndata.AnnData):
if data.current_matrix() != cur_matkey:
data.select_matrix(cur_matkey)
return fig if return_fig else None
def heatmap(
data: Union[MultimodalData, UnimodalData, anndata.AnnData],
attrs: Union[str, List[str]],
groupby: str,
matkey: Optional[str] = None,
on_average: bool = True,
switch_axes: bool = False,
attrs_cluster: Optional[bool] = False,
attrs_dendrogram: Optional[bool] = True,
groupby_cluster: Optional[bool] = True,
groupby_dendrogram: Optional[bool] = True,
attrs_labelsize: Optional[float] = 10.0,
groupby_labelsize: Optional[float] = 10.0,
cbar_labelsize: Optional[float] = 10.0,
panel_size: Tuple[float, float] = (10, 10),
return_fig: Optional[bool] = False,
dpi: Optional[float] = 300.0,
**kwargs,
) -> Union[plt.Figure, None]:
"""
Generate a heatmap.
Parameters
-----------
data: ``AnnData`` or ``MultimodalData`` or ``UnimodalData`` object
Single-cell expression data.
attrs: ``str`` or ``List[str]``
Cell attributes or features to plot.
Cell attributes must exist in ``data.obs`` and must be numeric.
Features must exist in ``data.var``.
By default, attrs are plotted as columns.
groupby: ``str``
A categorical variable in data.obs that is used to categorize the cells, e.g. Clusters.
By default, data.obs['groupby'] is plotted as rows.
matkey: ``str``, optional, default: ``None``
If matkey is set, select matrix with matkey as keyword in the current modality. Only works for MultimodalData or UnimodalData objects.
on_average: ``bool``, optional, default: ``True``
If ``True``, plot cluster average gene expression (i.e. show a Matrixplot); otherwise, plot a general heatmap.
switch_axes: ``bool``, optional, default: ``False``
By default, X axis is for attributes, and Y axis for clusters. If this parameter is ``True``, switch the axes.
Moreover, with ``on_average`` being ``False``, if ``switch_axes`` is ``False``, ``row_cluster`` is enforced to be ``False``; if ``switch_axes`` is ``True``, ``col_cluster`` is enforced to be ``False``.
attrs_cluster: ``bool``, optional, default: ``False``
Cluster attributes and generate a attribute-wise dendrogram.
attrs_dendrogram: ``bool``, optional, default: ``True``
Only matters if attrs_cluster is True. Show the dendrogram if this option is True.
groupby_cluster: ``bool``, optional, default: ``True``
Cluster data.obs['groupby'] and generate a cluster-wise dendrogram.
groupby_dendrogram: ``bool``, optional, default: ``True``
Only matters if groupby_cluster is True. Show the dendrogram if this option is True.
attrs_labelsize: ``float``, optional, default: 10.0
Fontsize for labels of attrs.
groupby_labelsize: ``float``, optional, default: 10.0
Fontsize for labels of data.obs['groupby'].
cbar_labelsize: ``float``, optional, default: 10.0
Fontsize of the color bar.
panel_size: ``Tuple[float, float]``, optional, default: ``(10, 10)``
Overall size of the heatmap in ``(width, height)`` form.
return_fig: ``bool``, optional, default: ``False``
Return a ``Figure`` object if ``True``; return ``None`` otherwise.
dpi: ``float``, optional, default: ``300.0``
The resolution in dots per inch.
kwargs
Are passed to ``seaborn.heatmap``.
.. _colormap documentation: https://matplotlib.org/3.1.0/tutorials/colors/colormaps.html
Returns
-------
``Figure`` object
A ``matplotlib.figure.Figure`` object containing the heatmap if ``return_fig == True``
Examples
--------
>>> pg.heatmap(data, attrs=['CD14', 'TRAC', 'CD34'], groupby='louvain_labels')
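A hypothetical per-cell heatmap (``on_average=False``) with attributes shown on the rows; the gene names are placeholders:
>>> pg.heatmap(data, attrs=['CD14', 'TRAC', 'CD34'], groupby='louvain_labels', on_average=False, switch_axes=True)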
"""
if not isinstance(data, anndata.AnnData):
cur_matkey = data.current_matrix()
if matkey is not None:
assert not isinstance(data, anndata.AnnData)
data.select_matrix(matkey)
if isinstance(attrs, str):
attrs = [attrs]
obs_keys = []
genes = []
for key in attrs:
if key in data.obs:
assert is_numeric_dtype(data.obs[key])
obs_keys.append(key)
else:
if key not in data.var_names:
logger.warning(f"Cannot find gene {key}. Please make sure all genes are included in data.var_names before running this function!")
return None
genes.append(key)
clusters = data.obs[groupby].values
if not is_categorical_dtype(clusters):
clusters = pd.Categorical(clusters)
else:
clusters = clusters.remove_unused_categories()
df_list = [pd.DataFrame({'cluster_name': clusters})]
if len(obs_keys) > 0:
df_list.append(data.obs[obs_keys].reset_index(drop=True))
if len(genes) > 0:
expr_mat = slicing(data[:, genes].X)
df_list.append(pd.DataFrame(data=expr_mat, columns=genes))
df = pd.concat(df_list, axis = 1)
attr_names = df.columns[1:].values
if on_average:
if 'cmap' not in kwargs:
kwargs['cmap'] = 'Reds'
df = df.groupby('cluster_name').mean()
cluster_ids = df.index
else:
cluster_ids = df.pop('cluster_name').values
if not groupby_cluster:
idx = cluster_ids.argsort(kind = 'mergesort')
df = df.iloc[idx, :] # organize df by category order
cluster_ids = cluster_ids[idx]
cell_colors = np.zeros(df.shape[0], dtype=object)
palette = _get_palette(cluster_ids.categories.size)
for k, cat in enumerate(cluster_ids.categories):
cell_colors[cluster_ids == cat] = palette[k]
if not switch_axes:
cg = sns.clustermap(
data=df,
row_colors=cell_colors if not on_average else None,
col_colors=None,
row_cluster=groupby_cluster,
col_cluster=attrs_cluster,
linewidths=0,
yticklabels=cluster_ids if on_average else [],
xticklabels=attr_names,
figsize=panel_size,
**kwargs,
)
cg.ax_heatmap.set_ylabel("")
if attrs_labelsize is not None:
cg.ax_heatmap.tick_params(axis='x', labelsize=attrs_labelsize, labelrotation=75)
else:
cg = sns.clustermap(
data=df.T,
row_colors=None,
col_colors=cell_colors if not on_average else None,
row_cluster=attrs_cluster,
col_cluster=groupby_cluster,
linewidths=0,
yticklabels=attr_names,
xticklabels=cluster_ids if on_average else [],
figsize=panel_size,
**kwargs,
)
cg.ax_heatmap.set_xlabel("")
if attrs_labelsize is not None:
cg.ax_heatmap.tick_params(axis='y', labelsize=attrs_labelsize)
show_row_dendrogram = (attrs_cluster and attrs_dendrogram) if switch_axes else (groupby_cluster and groupby_dendrogram)
show_col_dendrogram = (groupby_cluster and groupby_dendrogram) if switch_axes else (attrs_cluster and attrs_dendrogram)
if show_row_dendrogram:
cg.ax_heatmap.yaxis.tick_right()
cg.ax_row_dendrogram.set_visible(True)
# Avoid overlap of colorbar and row dendrogram.
color_box = cg.ax_cbar.get_position()
square_plot = cg.ax_heatmap.get_position()
if square_plot.y1 > color_box.y0:
y_diff = square_plot.y1 - color_box.y0
color_box.y0 = square_plot.y1
color_box.y1 += y_diff
cg.ax_cbar.set_position(color_box)
else:
cg.ax_heatmap.yaxis.tick_left()
cg.ax_row_dendrogram.set_visible(False)
# Move the colorbar to the right-side.
color_box = cg.ax_heatmap.get_position()
color_box.x0 = color_box.x1 + 0.04
color_box.x1 = color_box.x0 + 0.02
cg.ax_cbar.set_position(color_box)
cg.ax_cbar.yaxis.set_ticks_position("right")
if show_col_dendrogram:
cg.ax_heatmap.xaxis.tick_bottom()
cg.ax_col_dendrogram.set_visible(True)
else:
cg.ax_heatmap.xaxis.tick_top()
cg.ax_col_dendrogram.set_visible(False)
cg.ax_cbar.tick_params(labelsize=cbar_labelsize)
cg.fig.dpi = dpi
if not on_average:
if groupby_cluster:
from matplotlib.patches import Patch
legend_elements = [Patch(color = color, label = label) for color, label in zip(palette, cluster_ids.categories)]
cg.ax_heatmap.legend(handles=legend_elements, loc='lower left', bbox_to_anchor = (1.02, 1.02), fontsize = groupby_labelsize)
else:
values = cluster_ids.value_counts().values
ticks = np.cumsum(values) - values / 2
labels = cluster_ids.categories
if not switch_axes:
cg.ax_row_colors.yaxis.tick_left()
cg.ax_row_colors.set_yticks(ticks)
cg.ax_row_colors.set_yticklabels(labels)
cg.ax_row_colors.tick_params(axis='y', left = False, length=10)
else:
cg.ax_col_colors.xaxis.tick_top()
cg.ax_col_colors.set_xticks(ticks)
cg.ax_col_colors.set_xticklabels(labels, rotation=45)
cg.ax_col_colors.tick_params(axis='x', top = False, labelsize = groupby_labelsize, length=10)
if not isinstance(data, anndata.AnnData):
if cur_matkey != data.current_matrix():
data.select_matrix(cur_matkey)
return cg.fig if return_fig else None
def dotplot(
data: Union[MultimodalData, UnimodalData, anndata.AnnData],
genes: Union[str, List[str]],
groupby: str,
reduce_function: Callable[[np.ndarray], float] = np.mean,
fraction_min: float = 0,
fraction_max: float = None,
dot_min: int = 0,
dot_max: int = 20,
switch_axes: bool = False,
cmap: Union[str, List[str], Tuple[str]] = 'Reds',
sort_function: Union[Callable[[List[str]], List[str]], str] = 'natsorted',
grid: bool = True,
return_fig: Optional[bool] = False,
dpi: Optional[float] = 300.0,
**kwds,
) -> Union[plt.Figure, None]:
"""
Generate a dot plot.
Parameters
----------
data: ``AnnData`` or ``UnimodalData`` or ``MultimodalData`` object
Single cell expression data.
genes: ``str`` or ``List[str]``
Features to plot.
groupby: ``str``
A categorical variable in data.obs that is used to categorize the cells, e.g. Clusters.
reduce_function: ``Callable[[np.ndarray], float]``, optional, default: ``np.mean``
Function to calculate statistic on expression data. Default is mean.
fraction_min: ``float``, optional, default: ``0``.
Minimum fraction of expressing cells to consider.
fraction_max: ``float``, optional, default: ``None``.
Maximum fraction of expressing cells to consider. If ``None``, use the maximum value from data.
dot_min: ``int``, optional, default: ``0``.
Minimum size in pixels for dots.
dot_max: ``int``, optional, default: ``20``.
Maximum size in pixels for dots.
switch_axes: ``bool``, optional, default: ``False``.
If ``True``, switch X and Y axes.
cmap: ``str`` or ``List[str]`` or ``Tuple[str]``, optional, default: ``Reds``
Color map.
sort_function: ``Union[Callable[List[str], List[str]], str]``, optional, default: ``natsorted``
Function used for sorting groupby labels. If ``natsorted``, apply natsorted function to sort by natural order. If ``None``, don't sort. Otherwise, a callable function will be applied to the labels for sorting.
grid: ``bool``, optional, default: ``True``
If ``True``, plot grids.
return_fig: ``bool``, optional, default: ``False``
Return a ``Figure`` object if ``True``; return ``None`` otherwise.
dpi: ``float``, optional, default: ``300.0``
The resolution in dots per inch.
**kwds:
Are passed to ``matplotlib.pyplot.scatter``.
Returns
-------
``Figure`` object
A ``matplotlib.figure.Figure`` object containing the dot plot if ``return_fig == True``
Examples
--------
>>> pg.dotplot(data, genes = ['CD14', 'TRAC', 'CD34'], groupby = 'louvain_labels')
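A hypothetical call switching the axes and summarizing expression with the median instead of the mean (assuming numpy is imported as np):
>>> pg.dotplot(data, genes = ['CD14', 'TRAC', 'CD34'], groupby = 'louvain_labels', switch_axes = True, reduce_function = np.median)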
"""
sns.set(font_scale=0.7, style='whitegrid')
if not is_list_like(genes):
genes = [genes]
keywords = dict(cmap=cmap)
keywords.update(kwds)
from scipy.sparse import issparse
X = slicing(data[:, genes].X)
df = pd.DataFrame(data=X, columns=genes)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 3 13:21:38 2018
@author: ericgrant
"""
import pandas as pd
from omnisci_utils import get_credentials
from omnisci_utils import wake_and_connect_to_mapd
from omnisci_utils import disconnect_mapd
from omnisci_utils import get_table_mapd
# VARIABLES
#connection
mapdhost = "use2-api.mapd.cloud"
mapdport = 443
mapdprotocol = "https"
mapddbname = "mapd"
mapduser = "mapd"
#paths
file_path = '/Users/ericgrant/Downloads/OKR_Dashboards/xfer/'
# API keys file
omnisci_keyfile = file_path + 'omnisci_keys.json'
# table to retrieve
table_name = 'oss_git_views'
tables_and_files = [
# github views
(table_name, file_path + table_name + '_out.csv')
]
# FUNCTIONS
# MAIN
def main():
# connect to MapD
dfcreds = get_credentials(omnisci_keyfile)
connection = wake_and_connect_to_mapd(dfcreds['write_key_name'], dfcreds['write_key_secret'], mapdhost, mapddbname)
# loop through tables
if connection == 'RETRY':
print('could not wake OmniSci; exiting')
else:
for table, file in tables_and_files:
df = pd.DataFrame()
# -*- coding: utf-8 -*-
import os
from typing import IO
import pandas as pd
from PySDDP.dessem.script.templates.cadterm import CadTermTemplate
COMENTARIO = '&'
class CadTerm(CadTermTemplate):
"""
Class containing all the elements common to any version of the Dessem CadTerm file.
This class is intended to provide duck typing for the Dessem class and to add a level of specialization
within the factory. It also passes on the responsibility of implementing the read and write
methods.
"""
def __init__(self):
super().__init__()
self.cadusit = dict()
self.cadunidt = dict()
self.cadconf = dict()
self.cadmin = dict()
self.cadusit_df = pd.DataFrame()
self.cadunidt_df = pd.DataFrame()
#!/usr/bin/env python
# coding: utf-8
# -------------------------------------------------------------------
# **TD DSA 2021 by <NAME> - report by <NAME>**
# ------------------------- -------------------------------------
# # Descriptive analysis
# ## Setup
# In[5]:
get_ipython().system('pip install textblob')
# In[6]:
get_ipython().system('pip install emot')
# In[7]:
get_ipython().system('pip install wordcloud')
# In[8]:
#Time and files
import os
import warnings
import time
from datetime import timedelta
#Data manipulation
import pandas as pd
import numpy as np
# Text
from collections import Counter
import nltk
nltk.download('punkt')
from nltk.tokenize import word_tokenize
nltk.download('stopwords')
from nltk.corpus import stopwords
nltk.download('vader_lexicon')
from nltk.sentiment.vader import SentimentIntensityAnalyzer
from nltk.stem import WordNetLemmatizer
nltk.download('wordnet')
from nltk.util import ngrams
from textblob import TextBlob
import string
import re
import spacy
from emot.emo_unicode import UNICODE_EMO, EMOTICONS
#Visualization
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
import seaborn as sns
import plotly.express as px
import plotly.graph_objects as go
from wordcloud import WordCloud
#Experiment tracking
import mlflow
import mlflow.sklearn
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
# ### Using the package
# In[9]:
#This cell loads the packaged version of the project and ensures it is reloaded before its functions are called
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
# In[10]:
from dsa_sentiment.scripts.make_dataset import load_data
from dsa_sentiment.scripts.evaluate import eval_metrics
from dsa_sentiment.scripts.make_dataset import Preprocess_StrLower, Preprocess_transform_target
# ### MLFlow experiment configuration
# In[11]:
mlflow.tracking.get_tracking_uri()
# ### Loading the data
# In[12]:
# Import the data
#df
df_train=pd.read_parquet('/mnt/data/interim/df_train.gzip')
df_val=pd.read_parquet('/mnt/data/interim/df_val.gzip')
df_test=pd.read_parquet('/mnt/data/interim/df_test.gzip')
#X
X_train=pd.read_parquet('/mnt/data/interim/X_train.gzip')
X_val=pd.read_parquet('/mnt/data/interim/X_val.gzip')
X_test=pd.read_parquet('/mnt/data/interim/X_test.gzip')
#y
y_train=pd.read_parquet('/mnt/data/interim/y_train.gzip')
y_val=pd.read_parquet('/mnt/data/interim/y_val.gzip')
y_test=pd.read_parquet('/mnt/data/interim/y_test.gzip')
# ## EDA
# We start by analyzing the balance of the different sentiment classes
# In[13]:
df = df_train
df.head()
# ### Analysis of the training set balance by label
# In[14]:
fig = px.histogram(df, x="sentiment", color="sentiment", title = 'Nombre de tweets par sentiment')
fig.show()
# There is a slight class imbalance in favour of `neutral` sentiments
# ### Analysis of lexical fields by label
# For the rest of the work, we create a corpus containing the concatenation of all tweets with a given sentiment.
# In[15]:
def create_corpus(text_series):
text = text_series.apply(lambda x : x.split())
text = sum(text, [])
return text
# In[16]:
positive_text = create_corpus(df['text'][df['sentiment']=='positive'])
negative_text = create_corpus(df['text'][df['sentiment']=='negative'])
neutral_text = create_corpus(df['text'][df['sentiment']=='neutral'])
# It then becomes possible to create histograms showing the frequency of N-grams in a given corpus
# In[17]:
def plot_freq_dist(text_corpus, nb=30, ngram=1, title=''):
'''
Plot the most common words
inputs:
text_corpus : a corpus of words
nb : number of words to plot
title : graph title
returns:
nothing, plots the graph
'''
freq_pos=Counter(ngrams(create_corpus(pd.Series(text_corpus)),ngram))
pos_df = pd.DataFrame({
"words":[' '.join(items) for items in list(freq_pos.keys())],
"Count":list(freq_pos.values())
})
common_pos = pos_df.nlargest(columns="Count", n=nb)
fig = px.bar(common_pos, x="words", y="Count", labels={"words": "Words", "Count":"Frequency"}, title=title)
fig.show();
# In[18]:
plot_freq_dist(positive_text, title = 'Most common words associated with positive tweets')
# The result shows the preponderance of `stopwords`, those very common function words which hinder the identification of keywords specific to a document or set of documents.
#
# Text therefore needs to be reprocessed before analysis.
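# An illustrative check (added sketch): count how many of the 30 most frequent tokens of the positive corpus are English stopwords. `positive_text` is the token list built above.
# In[ ]:
sw = set(stopwords.words('english'))
top_tokens = [w for w, _ in Counter(positive_text).most_common(30)]
print(sum(w.lower() in sw for w in top_tokens), 'of the 30 most frequent tokens are stopwords')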
# ### Preprocessing
# Among the tweet-specific elements that can have an impact on what follows are:
#
# - keywords marked with a `#`
# - user names starting with `@`
# - emoticons and emojis
# - words written in ALL CAPS
# - repeated characters used for emphasis `!!!!`, `looooong`, or self-censorship `f***`
# - typos (words shorter than 2 characters)
# To have homogeneous, reproducible and configurable processing, a dedicated function is created. Its different parameters can be tested in the later modelling phases.
# source [preprocess](https://www.kaggle.com/stoicstatic/twitter-sentiment-analysis-for-beginners)
# In[57]:
def preprocess_text(text_series,
apply_lemmatizer=True,
apply_lowercase=True,
apply_url_standerdisation=True,
apply_user_standerdisation=True,
apply_emoticon_to_words=True,
apply_stopwords_removal=True,
apply_shortwords_removal=True,
apply_non_alphabetical_removal=True,
apply_only_2_consecutive_charac=True
):
'''
Main preprocess function
inputs:
text_series : a pandas Series object with text to preprocess
outputs:
a preprocessed pandas Series object
'''
processedText = []
if apply_lemmatizer:
# Create Lemmatizer and Stemmer.
wordLemm = WordNetLemmatizer()
# Defining regex patterns.
urlPattern = r"((http://)[^ ]*|(https://)[^ ]*|( www\.)[^ ]*)"
userPattern = '@[^\s]+'
alphaPattern = r"[^(\w|\*|(!){2}|#)]"
sequencePattern = r"(.)\1\1+"
seqReplacePattern = r"\1\1"
for tweet in text_series:
if apply_lowercase:
tweet = tweet.lower()
if apply_url_standerdisation:
# Replace all URls with 'URL'
tweet = re.sub(urlPattern,' URL',tweet)
if apply_user_standerdisation:
# Replace @USERNAME to 'USER'.
tweet = re.sub(userPattern,' USER', tweet)
if apply_emoticon_to_words:
# Replace all emojis.
for emo in EMOTICONS:
#refactor outputs so that we come up with a single word when/if text spliting afterwards
val = "_".join(EMOTICONS[emo].replace(",","").split())
val='EMO_'+val
tweet = tweet.replace(emo, ' '+val+' ')
for emot in UNICODE_EMO:
val = "_".join(UNICODE_EMO[emot].replace(",","").replace(":","").split())
val='EMO_'+val
tweet = tweet.replace(emot, ' '+val+' ')
if apply_only_2_consecutive_charac:
# Replace 3 or more consecutive letters by 2 letter.
tweet = re.sub(sequencePattern, seqReplacePattern, tweet)
if apply_non_alphabetical_removal:
# Replace all non alphabets.
tweet = re.sub(alphaPattern, " ", tweet)
tweetwords = ''
for word in tweet.split():
# Checking if the word is a stopword.
if apply_stopwords_removal:
if word in stopwords.words('english'):
word=''
else:
word=word
#if word not in stopwordlist:
if apply_shortwords_removal:
if len(word)<=1:
word=''
else:
word=word
# Lemmatizing the word.
if apply_lemmatizer:
word = wordLemm.lemmatize(word)
else:
word=word
tweetwords += (word+' ')
processedText.append(tweetwords)
return processedText
# In[20]:
positive_text_prepro = preprocess_text(df['text'][df['sentiment']=='positive'], apply_lemmatizer=False, apply_non_alphabetical_removal=True)
# In[56]:
pd.Series(positive_text_prepro)
from pathlib import Path
from typing import List, Tuple
import matplotlib.pyplot as plt
import pandas as pd
from pylossmap import BLMData
from pylossmap.lossmap import LossMap
from tqdm.auto import tqdm
def ufo_stable_proton(ufo_meta: pd.DataFrame) -> pd.DataFrame:
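"""Keep only UFO events recorded during STABLE beams in proton-proton fills."""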
ufo_meta = ufo_meta[ufo_meta["beam_mode"] == "STABLE"]
ufo_meta = ufo_meta[ufo_meta["particle_b1"] == "protons"]
ufo_meta = ufo_meta[ufo_meta["particle_b2"] == "protons"]
return ufo_meta
def ufo_on_blms(ufo_meta: pd.DataFrame, blms: List[str]) -> pd.DataFrame:
"""Only keep ufos which occur on the provided blms.
Args:
ufo_meta: metadata of the ufo events
blms: keep ufos which occur on these blms
Returns:
The filtered ufo metadata.
"""
blms_in_ufo = list(set(blms) & set(ufo_meta["blm"].unique()))
ufo_meta = ufo_meta.set_index("blm").loc[blms_in_ufo].reset_index()
return ufo_meta
def load_raw_fill(file_path: Path) -> BLMData:
"""Load the raw blm data.
Args:
file_path: the path to the hdf file
Returns:
The raw BLM data.
"""
blm_data = BLMData.load(file_path)
blm_data.df.drop_duplicates(inplace=True)
return blm_data
def get_ufo_data(ufo_meta: pd.DataFrame, raw_data_dir: Path) -> pd.DataFrame:
"""Load the ufo event blm data.
Args:
ufo_meta: metadata of the ufo events
raw_data_dir: directory containing the raw blm data
Returns:
The raw blm data.
"""
ufo_blm_data = []
for idx, row in tqdm(ufo_meta.reset_index().iterrows(), total=len(ufo_meta)):
# try:
print(idx, row.datetime, row.blm, row.fill)
try:
blm_data = load_raw_fill(raw_data_dir / f"{row.fill}.h5")
except FileNotFoundError:
print(f"file not found {row.fill}")
continue
loss_map = blm_data.loss_map(row.datetime + pd.Timedelta("1s"))
ufo_blm_data.append(loss_map.df["data"])
return pd.DataFrame(ufo_blm_data)
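# Example usage (a sketch; the metadata file name and the BLM name are assumptions):
# ufo_meta = pd.read_hdf("ufo_meta.h5")
# ufo_meta = ufo_stable_proton(ufo_meta)
# ufo_meta = ufo_on_blms(ufo_meta, blms=["BLMQI.13R3.B1E10_MQ"])
# blm_df = get_ufo_data(ufo_meta, Path("raw_blm_data/"))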
import json
import logging
import random
import re
import shutil
import tempfile
from collections import Counter
from pathlib import Path
import pandas as pd
import pkg_resources
import string_utils
from git import Repo as GitRepo
from rich.progress import Progress
from .qlearning import compute_dataset
from .training import create_snippet_model
EXCLUDED_NAMES = set(['changelog', 'codeowners', 'contribute',
'docker-compose', 'dockerfile', 'jenkinsfile', 'license',
'makefile', 'package', 'package-lock'])
EXCLUDED_EXTS = set(['bin', 'csv', 'gz', 'jpg', 'md', 'pdf', 'png', 'rst',
'svg', 'txt', 'yml', 'zip'])
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
class ExtractorGenerator:
def generate_leak_snippets(self, repo_url, num_extracts=30):
""" Generate the extractor model adapted to a repository.
Parameters
----------
repo_url: str
The url of the repository
num_extracts: int, optional
The maximum number of extracts needed (default `30`)
Returns
-------
str
The name of the model folder
str
The name of the binary for the extractor model
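Examples
--------
A hypothetical call (the repository URL is a placeholder):
>>> generator = ExtractorGenerator()
>>> model_folder, extractor_name = generator.generate_leak_snippets(
... 'https://github.com/example-user/example-repo')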
"""
# Generate the corpus for the repo
corpus = self.build_corpus(repo_url, num_extracts)
try:
return self.train_model(corpus, repo_url)
except FileExistsError:
logger.warning('Model for this developer already created. '
'Do not generate a new one.')
# Return the existing one
return self._search_model_extractor(repo_url)
def _clone_git_repo(self, git_url):
""" Clone git repository. """
project_path = tempfile.mkdtemp()
GitRepo.clone_from(git_url, project_path)
return project_path
def _get_relevant_files(self, local_repo_path):
""" Sort the files of this repository according to their relevance. The
relevance of a file is calculated as the number of commits which
changed it.
Parameters
----------
local_repo_path: str
The local path of the repo (cloned from github)
Returns
-------
list
A list of file names, sorted by relevance
"""
r = GitRepo(local_repo_path)
all_commits = r.git.log('--name-only', '--pretty=format:').split()
counted_commits = Counter(all_commits)
# Sort the files according to the number of commits they appear in
sorted_commits = sorted(counted_commits.items(),
key=lambda x: x[1],
reverse=True)
# Return the file names sorted per commits number
return list(zip(*sorted_commits))[0]
def _search_model_extractor(self, repo_url):
""" Find the existing extractor binary.
If the model for this developer has already been generated, then we
should find it in the `models_data` folder (i.e., the default folder
for the ML models).
Parameters
----------
repo_url: str
The url of the repository
Returns
-------
str
The name of the model folder
str
The name of the binary for the extractor model
"""
# Find model folder
# The model name is the name of the author of the repository
model_name = 'snippet_model_%s' % repo_url.split('/')[-2]
# It is stored in the models_data folder
models_data = Path(pkg_resources.resource_filename('credentialdigger',
'models_data'))
dev_model = models_data / model_name
# Find extractor binary
# Get name and version from the metafile
with open(dev_model / 'meta.json', 'r') as f:
meta = json.loads(f.read())
inner_folder = dev_model / ('%s-%s' % (meta['name'], meta['version']))
# There should be only one binary in the inner folder
extractor_file = list(inner_folder.glob('**/*.bin'))[0]
return dev_model.name, extractor_file.name
def build_corpus(self, repo_url, num_extracts):
""" Build the corpus for this repo.
Parameters
----------
repo_url: str
The url of the repository
num_extracts: int
The maximum number of extracts needed
Returns
-------
list
A list of strings (i.e., the extracts)
"""
# Clone the repo from Github (the scanner deletes it when it finishes
# its tasks)
repo_path = self._clone_git_repo(repo_url)
# Get the ranking of the files of this repo
ranking = self._get_relevant_files(repo_path)
# Build the corpus
repo_local_path = Path(repo_path)
corpus = []
fi = 0
while fi < len(ranking) and len(corpus) < num_extracts:
current = repo_local_path / ranking[fi]
# Some files cannot be used to produce extracts
pp = Path(current).name
if pp[0] == '.' or pp.split('.')[-1] in EXCLUDED_EXTS or \
pp.split('.')[0].lower() in EXCLUDED_NAMES:
fi += 1
continue
try:
with open(current, 'r') as f:
# Extend the corpus with the extracts found in this file
corpus.extend(self._get_extracts(f.read()))
except UnicodeDecodeError:
# If the read raises this exception, then either the language
# uses a different charset or the file may be a csv (or a
# binary). In both cases, skip it.
# print('Skip file %s (encoding error)' % current)
pass
except FileNotFoundError:
# If the read raises this exception, then the file has been
# deleted from the repository. In this case, ignore it (since
# for the generator we only need the stylometry of the
# developer, the content is not important).
# print('Skip file %s (deleted)' % current)
pass
fi += 1
# Delete local repo folder
shutil.rmtree(repo_path)
return corpus
def _get_extracts(self, code):
""" Use the code to produce extracts.
Parameters
----------
code: str
The content of a file
Returns
-------
list
A list of extracts (i.e., a list of strings)
"""
rows = code.split('\n')
extracts = []
# If the code is shorter than 10 lines, we ignore this file
if 10 <= len(rows) < 15:
# If the code is 10 to 15 lines, we use the whole file as corpus
extracts.append(code)
elif len(rows) >= 15:
# If the code is longer than 15 lines, we split it into multiple
# extracts of length generated randomly (10 to 20 lines each)
while len(rows) > 10:
# Generate an extract using the first r rows, with r a random
# number between 10 and 20
r = random.randint(10, 20)
extracts.append('\n'.join(rows[:r]))
# Remove the first r rows
rows = rows[r + 1:]
return extracts
def train_model(self, corpus, repo_url, training_data_size=75000,
actions_n=12, states_n=13, alpha=0.5, gamma=0.85,
epochs_basis=50, extract_max_length=150):
""" Train the snippet model according to the user stylometry.
Parameters
----------
corpus: list
A corpus of code, i.e., a list of excerpts of a repository
repo_url: str
The url of the repository
training_data_size: int, optional
The size of the training dataset (default `75000`)
actions_n: int, optional
The number of actions in the Q-table (default `12`)
states_n: int, optional
The number of states in the Q-table (default `13`)
alpha: float, optional
The alpha parameter in the reward function (default `0.5`)
gamma: float, optional
The gamma parameter in the reward function (default `0.85`)
epochs_basis: int, optional
The base number of epochs (default `50`)
extract_max_length: int, optional
The maximum length of extracts for being processed (default `150`)
Returns
-------
str
The name of the model folder
str
The name of the binary for the extractor model
"""
# Compute dataset with qlearning algorithm
raw_df = compute_dataset(corpus, actions_n, states_n, alpha, gamma,
epochs_basis, extract_max_length)
# Load dataframe
df = pd.DataFrame(data=raw_df)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.rcParams['pdf.fonttype'] = 42
plt.rcParams['font.sans-serif'] = 'Arial'
import scipy.stats as ss
from slip_rate_fns import *
save_figs = True
'''Calculations'''
np.random.seed(420)
n_eqs = 15
print('Making example distributions...')
# make some example distributions for plotting
lgn_1000_500 = lognormal(1000, 500, return_pdf=True)
lgn_1000_1000 = lognormal(1000, 1000, return_pdf=True)
lgn_1000_2000 = lognormal(1000, 2000, return_pdf=True)
exp_1000 = exponential(1000, 1000, return_pdf=True)
print('Done.')
print('Making slip histories...')
# make slip histories
sim_years = int(2e6)
# Now the cumulative displacement histories are made.
# We make one 2-million-year history for each distribution.
cum_disp_logn_1000_500 = make_cum_slip(1000, 500, lognormal,
displacement_mean=1.,
displacement_std=0.,
yrs=sim_years)
cum_disp_logn_1000_1000 = make_cum_slip(1000, 1000, lognormal,
displacement_mean=1.,
displacement_std=0.,
yrs=sim_years)
cum_disp_logn_1000_2000 = make_cum_slip(1000, 2000, lognormal,
displacement_mean=1.,
displacement_std=0.,
yrs=sim_years)
cum_disp_exp_1000 = make_cum_slip(1000, 1000, exponential,
displacement_mean=1.,
displacement_std=0.,
yrs=sim_years)
print('Done.')
print('Calculating slip rate history windows...')
# calculate the windows.
windows = np.logspace(np.log10(500),
np.log10(100000), 50, dtype='int')
# calculate the slip rates for each series.
logn_1000_500_rates = {w : moving_average_rate(cum_disp_logn_1000_500, w)
for w in windows}
logn_1000_1000_rates = {w : moving_average_rate(cum_disp_logn_1000_1000, w)
for w in windows}
logn_1000_2000_rates = {w : moving_average_rate(cum_disp_logn_1000_2000, w)
for w in windows}
exp_1000_rates = {w : moving_average_rate(cum_disp_exp_1000, w)
for w in windows}
print('Done.')
print('Calculating statistics for each window...')
# calculate the quartiles for each series.
pctiles = [1, 25, 50, 75, 99]
logn_1000_500_rate_quarts = {w: {p: ss.scoreatpercentile(
logn_1000_500_rates[w], p)
for p in pctiles}
for w in windows}
logn_1000_1000_rate_quarts = {w: {p: ss.scoreatpercentile(
logn_1000_1000_rates[w], p)
for p in pctiles}
for w in windows}
logn_1000_2000_rate_quarts = {w: {p: ss.scoreatpercentile(
logn_1000_2000_rates[w], p)
for p in pctiles}
for w in windows}
exp_1000_rate_quarts = {w: {p: ss.scoreatpercentile(
exp_1000_rates[w], p)
for p in pctiles}
for w in windows}
a_pctiles = [5, 25, 50, 75, 95]
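# est_slip_adjust (below) maps each windowed slip-rate estimate to its reciprocal,
# sending zero-slip windows to infinity and capping values at `top_clip` so that the
# percentile statistics computed from the adjusted values stay finite.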
def est_slip_adjust(rates, top_clip=None):
adj = np.copy(rates)
adj[rates > 0.] = 1 / rates[rates > 0.]
adj[rates == 0.] = np.inf
if top_clip is not None:
adj[adj > top_clip] = top_clip
return adj
tc = 5.
logn_1000_500_rq_adj = {w: {p: ss.scoreatpercentile(
est_slip_adjust(logn_1000_500_rates[w],
top_clip=tc), p)
for p in a_pctiles}
for w in windows}
logn_1000_1000_rq_adj = {w: {p: ss.scoreatpercentile(
est_slip_adjust(logn_1000_1000_rates[w],
top_clip=tc), p)
for p in a_pctiles}
for w in windows}
logn_1000_2000_rq_adj = {w: {p: ss.scoreatpercentile(
est_slip_adjust(logn_1000_2000_rates[w],
top_clip=tc), p)
for p in a_pctiles}
for w in windows}
exp_1000_rq_adj = {w: {p: ss.scoreatpercentile(
est_slip_adjust(exp_1000_rates[w],
top_clip=tc), p)
for p in a_pctiles}
for w in windows}
print('Done.')
print('Doing some convenience calculations for plotting...')
# make arrays from quartiles for faster plotting
logn_1000_500_rq_arrays = {p: np.array([logn_1000_500_rate_quarts[w][p]
for w in windows])
for p in pctiles}
logn_1000_1000_rq_arrays = {p: np.array([logn_1000_1000_rate_quarts[w][p]
for w in windows])
for p in pctiles}
logn_1000_2000_rq_arrays = {p: np.array([logn_1000_2000_rate_quarts[w][p]
for w in windows])
for p in pctiles}
exp_1000_rq_arrays = {p: np.array([exp_1000_rate_quarts[w][p]
for w in windows])
for p in pctiles}
logn_1000_500_adj_arrays = {p: np.array([logn_1000_500_rq_adj[w][p]
for w in windows])
for p in a_pctiles}
logn_1000_1000_adj_arrays = {p: np.array([logn_1000_1000_rq_adj[w][p]
for w in windows])
for p in a_pctiles}
logn_1000_2000_adj_arrays = {p: np.array([logn_1000_2000_rq_adj[w][p]
for w in windows])
for p in a_pctiles}
exp_1000_adj_arrays = {p: np.array([exp_1000_rq_adj[w][p]
for w in windows])
for p in a_pctiles}
# normalize results by mean EQ cycle length
eq_cycles = windows / 1000
print('Done.')
print('Plotting results...')
'''plotting'''
SMALL_SIZE = 6
MEDIUM_SIZE = 8
BIGGER_SIZE = 10
one_col = 8.3 / 2.54 # cm to in
two_col = 12. / 2.54 # cm to in
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=MEDIUM_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
# Plot recurrence distributions
t = np.linspace(0,8000, 500)
f1, ax1 = plt.subplots(1,1, figsize=(one_col, one_col))
#ax1.set_title('Earthquake recurrence distributions')
ax1.plot(t, lgn_1000_500.pdf(t), label='logn, μ=1000, σ=500', lw=0.5)
ax1.plot(t, lgn_1000_1000.pdf(t), label='logn, μ=1000, σ=1000', lw=0.5)
ax1.plot(t, lgn_1000_2000.pdf(t), label='logn, μ=1000, σ=2000', lw=0.5)
ax1.plot(t, exp_1000.pdf(t), label='exp, μ=σ=1000', lw=0.5)
ax1.legend(loc='best')
ax1.set_xlabel('Recurrence Interval (yr)')
ax1.set_ylabel('Probability')
# Plot slip distribution
f2, ax2 = plt.subplots(1,1, figsize=(one_col, one_col))
#ax2.set_title('Earthquake slip distribution')
ax2.plot(np.linspace(0, 5, num=500),
lognormal(1, 0.000001, return_pdf=True).pdf(np.linspace(0,5,num=500)),
'k')
ax2.set_xlabel('Slip (m)')
ax2.set_ylabel('Probability')
# Plot displacement histories
f3, (ax30, ax31) = plt.subplots(2,1, figsize=(two_col, two_col * 0.8))
# first 100,000 years
ax30.plot([0,1e5],[0,100],'k', lw=0.5)
ax30.plot(cum_disp_logn_1000_500[:int(1e5)],
lw=0.75, label='logn, μ=1000, σ=500 (periodic)')
ax30.plot(cum_disp_logn_1000_1000[:int(1e5)],
lw=0.75, label='logn, μ=1000, σ=1000 (unclustered)')
ax30.plot(cum_disp_logn_1000_2000[:int(1e5)],
lw=0.75, label='logn, μ=1000, σ=2000 (clustered)')
ax30.plot(cum_disp_exp_1000[:int(1e5)],
lw=0.75, label='exp, μ=1000 (unclustered)')
ax30.legend(loc='upper left')
#ax30.set_xlabel('years')
ax30.set_ylabel('cumulative displacement (m)')
# longer term
ax31.plot([0,1e6],[0,1000],'k', lw=0.5)
ax31.plot(cum_disp_logn_1000_500[:int(2e6)],
lw=0.75, label='logn, μ=1000, σ=500 (periodic)')
ax31.plot(cum_disp_logn_1000_1000[:int(2e6)],
lw=0.75, label='logn, μ=1000, σ=1000 (unclustered)')
ax31.plot(cum_disp_logn_1000_2000[:int(2e6)],
lw=0.75, label='logn, μ=1000, σ=2000 (clustered)')
ax31.plot(cum_disp_exp_1000[:int(2e6)],
lw=0.75, label='exp, μ=1000 (unclustered)')
ax31.legend(loc='upper left')
ax31.set_xlabel('years')
ax31.set_ylabel('cumulative displacement (m)')
# Plot event spacing example
f4, ax4 = plt.subplots(1,1, figsize=(one_col, one_col))
ax4.scatter(lognormal(1000, 500, n_eqs).cumsum(),
np.ones(n_eqs) * 1.5, marker='|',
c='C0', s=10, label='logn, μ=1000, σ=500 (periodic)')
ax4.scatter(lognormal(1000, 1000, n_eqs).cumsum(),
np.ones(n_eqs) * 1, marker='|',
c='C1', s=10, label='logn, μ=1000, σ=1000 (unclustered)')
ax4.scatter(lognormal(1000, 2000, n_eqs).cumsum(),
np.ones(n_eqs) * 0.5, marker='|',
c='C2', s=10, label='logn, μ=1000, σ=2000 (clustered)')
ax4.scatter(exponential(1000, 1000, n_eqs).cumsum(),
np.ones(n_eqs) * 0, marker='|',
c='C3', s=10, label='exp, μ=σ=1000 (unclustered)')
ax4.legend(loc='upper center')
ax4.set_ylim([-0.1,2.5])
ax4.set_xlabel('years')
ax4.set_yticks([])
# Plot slip rate envelopes
f5, (ax50, ax51, ax52, ax53) = plt.subplots(4, 1, figsize=(two_col, two_col),
sharex=True, sharey=True)
ax50.axhline(1, color='k', lw=0.5)  # true long-term slip rate (1 mm/yr)
ax50.fill_between(eq_cycles, logn_1000_500_rq_arrays[1],
logn_1000_500_rq_arrays[99],
alpha=0.15, color='C0',
label='1-99th pctile',
lw=0)
ax50.fill_between(eq_cycles, logn_1000_500_rq_arrays[25],
logn_1000_500_rq_arrays[75],
alpha=0.5, color='C0',
label='25-75th pctile',
lw=0)
ax50.plot(eq_cycles, logn_1000_500_rq_arrays[50], 'C0-', label='median',
lw=0.75)
#ax50.set_ylabel('Estimated slip rate (mm/yr)')
ax50.legend(loc='upper right')
#ax50.set_title('Lognormal, mean=1000yr, std=500yr')
ax51.axhline(1, color='k', lw=0.5)
ax51.fill_between(eq_cycles, logn_1000_1000_rq_arrays[1],
logn_1000_1000_rq_arrays[99],
alpha=0.15, color='C1',
label='1-99th pctile',
lw=0)
ax51.fill_between(eq_cycles, logn_1000_1000_rq_arrays[25],
logn_1000_1000_rq_arrays[75],
alpha=0.5, color='C1',
label='25-75th pctile',
lw=0)
ax51.plot(eq_cycles, logn_1000_1000_rq_arrays[50], 'C1-', label='median',
lw=0.75)
ax52.axhline(1, color='k', lw=0.5)
ax52.fill_between(eq_cycles, logn_1000_2000_rq_arrays[1],
logn_1000_2000_rq_arrays[99],
alpha=0.15, color='C2',
label='1-99th pctile',
lw=0)
ax52.fill_between(eq_cycles, logn_1000_2000_rq_arrays[25],
logn_1000_2000_rq_arrays[75],
alpha=0.5, color='C2',
label='25-75th pctile',
lw=0)
ax52.plot(eq_cycles, logn_1000_2000_rq_arrays[50], 'C2-', label='median',
lw=0.75)
ax53.axhline(1, color='k', lw=0.5)
ax53.fill_between(eq_cycles, exp_1000_rq_arrays[1],
exp_1000_rq_arrays[99],
alpha=0.15, color='C3',
label='1-99th pctile',
lw=0)
ax53.fill_between(eq_cycles, exp_1000_rq_arrays[25],
exp_1000_rq_arrays[75],
alpha=0.5, color='C3',
label='25-75th pctile',
lw=0)
ax53.plot(eq_cycles, exp_1000_rq_arrays[50], 'C3-', label='median',
lw=0.75)
#ax51.legend(loc='upper right')
#ax51.set_title('Exponential, mean=1000yr, std=1000yr')
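# Axis labels and shared limits for the four stacked panels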
ax52.set_ylabel('Estimated slip rate (mm/yr)')
ax53.set_xlabel('Mean number of earthquakes (or thousand years of window length)')
#ax50.set_xscale('log')
#plt.xlim([0, 20])
ax50.set_ylim([0,4])
ax50.set_xlim([0,60])
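# Helper to overlay a given (lower, upper) pair of rate percentiles for all
# four recurrence distributions on one axis (used by the commented-out
# summary figure below).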
def plot_rate_quartiles(quarts=None, median=True, xlim=None, ylim=None,
                        log_x=False, log_y=False, fill_between=False,
                        mean=1, ax=None, lw=1.):
    """Plot the (lower, upper) percentile rate curves in `quarts` for all
    four recurrence distributions on a single axis."""
    if ax is None:
        _, ax = plt.subplots(1, 1)
    if mean is not False:
        ax.axhline(mean, color='k', lw=0.5)
ax.plot(eq_cycles, logn_1000_500_rq_arrays[quarts[0]], color='C0', lw=lw)
ax.plot(eq_cycles, logn_1000_500_rq_arrays[quarts[1]], color='C0', lw=lw
#label='logn, μ=1000, σ=500'
)
ax.plot(eq_cycles, logn_1000_1000_rq_arrays[quarts[0]], color='C1', lw=lw)
ax.plot(eq_cycles, logn_1000_1000_rq_arrays[quarts[1]], color='C1', lw=lw
#label='logn, μ=1000, σ=1000',
)
ax.plot(eq_cycles, logn_1000_2000_rq_arrays[quarts[0]], color='C2', lw=lw)
ax.plot(eq_cycles, logn_1000_2000_rq_arrays[quarts[1]], color='C2', lw=lw
#label='logn, μ=1000, σ=2000',
)
ax.plot(eq_cycles, exp_1000_rq_arrays[quarts[0]], color='C3', lw=lw)
ax.plot(eq_cycles, exp_1000_rq_arrays[quarts[1]], color='C3', lw=lw
#label='exp, μ=σ=1000',
)
if median is True:
ax.plot(eq_cycles, logn_1000_500_rq_arrays[50], 'C0--', lw=lw)
ax.plot(eq_cycles, logn_1000_1000_rq_arrays[50], 'C1--', lw=lw)
ax.plot(eq_cycles, logn_1000_2000_rq_arrays[50], 'C2--', lw=lw)
ax.plot(eq_cycles, exp_1000_rq_arrays[50], 'C3--', lw=lw)
if xlim is not None:
ax.set_xlim(xlim)
if ylim is not None:
ax.set_ylim(ylim)
if log_x is True:
ax.set_xscale('log')
if log_y is True:
ax.set_yscale('log')
return ax
#f6, ((ax61, ax62), (ax63, ax64)) = plt.subplots(2,2, figsize=(two_col, two_col))
#
#ax61 = plot_rate_quartiles(quarts=(1,99), ax=ax61, median=False,
# ylim=(0,4), lw=0.75)
#
##ax61.legend(loc='upper right')
##ax61.set_title('Estimated slip rates,\n1-99th percentiles')
#ax61.set_ylabel('Estimated slip rates (mm/yr)')
##ax61.set_xlabel('N Earthquakes (or thousand years)')
#
#
#ax62 = plot_rate_quartiles(quarts=(25,75), ax=ax62, median=True,
# ylim=(0,2), lw=0.75)
#
##ax62.legend(loc='upper right')
##ax62.set_title('Estimated slip rates,\n25-75th percentiles and median')
##ax62.set_ylabel('Estimated slip rates (mm/yr)')
##ax62.set_xlabel('N Earthquakes (or thousand years)')
#
#
#ax63 = plot_rate_quartiles(quarts=(1,99), ax=ax63, median=False,
# ylim=(0,4), xlim=(0,30), lw=0.75)
#
##ax63.legend(loc='upper right')
#ax63.set_ylabel('Estimated slip rates (mm/yr)')
#ax63.set_xlabel('N Earthquakes (or thousand years)')
#
#
#ax64 = plot_rate_quartiles(quarts=(25,75), ax=ax64, median=True,
# ylim=(0.5,1.5), xlim=(0,30), lw=0.75)
#
##ax64.legend(loc='upper right')
##ax64.set_ylabel('Estimated slip rates (mm/yr)')
#ax64.set_xlabel('N Earthquakes (or thousand years)')
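# Plot percentile envelopes of the epistemic uncertainty adjustment for each
# recurrence distribution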
f7, (ax70, ax71, ax72, ax73) = plt.subplots(4, 1, figsize=(two_col, two_col),
sharex=True, sharey=True)
ax70.axhline(1, color='k', lw=0.5)
ax70.fill_between(eq_cycles, logn_1000_500_adj_arrays[5],
logn_1000_500_adj_arrays[95],
alpha=0.15, color='C0',
label='5-95th pctile',
lw=0)
ax70.fill_between(eq_cycles, logn_1000_500_adj_arrays[25],
logn_1000_500_adj_arrays[75],
alpha=0.5, color='C0',
label='25-75th pctile',
lw=0)
ax70.plot(eq_cycles, logn_1000_500_adj_arrays[50], 'C0-', lw=0.75,
label='median')
ax71.axhline(1, color='k', lw=0.5)
ax71.fill_between(eq_cycles, logn_1000_1000_adj_arrays[5],
logn_1000_1000_adj_arrays[95],
alpha=0.15, color='C1',
lw=0)
ax71.fill_between(eq_cycles, logn_1000_1000_adj_arrays[25],
logn_1000_1000_adj_arrays[75],
alpha=0.5, color='C1',
lw=0)
ax71.plot(eq_cycles, logn_1000_1000_adj_arrays[50], 'C1-', lw=0.75)
ax72.axhline(1, color='k', lw=0.5)
ax72.fill_between(eq_cycles, logn_1000_2000_adj_arrays[5],
logn_1000_2000_adj_arrays[95],
alpha=0.15, color='C2',
lw=0)
ax72.fill_between(eq_cycles, logn_1000_2000_adj_arrays[25],
logn_1000_2000_adj_arrays[75],
alpha=0.5, color='C2',
lw=0)
ax72.plot(eq_cycles, logn_1000_2000_adj_arrays[50], 'C2-', lw=0.75)
ax73.axhline(1, color='k', lw=0.5)
ax73.fill_between(eq_cycles, exp_1000_adj_arrays[5],
exp_1000_adj_arrays[95],
alpha=0.15, color='C3',
lw=0)
ax73.fill_between(eq_cycles, exp_1000_adj_arrays[25],
exp_1000_adj_arrays[75],
alpha=0.5, color='C3',
lw=0)
ax73.plot(eq_cycles, exp_1000_adj_arrays[50], 'C3-', lw=0.75)
ax70.legend(loc='upper right')
ax72.set_ylabel('Epistemic uncertainty in rate measurement')
ax73.set_xlabel('Mean number of earthquakes (or thousand years)')
plt.xlim([0,40])
plt.ylim([0,5])
print('Done.')
print('Re-calculating values for epistemic uncertainty table...')
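# No top clip on the rate adjustment for the tabulated values (top_clip=None)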
tc = None
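# Window lengths (yr) used for the table, approximately 2.5, 5, 10, and 40 kyr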
a_windows = [2531, 4843, 10323, 42103]
logn_1000_500_rq_adj = {w: {p: ss.scoreatpercentile(
est_slip_adjust(logn_1000_500_rates[w],
top_clip=tc), p)
for p in a_pctiles}
for w in a_windows}
logn_1000_1000_rq_adj = {w: {p: ss.scoreatpercentile(
est_slip_adjust(logn_1000_1000_rates[w],
top_clip=tc), p)
for p in a_pctiles}
for w in a_windows}
logn_1000_2000_rq_adj = {w: {p: ss.scoreatpercentile(
est_slip_adjust(logn_1000_2000_rates[w],
top_clip=tc), p)
for p in a_pctiles}
for w in a_windows}
exp_1000_rq_adj = {w: {p: ss.scoreatpercentile(
est_slip_adjust(exp_1000_rates[w],
top_clip=tc), p)
for p in a_pctiles}
for w in a_windows}
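# Re-assemble the adjusted-rate percentiles into arrays over the table windows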
logn_1000_500_adj_arrays = {p: np.array([logn_1000_500_rq_adj[w][p]
for w in a_windows])
for p in a_pctiles}
logn_1000_1000_adj_arrays = {p: np.array([logn_1000_1000_rq_adj[w][p]
for w in a_windows])
for p in a_pctiles}
logn_1000_2000_adj_arrays = {p: np.array([logn_1000_2000_rq_adj[w][p]
for w in a_windows])
for p in a_pctiles}
exp_1000_adj_arrays = {p: np.array([exp_1000_rq_adj[w][p]
for w in a_windows])
for p in a_pctiles}
print('Done.')
print('Making table...')
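# Build a per-distribution table of rate percentiles at the selected window
# lengths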
def make_perc_df(name, rates, winds=[2500, 5000,10000,40000],
pctiles=[5,25,50,75,95]):
ts = [windows[np.argmin(np.abs(windows-w))]
for w in winds]
cols = ['dist','t'] + pctiles
    df = pd.DataFrame(columns=cols, index=ts)